extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef long long __s64; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __le16; typedef __u16 __be16; typedef __u32 __le32; typedef __u32 __be32; typedef __u64 __le64; typedef __u16 __sum16; typedef __u32 __wsum; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct 
__anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; typedef u64 cycle_t; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct____missing_field_name_9 { unsigned int a ; unsigned int b ; }; struct __anonstruct____missing_field_name_10 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4 ; struct __anonstruct____missing_field_name_10 __annonCompField5 ; }; struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6 ; }; typedef unsigned long pteval_t; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct __anonstruct_pte_t_11 { pteval_t pte ; }; typedef struct __anonstruct_pte_t_11 pte_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_12 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_12 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct 
mm_struct; struct task_struct; struct cpumask; struct qspinlock { atomic_t val ; }; typedef struct qspinlock arch_spinlock_t; struct qrwlock { atomic_t cnts ; arch_spinlock_t lock ; }; typedef struct qrwlock arch_rwlock_t; typedef void (*ctor_fn_t)(void); struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; struct device; struct net_device; struct file_operations; struct completion; enum system_states { SYSTEM_BOOTING = 0, SYSTEM_RUNNING = 1, SYSTEM_HALT = 2, SYSTEM_POWER_OFF = 3, SYSTEM_RESTART = 4 } ; struct lockdep_map; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion____missing_field_name_15 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion____missing_field_name_15 __annonCompField7 ; }; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct cpumask { unsigned long bits[128U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct fregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct____missing_field_name_25 { u64 rip ; u64 rdp ; }; struct __anonstruct____missing_field_name_26 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion____missing_field_name_24 { struct __anonstruct____missing_field_name_25 __annonCompField11 ; struct __anonstruct____missing_field_name_26 __annonCompField12 ; }; union __anonunion____missing_field_name_27 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct fxregs_state { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion____missing_field_name_24 __annonCompField13 ; u32 mxcsr ; u32 mxcsr_mask 
; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion____missing_field_name_27 __annonCompField14 ; }; struct swregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct xstate_header { u64 xfeatures ; u64 xcomp_bv ; u64 reserved[6U] ; }; struct xregs_state { struct fxregs_state i387 ; struct xstate_header header ; u8 __reserved[464U] ; }; union fpregs_state { struct fregs_state fsave ; struct fxregs_state fxsave ; struct swregs_state soft ; struct xregs_state xsave ; }; struct fpu { union fpregs_state state ; unsigned int last_cpu ; unsigned char fpstate_active ; unsigned char fpregs_active ; unsigned char counter ; }; struct seq_operations; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct fpu fpu ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; typedef atomic64_t atomic_long_t; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; 
unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 1 ; unsigned char hardirqs_off : 1 ; unsigned short references : 12 ; unsigned int pin_count ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct____missing_field_name_31 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion____missing_field_name_30 { struct raw_spinlock rlock ; struct __anonstruct____missing_field_name_31 __annonCompField16 ; }; struct spinlock { union __anonunion____missing_field_name_30 __annonCompField17 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_32 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_32 rwlock_t; struct optimistic_spin_queue { atomic_t tail ; }; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_34 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_35 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_36 { 
struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion____missing_field_name_33 { struct __anonstruct_futex_34 futex ; struct __anonstruct_nanosleep_35 nanosleep ; struct __anonstruct_poll_36 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion____missing_field_name_33 __annonCompField18 ; }; typedef int pao_T__; typedef int pao_T_____0; struct seqcount { unsigned int sequence ; struct lockdep_map dep_map ; }; typedef struct seqcount seqcount_t; struct __anonstruct_seqlock_t_45 { struct seqcount seqcount ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_45 seqlock_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct timer_list { struct hlist_node entry ; unsigned long expires ; void (*function)(unsigned long ) ; unsigned long data ; u32 flags ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct notifier_block; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct ctl_table; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; }; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct 
__anonstruct____missing_field_name_47 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion____missing_field_name_46 { struct __anonstruct____missing_field_name_47 __annonCompField19 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion____missing_field_name_46 __annonCompField20 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_header * , struct ctl_table * ) ; }; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; struct workqueue_struct *wq ; int cpu ; }; struct vm_area_struct; struct __anonstruct_nodemask_t_48 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_48 nodemask_t; struct free_area { struct list_head free_list[6U] ; unsigned long nr_free ; }; struct pglist_data; struct zone_padding { char x[0U] ; }; struct zone_reclaim_stat { unsigned long recent_rotated[2U] ; unsigned long recent_scanned[2U] ; }; struct zone; struct lruvec { struct list_head lists[5U] ; struct zone_reclaim_stat reclaim_stat ; struct zone *zone ; }; struct per_cpu_pages { int count ; int high ; int batch ; struct list_head lists[3U] ; }; struct per_cpu_pageset { struct per_cpu_pages pcp ; s8 expire ; s8 stat_threshold ; s8 vm_stat_diff[39U] ; }; enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, 
__MAX_NR_ZONES = 4 } ; struct zone { unsigned long watermark[3U] ; long lowmem_reserve[4U] ; int node ; unsigned int inactive_ratio ; struct pglist_data *zone_pgdat ; struct per_cpu_pageset *pageset ; unsigned long dirty_balance_reserve ; unsigned long min_unmapped_pages ; unsigned long min_slab_pages ; unsigned long zone_start_pfn ; unsigned long managed_pages ; unsigned long spanned_pages ; unsigned long present_pages ; char const *name ; int nr_migrate_reserve_block ; unsigned long nr_isolate_pageblock ; seqlock_t span_seqlock ; wait_queue_head_t *wait_table ; unsigned long wait_table_hash_nr_entries ; unsigned long wait_table_bits ; struct zone_padding _pad1_ ; struct free_area free_area[11U] ; unsigned long flags ; spinlock_t lock ; struct zone_padding _pad2_ ; spinlock_t lru_lock ; struct lruvec lruvec ; atomic_long_t inactive_age ; unsigned long percpu_drift_mark ; unsigned long compact_cached_free_pfn ; unsigned long compact_cached_migrate_pfn[2U] ; unsigned int compact_considered ; unsigned int compact_defer_shift ; int compact_order_failed ; bool compact_blockskip_flush ; struct zone_padding _pad3_ ; atomic_long_t vm_stat[39U] ; }; struct zonelist_cache { unsigned short z_to_n[4096U] ; unsigned long fullzones[64U] ; unsigned long last_full_zap ; }; struct zoneref { struct zone *zone ; int zone_idx ; }; struct zonelist { struct zonelist_cache *zlcache_ptr ; struct zoneref _zonerefs[4097U] ; struct zonelist_cache zlcache ; }; struct pglist_data { struct zone node_zones[4U] ; struct zonelist node_zonelists[2U] ; int nr_zones ; spinlock_t node_size_lock ; unsigned long node_start_pfn ; unsigned long node_present_pages ; unsigned long node_spanned_pages ; int node_id ; wait_queue_head_t kswapd_wait ; wait_queue_head_t pfmemalloc_wait ; struct task_struct *kswapd ; int kswapd_max_order ; enum zone_type classzone_idx ; spinlock_t numabalancing_migrate_lock ; unsigned long numabalancing_migrate_next_window ; unsigned long numabalancing_migrate_nr_pages ; unsigned 
long first_deferred_pfn ; }; typedef struct pglist_data pg_data_t; struct rw_semaphore; struct rw_semaphore { long count ; struct list_head wait_list ; raw_spinlock_t wait_lock ; struct optimistic_spin_queue osq ; struct task_struct *owner ; struct lockdep_map dep_map ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct resource { resource_size_t start ; resource_size_t end ; char const *name ; unsigned long flags ; struct resource *parent ; struct resource *sibling ; struct resource *child ; }; struct pci_dev; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct wake_irq; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; struct list_head clock_list ; }; struct dev_pm_qos; struct dev_pm_info { 
pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool is_noirq_suspended ; bool is_late_suspended ; bool ignore_children ; bool early_init ; bool direct_complete ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; struct wake_irq *wakeirq ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; unsigned char memalloc_noio : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; void (*set_latency_tolerance)(struct device * , s32 ) ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; void (*detach)(struct device * , bool ) ; int (*activate)(struct device * ) ; void (*sync)(struct device * ) ; void (*dismiss)(struct device * ) ; }; struct pci_bus; struct __anonstruct_mm_context_t_113 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; atomic_t perf_rdpmc_allowed ; }; typedef struct __anonstruct_mm_context_t_113 mm_context_t; struct bio_vec; struct llist_node; struct llist_node { struct llist_node *next ; }; struct kmem_cache; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct inode; struct dentry; struct user_namespace; struct 
plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct __anonstruct____missing_field_name_146 { struct arch_uprobe_task autask ; unsigned long vaddr ; }; struct __anonstruct____missing_field_name_147 { struct callback_head dup_xol_work ; unsigned long dup_xol_addr ; }; union __anonunion____missing_field_name_145 { struct __anonstruct____missing_field_name_146 __annonCompField33 ; struct __anonstruct____missing_field_name_147 __annonCompField34 ; }; struct uprobe; struct return_instance; struct uprobe_task { enum uprobe_task_state state ; union __anonunion____missing_field_name_145 __annonCompField35 ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; struct return_instance *return_instances ; unsigned int depth ; }; struct xol_area; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; struct mem_cgroup; typedef void compound_page_dtor(struct page * ); union __anonunion____missing_field_name_148 { struct address_space *mapping ; void *s_mem ; }; union __anonunion____missing_field_name_150 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct____missing_field_name_154 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion____missing_field_name_153 { atomic_t _mapcount ; struct __anonstruct____missing_field_name_154 __annonCompField38 ; int units ; }; struct __anonstruct____missing_field_name_152 { union __anonunion____missing_field_name_153 __annonCompField39 ; atomic_t _count ; }; union __anonunion____missing_field_name_151 { unsigned long counters ; struct __anonstruct____missing_field_name_152 __annonCompField40 ; unsigned int active ; }; struct __anonstruct____missing_field_name_149 { union 
__anonunion____missing_field_name_150 __annonCompField37 ; union __anonunion____missing_field_name_151 __annonCompField41 ; }; struct __anonstruct____missing_field_name_156 { struct page *next ; int pages ; int pobjects ; }; struct slab; struct __anonstruct____missing_field_name_157 { compound_page_dtor *compound_dtor ; unsigned long compound_order ; }; union __anonunion____missing_field_name_155 { struct list_head lru ; struct __anonstruct____missing_field_name_156 __annonCompField43 ; struct slab *slab_page ; struct callback_head callback_head ; struct __anonstruct____missing_field_name_157 __annonCompField44 ; pgtable_t pmd_huge_pte ; }; union __anonunion____missing_field_name_158 { unsigned long private ; spinlock_t *ptl ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; union __anonunion____missing_field_name_148 __annonCompField36 ; struct __anonstruct____missing_field_name_149 __annonCompField42 ; union __anonunion____missing_field_name_155 __annonCompField45 ; union __anonunion____missing_field_name_158 __annonCompField46 ; struct mem_cgroup *mem_cgroup ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_shared_159 { struct rb_node rb ; unsigned long rb_subtree_last ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct __anonstruct_shared_159 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct 
core_thread dumper ; struct completion startup ; }; struct task_rss_stat { int events ; int count[3U] ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct kioctx_table; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; u32 vmacache_seqnum ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; unsigned long mmap_base ; unsigned long mmap_legacy_base ; unsigned long task_size ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; atomic_long_t nr_ptes ; atomic_long_t nr_pmds ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[46U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct kioctx_table *ioctx_table ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_scan_offset ; int numa_scan_seq ; bool tlb_flush_pending ; struct uprobes_state uprobes_state ; void *bd_addr ; }; typedef unsigned long cputime_t; struct __anonstruct_kuid_t_161 { uid_t val ; }; typedef struct __anonstruct_kuid_t_161 kuid_t; struct __anonstruct_kgid_t_162 { gid_t val ; }; 
typedef struct __anonstruct_kgid_t_162 kgid_t; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct user_struct; struct sysv_shm { struct list_head shm_clist ; }; struct __anonstruct_sigset_t_163 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_163 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_165 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_166 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_167 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_168 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__addr_bnd_170 { void *_lower ; void *_upper ; }; struct __anonstruct__sigfault_169 { void *_addr ; short _addr_lsb ; struct __anonstruct__addr_bnd_170 _addr_bnd ; }; struct __anonstruct__sigpoll_171 { long _band ; int _fd ; }; struct __anonstruct__sigsys_172 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_164 { int _pad[28U] ; struct __anonstruct__kill_165 _kill ; struct __anonstruct__timer_166 _timer ; struct __anonstruct__rt_167 _rt ; struct __anonstruct__sigchld_168 _sigchld ; struct __anonstruct__sigfault_169 _sigfault ; struct __anonstruct__sigpoll_171 _sigpoll ; struct __anonstruct__sigsys_172 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_164 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; 
sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex { raw_spinlock_t wait_lock ; struct rb_root waiters ; struct rb_node *waiters_leftmost ; struct task_struct *owner ; int save_state ; char const *name ; char const *file ; int line ; void *magic ; }; struct rt_mutex_waiter; struct rlimit { __kernel_ulong_t rlim_cur ; __kernel_ulong_t rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t (*get_time)(void) ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; seqcount_t seq ; struct hrtimer *running ; unsigned int cpu ; unsigned int active_bases ; unsigned int clock_was_set_seq ; bool migration_enabled ; bool nohz_active ; unsigned char in_hrtirq : 1 ; unsigned char hres_active : 1 ; unsigned char hang_detected : 1 ; ktime_t expires_next ; struct 
hrtimer *next_timer ; unsigned int nr_events ; unsigned int nr_retries ; unsigned int nr_hangs ; unsigned int max_hang_time ; struct hrtimer_clock_base clock_base[4U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root ; unsigned long nr_leaves_on_tree ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct cred; struct key_type; struct keyring_index_key { struct key_type *type ; char const *description ; size_t desc_len ; }; union __anonunion____missing_field_name_179 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion____missing_field_name_180 { time_t expiry ; time_t revoked_at ; }; struct __anonstruct____missing_field_name_182 { struct key_type *type ; char *description ; }; union __anonunion____missing_field_name_181 { struct keyring_index_key index_key ; struct __anonstruct____missing_field_name_182 __annonCompField49 ; }; union __anonunion_type_data_183 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_185 { unsigned long value ; void *rcudata ; void *data ; void *data2[2U] ; }; union __anonunion____missing_field_name_184 { union __anonunion_payload_185 payload ; struct assoc_array keys ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion____missing_field_name_179 __annonCompField47 ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion____missing_field_name_180 __annonCompField48 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; union __anonunion____missing_field_name_181 
__annonCompField50 ; union __anonunion_type_data_183 type_data ; union __anonunion____missing_field_name_184 __annonCompField51 ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; union __anonunion____missing_field_name_186 { unsigned long bitmap[4U] ; struct callback_head callback_head ; }; struct idr_layer { int prefix ; int layer ; struct idr_layer *ary[256U] ; int count ; union __anonunion____missing_field_name_186 __annonCompField52 ; }; struct idr { struct idr_layer *hint ; struct idr_layer *top ; int layers ; int cur ; spinlock_t lock ; int id_free_cnt ; struct idr_layer *id_free ; }; struct ida_bitmap { long nr_busy ; unsigned long bitmap[15U] ; }; struct ida { struct idr idr ; struct ida_bitmap *free_bitmap ; }; struct percpu_ref; typedef void percpu_ref_func_t(struct percpu_ref * ); struct percpu_ref { atomic_long_t count ; unsigned long percpu_count_ptr ; percpu_ref_func_t *release ; percpu_ref_func_t *confirm_switch ; bool force_atomic ; struct callback_head rcu ; }; struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct kernfs_node; struct kernfs_ops; struct kernfs_open_file; struct cgroup_subsys_state { struct cgroup *cgroup ; struct cgroup_subsys *ss ; struct percpu_ref refcnt ; struct cgroup_subsys_state *parent 
; struct list_head sibling ; struct list_head children ; int id ; unsigned int flags ; u64 serial_nr ; struct callback_head callback_head ; struct work_struct destroy_work ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head mg_tasks ; struct list_head cgrp_links ; struct cgroup *dfl_cgrp ; struct cgroup_subsys_state *subsys[12U] ; struct list_head mg_preload_node ; struct list_head mg_node ; struct cgroup *mg_src_cgrp ; struct css_set *mg_dst_cset ; struct list_head e_cset_node[12U] ; struct callback_head callback_head ; }; struct cgroup { struct cgroup_subsys_state self ; unsigned long flags ; int id ; int populated_cnt ; struct kernfs_node *kn ; struct kernfs_node *procs_kn ; struct kernfs_node *populated_kn ; unsigned int subtree_control ; unsigned int child_subsys_mask ; struct cgroup_subsys_state *subsys[12U] ; struct cgroup_root *root ; struct list_head cset_links ; struct list_head e_csets[12U] ; struct list_head pidlists ; struct mutex pidlist_mutex ; wait_queue_head_t offline_waitq ; struct work_struct release_agent_work ; }; struct kernfs_root; struct cgroup_root { struct kernfs_root *kf_root ; unsigned int subsys_mask ; int hierarchy_id ; struct cgroup cgrp ; atomic_t nr_cgrps ; struct list_head root_list ; unsigned int flags ; struct idr cgroup_idr ; char release_agent_path[4096U] ; char name[64U] ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct cgroup_subsys *ss ; struct list_head node ; struct kernfs_ops *kf_ops ; u64 (*read_u64)(struct cgroup_subsys_state * , struct cftype * ) ; s64 (*read_s64)(struct cgroup_subsys_state * , struct cftype * ) ; int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; int (*write_u64)(struct cgroup_subsys_state * , struct cftype * , u64 ) ; int 
(*write_s64)(struct cgroup_subsys_state * , struct cftype * , s64 ) ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; struct lock_class_key lockdep_key ; }; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state * ) ; int (*css_online)(struct cgroup_subsys_state * ) ; void (*css_offline)(struct cgroup_subsys_state * ) ; void (*css_released)(struct cgroup_subsys_state * ) ; void (*css_free)(struct cgroup_subsys_state * ) ; void (*css_reset)(struct cgroup_subsys_state * ) ; void (*css_e_css_changed)(struct cgroup_subsys_state * ) ; int (*can_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup_subsys_state * , struct cgroup_subsys_state * , struct task_struct * ) ; void (*bind)(struct cgroup_subsys_state * ) ; int disabled ; int early_init ; bool broken_hierarchy ; bool warned_broken_hierarchy ; int id ; char const *name ; struct cgroup_root *root ; struct idr css_idr ; struct list_head cfts ; struct cftype *dfl_cftypes ; struct cftype *legacy_cftypes ; unsigned int depends_on ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct nameidata; struct cfs_rq; struct task_group; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long 
sum_exec_runtime ; }; struct task_cputime_atomic { atomic64_t utime ; atomic64_t stime ; atomic64_t sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic ; int running ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; struct list_head thread_head ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; int posix_timer_id ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; seqlock_t stats_lock ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; unsigned int audit_tty_log_passwd ; struct tty_audit_buf *tty_audit_buf ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t 
sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; u64 blkio_start ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; u64 freepages_start ; u64 freepages_delay ; u32 freepages_count ; }; struct wake_q_node { struct wake_q_node *next ; }; struct io_context; struct pipe_inode_info; struct uts_namespace; struct load_weight { unsigned long weight ; u32 inv_weight ; }; struct sched_avg { u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; unsigned long utilization_avg_contrib ; u32 runnable_avg_sum ; u32 avg_period ; u32 running_avg_sum ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; int depth 
; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned long watchdog_stamp ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct sched_dl_entity { struct rb_node rb_node ; u64 dl_runtime ; u64 dl_deadline ; u64 dl_period ; u64 dl_bw ; s64 runtime ; u64 deadline ; unsigned int flags ; int dl_throttled ; int dl_new ; int dl_boosted ; int dl_yielded ; struct hrtimer dl_timer ; }; struct memcg_oom_info { struct mem_cgroup *memcg ; gfp_t gfp_mask ; int order ; unsigned char may_oom : 1 ; }; struct sched_class; struct files_struct; struct compat_robust_list_head; struct numa_group; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; struct task_struct *last_wakee ; unsigned long wakee_flips ; unsigned long wakee_flip_decay_ts ; int wake_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct sched_dl_entity dl ; struct hlist_head preempt_notifiers ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; unsigned long rcu_tasks_nvcsw ; bool rcu_tasks_holdout ; struct list_head rcu_tasks_holdout_list ; int rcu_tasks_idle_cpu ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct rb_node pushable_dl_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; u32 vmacache_seqnum ; struct vm_area_struct *vmacache[4U] ; struct task_rss_stat rss_stat ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned long jobctl ; unsigned int personality ; unsigned char in_execve : 1 ; 
unsigned char in_iowait : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; unsigned char sched_migrated : 1 ; unsigned char memcg_kmem_skip_account : 1 ; unsigned char brk_randomized : 1 ; unsigned long atomic_flags ; struct restart_block restart_block ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct list_head thread_node ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; u64 start_time ; u64 real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; struct nameidata *nameidata ; struct sysv_sem sysvsem ; struct sysv_shm sysvshm ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct wake_q_node wake_q ; struct rb_root pi_waiters ; struct rb_node *pi_waiters_leftmost ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter 
*blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; unsigned int numa_scan_period ; unsigned int numa_scan_period_max ; int numa_preferred_nid ; unsigned long numa_migrate_retry ; u64 node_stamp ; u64 last_task_numa_placement ; u64 last_sum_exec_runtime ; struct callback_head numa_work ; struct list_head numa_entry ; struct numa_group *numa_group ; unsigned long *numa_faults ; unsigned long total_numa_faults ; unsigned long numa_faults_locality[3U] ; unsigned long numa_pages_migrated ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; 
int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned int kasan_depth ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_oom_info memcg_oom ; struct uprobe_task *utask ; unsigned int sequential_io ; unsigned int sequential_io_avg ; unsigned long task_state_change ; int pagefault_disabled ; }; enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; typedef enum irqreturn irqreturn_t; struct e1000_hw; struct ethtool_ringparam; struct ethtool_pauseparam; struct ethtool_wolinfo; struct ethtool_eeprom; struct ethtool_coalesce; struct ethtool_eee; struct ethtool_cmd; struct ethtool_rxnfc; struct ethtool_channels; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct iattr; struct super_block; struct file_system_type; struct kernfs_open_node; struct kernfs_iattrs; struct kernfs_elem_dir { unsigned long subdirs ; struct rb_root children ; struct kernfs_root *root ; }; struct kernfs_elem_symlink { struct kernfs_node *target_kn ; }; struct kernfs_elem_attr { struct kernfs_ops const *ops ; struct kernfs_open_node *open ; loff_t size ; struct kernfs_node *notify_next ; }; union __anonunion____missing_field_name_209 { struct kernfs_elem_dir dir ; struct kernfs_elem_symlink symlink ; struct kernfs_elem_attr attr ; }; struct kernfs_node { atomic_t count ; atomic_t 
active ; struct lockdep_map dep_map ; struct kernfs_node *parent ; char const *name ; struct rb_node rb ; void const *ns ; unsigned int hash ; union __anonunion____missing_field_name_209 __annonCompField56 ; void *priv ; unsigned short flags ; umode_t mode ; unsigned int ino ; struct kernfs_iattrs *iattr ; }; struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root * , int * , char * ) ; int (*show_options)(struct seq_file * , struct kernfs_root * ) ; int (*mkdir)(struct kernfs_node * , char const * , umode_t ) ; int (*rmdir)(struct kernfs_node * ) ; int (*rename)(struct kernfs_node * , struct kernfs_node * , char const * ) ; }; struct kernfs_root { struct kernfs_node *kn ; unsigned int flags ; struct ida ino_ida ; struct kernfs_syscall_ops *syscall_ops ; struct list_head supers ; wait_queue_head_t deactivate_waitq ; }; struct kernfs_open_file { struct kernfs_node *kn ; struct file *file ; void *priv ; struct mutex mutex ; int event ; struct list_head list ; char *prealloc_buf ; size_t atomic_write_len ; bool mmapped ; struct vm_operations_struct const *vm_ops ; }; struct kernfs_ops { int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; ssize_t (*read)(struct kernfs_open_file * , char * , size_t , loff_t ) ; size_t atomic_write_len ; bool prealloc ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; int (*mmap)(struct kernfs_open_file * , struct vm_area_struct * ) ; struct lock_class_key lockdep_key ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; bool (*current_may_mount)(void) ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct bin_attribute; struct 
attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; struct bin_attribute **bin_attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct kernfs_node *sd ; struct kref kref ; struct delayed_work release ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *argv[3U] ; char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock 
; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kernel_param; struct kernel_param_ops { unsigned int flags ; int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion____missing_field_name_210 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct module *mod ; struct kernel_param_ops const *ops ; u16 const perm ; s8 level ; u8 flags ; union __anonunion____missing_field_name_210 __annonCompField57 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct latch_tree_node { struct rb_node node[2U] ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; struct completion *kobj_completion ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; struct mod_tree_node { struct module *mod ; struct latch_tree_node node ; }; struct module_sect_attrs; struct module_notes_attrs; struct tracepoint; struct trace_event_call; struct trace_enum_map; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const 
*version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct mutex param_lock ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; bool sig_ok ; bool async_probe_requested ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; struct mod_tree_node mtn_core ; struct mod_tree_node mtn_init ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct trace_event_call **trace_events ; unsigned int num_trace_events ; struct trace_enum_map **trace_enums ; unsigned int num_trace_enums ; bool klp_alive ; struct list_head source_list ; struct list_head target_list ; void (*exit)(void) ; atomic_t refcnt ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct shrink_control { gfp_t gfp_mask ; 
unsigned long nr_to_scan ; int nid ; struct mem_cgroup *memcg ; }; struct shrinker { unsigned long (*count_objects)(struct shrinker * , struct shrink_control * ) ; unsigned long (*scan_objects)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; unsigned long flags ; struct list_head list ; atomic_long_t *nr_deferred ; }; struct file_ra_state; struct writeback_control; struct bdi_writeback; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *cow_page ; struct page *page ; unsigned long max_pgoff ; pte_t *pte ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; void (*map_pages)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*pfn_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; char const *(*name)(struct vm_area_struct * ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; struct page *(*find_special_page)(struct vm_area_struct * , unsigned long ) ; }; struct kvec; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct __anonstruct____missing_field_name_220 { spinlock_t lock ; int count ; }; union __anonunion____missing_field_name_219 { struct __anonstruct____missing_field_name_220 __annonCompField58 ; }; struct lockref { union __anonunion____missing_field_name_219 __annonCompField59 ; }; struct path; struct vfsmount; struct __anonstruct____missing_field_name_222 { u32 hash ; u32 len ; }; union __anonunion____missing_field_name_221 { struct __anonstruct____missing_field_name_222 __annonCompField60 ; u64 hash_len 
; }; struct qstr { union __anonunion____missing_field_name_221 __annonCompField61 ; unsigned char const *name ; }; struct dentry_operations; union __anonunion_d_u_223 { struct hlist_node d_alias ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; struct lockref d_lockref ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; struct list_head d_child ; struct list_head d_subdirs ; union __anonunion_d_u_223 d_u ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_weak_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct dentry const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; struct inode *(*d_select_inode)(struct dentry * , unsigned int ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct list_lru_one { struct list_head list ; long nr_items ; }; struct list_lru_memcg { struct list_lru_one *lru[0U] ; }; struct list_lru_node { spinlock_t lock ; struct list_lru_one lru ; struct list_lru_memcg *memcg_lrus ; }; struct list_lru { struct list_lru_node *node ; struct list_head list ; }; struct __anonstruct____missing_field_name_227 { struct radix_tree_node *parent ; void *private_data ; }; union __anonunion____missing_field_name_226 { struct __anonstruct____missing_field_name_227 __annonCompField62 ; struct callback_head callback_head ; }; struct 
radix_tree_node { unsigned int path ; unsigned int count ; union __anonunion____missing_field_name_226 __annonCompField63 ; struct list_head private_list ; void *slots[64U] ; unsigned long tags[3U][1U] ; }; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct block_device; struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct export_operations; struct iovec; struct kiocb; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iov_iter; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct dquot; typedef __kernel_uid32_t projid_t; struct __anonstruct_kprojid_t_231 { projid_t val ; }; typedef struct __anonstruct_kprojid_t_231 kprojid_t; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion____missing_field_name_232 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion____missing_field_name_232 __annonCompField65 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_max_spc_limit ; qsize_t dqi_max_ino_limit ; void *dqi_priv ; }; struct dquot { struct hlist_node 
dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; int (*get_projid)(struct inode * , kprojid_t * ) ; }; struct qc_dqblk { int d_fieldmask ; u64 d_spc_hardlimit ; u64 d_spc_softlimit ; u64 d_ino_hardlimit ; u64 d_ino_softlimit ; u64 d_space ; u64 d_ino_count ; s64 d_ino_timer ; s64 d_spc_timer ; int d_ino_warns ; int d_spc_warns ; u64 d_rt_spc_hardlimit ; u64 d_rt_spc_softlimit ; u64 d_rt_space ; s64 d_rt_spc_timer ; int d_rt_spc_warns ; }; struct qc_type_state { unsigned int flags ; unsigned int spc_timelimit ; unsigned int ino_timelimit ; unsigned int rt_spc_timelimit ; unsigned int spc_warnlimit ; unsigned int ino_warnlimit ; unsigned int rt_spc_warnlimit ; unsigned long long ino ; blkcnt_t blocks ; blkcnt_t nextents ; }; struct qc_state { unsigned int s_incoredqs ; struct qc_type_state s_state[3U] ; }; struct qc_info { int i_fieldmask ; unsigned int i_flags ; unsigned int i_spc_timelimit ; unsigned int i_ino_timelimit ; unsigned int i_rt_spc_timelimit ; unsigned int i_spc_warnlimit ; unsigned int 
/* NOTE(review): CIL-generated mirror of Linux VFS/quota type declarations.
 * Field order and types must match the kernel headers this harness was
 * generated from — do not reorder or retype fields. Code tokens below are
 * byte-identical to the generated output; only comments were added. */
/* Tail of struct mem_dqinfo warn limits (definition starts before this chunk);
 * quota operation tables (quotactl_ops, quota_format_type, quota_info);
 * struct kiocb; struct address_space_operations (continues on next line). */
i_ino_warnlimit ; unsigned int i_rt_spc_warnlimit ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_enable)(struct super_block * , unsigned int ) ; int (*quota_disable)(struct super_block * , unsigned int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*set_info)(struct super_block * , int , struct qc_info * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*get_state)(struct super_block * , struct qc_state * ) ; int (*rm_xquota)(struct super_block * , unsigned int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct inode *files[3U] ; struct mem_dqinfo info[3U] ; struct quota_format_ops const *ops[3U] ; }; struct kiocb { struct file *ki_filp ; loff_t ki_pos ; void (*ki_complete)(struct kiocb * , long , long ) ; void *private ; int ki_flags ; }; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned int , unsigned int ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * 
/* Remainder of address_space_operations; struct address_space (page-cache
 * state per inode); struct block_device; anonymous unions embedded in
 * struct inode (i_nlink, i_dentry/i_rcu, i_pipe/i_bdev/i_cdev/i_link). */
) ; ssize_t (*direct_IO)(struct kiocb * , struct iov_iter * , loff_t ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , unsigned long , unsigned long ) ; void (*is_dirty_writeback)(struct page * , bool * , bool * ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; atomic_t i_mmap_writable ; struct rb_root i_mmap ; struct rw_semaphore i_mmap_rwsem ; unsigned long nrpages ; unsigned long nrshadows ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion____missing_field_name_235 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion____missing_field_name_236 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock_context; struct cdev; union __anonunion____missing_field_name_237 { struct pipe_inode_info *i_pipe ; struct 
/* struct inode (per-file metadata: mode, owner, timestamps, size, mapping);
 * struct fown_struct (SIGIO ownership); struct file_ra_state (readahead);
 * struct file (open-file instance, continues on next line). */
block_device *i_bdev ; struct cdev *i_cdev ; char *i_link ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion____missing_field_name_235 __annonCompField66 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; unsigned long dirtied_time_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct bdi_writeback *i_wb ; int i_wb_frn_winner ; u16 i_wb_frn_avg_time ; u16 i_wb_frn_history ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion____missing_field_name_236 __annonCompField67 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; atomic_t i_readcount ; struct file_operations const *i_fop ; struct file_lock_context *i_flctx ; struct address_space i_data ; struct list_head i_devices ; union __anonunion____missing_field_name_237 __annonCompField68 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_238 { struct llist_node fu_llist ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_238 f_u ; struct path f_path ; struct inode *f_inode ; struct file_operations const *f_op ; spinlock_t f_lock ; atomic_long_t f_count ; unsigned int f_flags 
/* Remainder of struct file; file-lock machinery: file_lock_operations,
 * lock_manager_operations (lease/lock callbacks), per-protocol lock info
 * (NFS/NFSv4/AFS), and struct file_lock itself. */
; fmode_t f_mode ; struct mutex f_pos_lock ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; }; typedef void *fl_owner_t; struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; unsigned long (*lm_owner_key)(struct file_lock * ) ; fl_owner_t (*lm_get_owner)(fl_owner_t ) ; void (*lm_put_owner)(fl_owner_t ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , int ) ; bool (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock * , int , struct list_head * ) ; void (*lm_setup)(struct file_lock * , void ** ) ; }; struct net; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_240 { struct list_head link ; int state ; }; union __anonunion_fl_u_239 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_240 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_list ; struct hlist_node fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; int fl_link_cpu ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_239 fl_u ; }; struct file_lock_context 
/* file_lock_context / fasync_struct; sb_writers (freeze-protection counters);
 * struct super_block (per-mounted-filesystem state); fiemap_extent_info
 * (definition continues past this line). */
{ spinlock_t flc_lock ; struct list_head flc_flock ; struct list_head flc_posix ; struct list_head flc_lease ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_iflags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head s_mounts ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; unsigned int s_quota_types ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; struct workqueue_struct *s_dio_done_wq ; struct hlist_head s_pins ; struct list_lru s_dentry_lru ; struct list_lru s_inode_lru ; struct callback_head rcu ; int s_stack_depth ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent 
/* CIL-generated mirrors of the VFS operation tables. All members are
 * function pointers; layouts must stay identical to the kernel headers
 * this harness was generated from. Comments only — code unchanged. */
/* End of fiemap_extent_info; dir_context (readdir cursor + actor callback);
 * struct file_operations (per-open-file op table: read/write/ioctl/mmap/...);
 * start of struct inode_operations (continues on next line). */
*fi_extents_start ; }; struct dir_context; struct dir_context { int (*actor)(struct dir_context * , char const * , int , loff_t , u64 , unsigned int ) ; loff_t pos ; }; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*read_iter)(struct kiocb * , struct iov_iter * ) ; ssize_t (*write_iter)(struct kiocb * , struct iov_iter * ) ; int (*iterate)(struct file * , struct dir_context * ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*mremap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** , void ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; void (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , 
/* Remainder of inode_operations (namespace ops: create/link/unlink/mkdir/
 * rename/xattr/...); start of struct super_operations (continues). */
struct dentry * , unsigned int ) ; char const *(*follow_link)(struct dentry * , void ** ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct inode * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*rename2)(struct inode * , struct dentry * , struct inode * , struct dentry * , unsigned int ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; int (*tmpfile)(struct inode * , struct dentry * , umode_t ) ; int (*set_acl)(struct inode * , struct posix_acl * , int ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct 
/* Remainder of super_operations (sync/freeze/statfs/remount/quota I/O);
 * struct file_system_type (mount/kill_sb + lockdep class keys);
 * exception_table_entry; klist_node; struct seq_file (continues). */
super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_super)(struct super_block * ) ; int (*freeze_fs)(struct super_block * ) ; int (*thaw_super)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; struct dquot **(*get_dquots)(struct inode * ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; long (*nr_cached_objects)(struct super_block * , struct shrink_control * ) ; long (*free_cached_objects)(struct super_block * , struct shrink_control * ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; struct exception_table_entry { int insn ; int fixup ; }; struct proc_dir_entry; struct klist_node; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; size_t pad_until ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; 
/* CIL-generated mirrors of seq_file iteration ops and the Linux driver-model
 * core types (bus/driver/class/device). Comments only — code unchanged. */
/* End of seq_file; seq_operations (start/stop/next/show iterator callbacks);
 * pinctrl handles (dev_pin_info); dev_archdata (arch DMA ops + iommu);
 * struct bus_type (match/probe/remove/pm callbacks); enum probe_type;
 * start of struct device_driver (continues on next line). */
int poll_event ; struct user_namespace *user_ns ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct pinctrl; struct pinctrl_state; struct dev_pin_info { struct pinctrl *p ; struct pinctrl_state *default_state ; struct pinctrl_state *sleep_state ; struct pinctrl_state *idle_state ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; struct device_attribute; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct device_attribute *dev_attrs ; struct attribute_group const **bus_groups ; struct attribute_group const **dev_groups ; struct attribute_group const **drv_groups ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*online)(struct device * ) ; int (*offline)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops const *iommu_ops ; struct subsys_private *p ; struct lock_class_key lock_key ; }; struct device_type; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; struct of_device_id; struct acpi_device_id; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; enum probe_type probe_type ; struct of_device_id const *of_match_table ; struct acpi_device_id const *acpi_match_table ; int 
/* Remainder of device_driver (probe/remove/pm hooks); struct class and
 * class_attribute (sysfs class + show/store); device_type;
 * device_attribute; device_dma_parameters; start of struct device. */
(*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct attribute_group const **dev_groups ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * , kuid_t * , kgid_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct cma; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; 
/* Remainder of struct device (power mgmt, DMA masks, OF/ACPI nodes, devres);
 * wakeup_source (wakeup accounting); iovec/kvec and iov_iter (scatter I/O
 * cursor); scatterlist/sg_table (DMA scatter-gather); struct dql (dynamic
 * queue limits, continues on next line). */
void *driver_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; struct dev_pin_info *pins ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; unsigned long dma_pfn_offset ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct cma *cma_area ; struct dev_archdata archdata ; struct device_node *of_node ; struct fwnode_handle *fwnode ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; bool offline_disabled ; bool offline ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct wake_irq *wakeirq ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kvec { void *iov_base ; size_t iov_len ; }; union __anonunion____missing_field_name_249 { struct iovec const *iov ; struct kvec const *kvec ; struct bio_vec const *bvec ; }; struct iov_iter { int type ; size_t iov_offset ; size_t count ; union __anonunion____missing_field_name_249 __annonCompField76 ; unsigned long nr_segs ; }; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int 
/* CIL-generated mirrors of networking/socket and DMA-mapping type
 * declarations. Comments only — code tokens unchanged. */
/* End of struct dql; sockaddr/msghdr (generic socket address + message
 * header); WAN/serial line-protocol setting typedefs (sync_serial, te1,
 * raw_hdlc, frame-relay, cisco); struct ifmap; start of the ifreq settings
 * union (continues on next line). */
prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; typedef unsigned short __kernel_sa_family_t; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct msghdr { void *msg_name ; int msg_namelen ; struct iov_iter msg_iter ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; struct kiocb *msg_iocb ; }; struct __anonstruct_sync_serial_settings_251 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct __anonstruct_sync_serial_settings_251 sync_serial_settings; struct __anonstruct_te1_settings_252 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_252 te1_settings; struct __anonstruct_raw_hdlc_proto_253 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_253 raw_hdlc_proto; struct __anonstruct_fr_proto_254 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_254 fr_proto; struct __anonstruct_fr_proto_pvc_255 { unsigned int dlci ; }; typedef struct __anonstruct_fr_proto_pvc_255 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_256 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_256 fr_proto_pvc_info; struct __anonstruct_cisco_proto_257 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_257 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_258 { raw_hdlc_proto *raw_hdlc ; 
/* if_settings / ifreq (SIOCxxx ioctl argument layout); 32-bit compat
 * typedefs and robust-futex list structs; socket_state enum (CIL-renamed
 * ldv_25570); socket_wq; struct socket; start of proto_ops (continues). */
cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_258 ifs_ifsu ; }; union __anonunion_ifr_ifrn_259 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_260 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_259 ifr_ifrn ; union __anonunion_ifr_ifru_260 ifr_ifru ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; enum ldv_25570 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_25570 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , 
/* Remainder of proto_ops (ioctl/listen/sockopt/sendmsg/recvmsg/mmap/...);
 * dma_attrs; enum dma_data_direction; struct dma_map_ops (arch DMA mapping
 * vtable, continues on next line). */
struct poll_table_struct * ) ; int (*ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int (*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , struct vm_area_struct * ) ; ssize_t (*sendpage)(struct socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*set_peek_off)(struct sock * , int ) ; }; struct in6_addr; struct sk_buff; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct scatterlist * , int , enum 
/* Remainder of dma_map_ops (sync/mapping_error/dma_supported);
 * netdev_features_t; in6_addr; ethhdr (Ethernet header); pipe buffer types
 * (pipe_buffer, pipe_inode_info, pipe_buf_operations); nf_conntrack and
 * the first nf_bridge union (continues past this line). */
dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; typedef u64 netdev_features_t; union __anonunion_in6_u_276 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_276 in6_u ; }; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct pipe_buf_operations; struct pipe_buffer { struct page *page ; unsigned int offset ; unsigned int len ; struct pipe_buf_operations const *ops ; unsigned int flags ; unsigned long private ; }; struct pipe_inode_info { struct mutex mutex ; wait_queue_head_t wait ; unsigned int nrbufs ; unsigned int curbuf ; unsigned int buffers ; unsigned int readers ; unsigned int writers ; unsigned int files ; unsigned int waiting_writers ; unsigned int r_counter ; unsigned int w_counter ; struct page *tmp_page ; struct fasync_struct *fasync_readers ; struct fasync_struct *fasync_writers ; struct pipe_buffer *bufs ; }; struct pipe_buf_operations { int can_merge ; int (*confirm)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*release)(struct pipe_inode_info * , struct pipe_buffer * ) ; int (*steal)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*get)(struct pipe_inode_info * , struct pipe_buffer * ) ; }; struct napi_struct; struct nf_conntrack { atomic_t use ; }; union __anonunion____missing_field_name_281 { struct net_device *physoutdev ; char neigh_header[8U] ; }; union 
/* CIL-generated mirrors of sk_buff (socket buffer) and ethtool ioctl type
 * declarations. Bitfield widths and field order encode on-wire/ABI layout;
 * do not alter. Comments only — code tokens unchanged. */
/* End of nf_bridge address union; nf_bridge_info; sk_buff_head (list head
 * with qlen + lock); skb fragment and shared-info structs; timestamp
 * unions embedded in sk_buff (continue on next line). */
__anonunion____missing_field_name_282 { __be32 ipv4_daddr ; struct in6_addr ipv6_daddr ; }; struct nf_bridge_info { atomic_t use ; unsigned char orig_proto ; bool pkt_otherhost ; __u16 frag_max_size ; unsigned int mask ; struct net_device *physindev ; union __anonunion____missing_field_name_281 __annonCompField80 ; union __anonunion____missing_field_name_282 __annonCompField81 ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; struct skb_frag_struct; typedef struct skb_frag_struct skb_frag_t; struct __anonstruct_page_283 { struct page *p ; }; struct skb_frag_struct { struct __anonstruct_page_283 page ; __u32 page_offset ; __u32 size ; }; struct skb_shared_hwtstamps { ktime_t hwtstamp ; }; struct skb_shared_info { unsigned char nr_frags ; __u8 tx_flags ; unsigned short gso_size ; unsigned short gso_segs ; unsigned short gso_type ; struct sk_buff *frag_list ; struct skb_shared_hwtstamps hwtstamps ; u32 tskey ; __be32 ip6_frag_id ; atomic_t dataref ; void *destructor_arg ; skb_frag_t frags[17U] ; }; typedef unsigned int sk_buff_data_t; struct __anonstruct____missing_field_name_285 { u32 stamp_us ; u32 stamp_jiffies ; }; union __anonunion____missing_field_name_284 { u64 v64 ; struct __anonstruct____missing_field_name_285 __annonCompField82 ; }; struct skb_mstamp { union __anonunion____missing_field_name_284 __annonCompField83 ; }; union __anonunion____missing_field_name_288 { ktime_t tstamp ; struct skb_mstamp skb_mstamp ; }; struct __anonstruct____missing_field_name_287 { struct sk_buff *next ; struct sk_buff *prev ; union __anonunion____missing_field_name_288 __annonCompField84 ; }; union __anonunion____missing_field_name_286 { struct __anonstruct____missing_field_name_287 __annonCompField85 ; struct rb_node rbnode ; }; struct sec_path; struct __anonstruct____missing_field_name_290 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion____missing_field_name_289 { __wsum csum ; struct 
/* Checksum/mark/protocol unions, then the main struct sk_buff definition:
 * list links, cb[] control buffer, lengths, the packed 1–3 bit flag
 * bitfields, traffic-control indices, hashes, vlan fields (continues). */
__anonstruct____missing_field_name_290 __annonCompField87 ; }; union __anonunion____missing_field_name_291 { unsigned int napi_id ; unsigned int sender_cpu ; }; union __anonunion____missing_field_name_292 { __u32 mark ; __u32 reserved_tailroom ; }; union __anonunion____missing_field_name_293 { __be16 inner_protocol ; __u8 inner_ipproto ; }; struct sk_buff { union __anonunion____missing_field_name_286 __annonCompField86 ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; void (*destructor)(struct sk_buff * ) ; struct sec_path *sp ; struct nf_conntrack *nfct ; struct nf_bridge_info *nf_bridge ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; __u16 queue_mapping ; unsigned char cloned : 1 ; unsigned char nohdr : 1 ; unsigned char fclone : 2 ; unsigned char peeked : 1 ; unsigned char head_frag : 1 ; unsigned char xmit_more : 1 ; __u32 headers_start[0U] ; __u8 __pkt_type_offset[0U] ; unsigned char pkt_type : 3 ; unsigned char pfmemalloc : 1 ; unsigned char ignore_df : 1 ; unsigned char nfctinfo : 3 ; unsigned char nf_trace : 1 ; unsigned char ip_summed : 2 ; unsigned char ooo_okay : 1 ; unsigned char l4_hash : 1 ; unsigned char sw_hash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char encapsulation : 1 ; unsigned char encap_hdr_csum : 1 ; unsigned char csum_valid : 1 ; unsigned char csum_complete_sw : 1 ; unsigned char csum_level : 2 ; unsigned char csum_bad : 1 ; unsigned char ndisc_nodetype : 2 ; unsigned char ipvs_property : 1 ; unsigned char inner_protocol_type : 1 ; unsigned char remcsum_offload : 1 ; __u16 tc_index ; __u16 tc_verd ; union __anonunion____missing_field_name_289 __annonCompField88 ; __u32 priority ; int skb_iif ; __u32 hash ; __be16 vlan_proto ; __u16 vlan_tci ; union __anonunion____missing_field_name_291 __annonCompField89 ; __u32 secmark ; union __anonunion____missing_field_name_292 __annonCompField90 ; union 
/* End of struct sk_buff (header offsets, head/data/tail/end pointers,
 * refcount); pkt_hash_types enum; ethtool ioctl structs: cmd/drvinfo/
 * wolinfo/tunable/regs/eeprom/eee/modinfo and coalesce (continues). */
__anonunion____missing_field_name_293 __annonCompField91 ; __u16 inner_transport_header ; __u16 inner_network_header ; __u16 inner_mac_header ; __be16 protocol ; __u16 transport_header ; __u16 network_header ; __u16 mac_header ; __u32 headers_end[0U] ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; enum pkt_hash_types { PKT_HASH_TYPE_NONE = 0, PKT_HASH_TYPE_L2 = 1, PKT_HASH_TYPE_L3 = 2, PKT_HASH_TYPE_L4 = 3 } ; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char erom_version[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; __u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_tunable { __u32 cmd ; __u32 id ; __u32 type_id ; __u32 len ; void *data[0U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 lp_advertised ; __u32 eee_active ; __u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames 
/* Remainder of ethtool_coalesce; ring/channel/pause parameters; test/stats
 * blobs with flexible-length data[]; RX flow classification specs
 * (tcpip4/ah_esp/usrip4 specs and the flow union); ethtool_rx_flow_spec
 * starts here but its body lies past this chunk. */
; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[52U] ; }; struct ethtool_flow_ext { __u8 padding[2U] ; unsigned char h_dest[6U] ; __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec 
{ __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int (*set_wol)(struct net_device * , struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void (*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; 
int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_key_size)(struct net_device * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh)(struct net_device * , u32 * , u8 * , u8 * ) ; int (*set_rxfh)(struct net_device * , u32 const * , u8 const * , u8 const ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device * , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; int (*get_tunable)(struct net_device * , struct ethtool_tunable const * , void * ) ; int (*set_tunable)(struct net_device * , struct ethtool_tunable const * , void const * ) ; }; struct prot_inuse; struct 
/* NOTE(review): CIL-flattened per-network-namespace state: SNMP MIB counter structs,
 * `netns_{core,mib,unix,packet,ipv4,ipv6,sctp,dccp,nf,xt,ct,nftables,xfrm,mpls}`,
 * `dst_ops`, conntrack protocol-timeout tables, and the aggregate `struct net`.
 * Generated verification artifact — field order is significant; comments only. */
netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 mibs[36U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[28U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[6U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[6U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[16U] ; }; struct udp_mib { unsigned long mibs[9U] ; }; struct linux_mib { unsigned long mibs[115U] ; }; struct linux_xfrm_mib { unsigned long mibs[29U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics ; struct ipstats_mib *ip_statistics ; struct linux_mib *net_statistics ; struct udp_mib *udp_statistics ; struct udp_mib *udplite_statistics ; struct icmp_mib *icmp_statistics ; struct icmpmsg_mib *icmpmsg_statistics ; struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6 ; struct udp_mib *udplite_stats_in6 ; struct ipstats_mib *ipv6_statistics ; struct icmpv6_mib *icmpv6_statistics ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { struct percpu_counter mem ; int timeout ; int high_thresh ; int low_thresh ; }; struct ipv4_devconf; struct fib_rules_ops; struct fib_table; struct local_ports { seqlock_t lock ; int range[2U] ; bool warned ; }; struct ping_group_range { seqlock_t lock ; kgid_t range[2U] ; }; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *xfrm4_hdr ; struct ipv4_devconf *devconf_all ; struct 
ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; bool fib_offload_disabled ; struct sock *fibnl ; struct sock **icmp_sk ; struct sock *mc_autojoin_sk ; struct inet_peer_base *peers ; struct sock **tcp_sk ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; struct local_ports ip_local_ports ; int sysctl_tcp_ecn ; int sysctl_tcp_ecn_fallback ; int sysctl_ip_no_pmtu_disc ; int sysctl_ip_fwd_use_pmtu ; int sysctl_ip_nonlocal_bind ; int sysctl_fwmark_reflect ; int sysctl_tcp_fwmark_accept ; int sysctl_tcp_mtu_probing ; int sysctl_tcp_base_mss ; int sysctl_tcp_probe_threshold ; u32 sysctl_tcp_probe_interval ; struct ping_group_range ping_group_range ; atomic_t dev_addr_genid ; unsigned long *sysctl_local_reserved_ports ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; atomic_t rt_genid ; }; struct neighbour; struct dst_ops { unsigned short family ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 *(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , 
struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *xfrm6_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int flowlabel_consistency ; int auto_flowlabels ; int icmpv6_time ; int anycast_src_echo_reply ; int fwmark_reflect ; int idgen_retries ; int idgen_delay ; int flowlabel_state_ranges ; }; struct ipv6_devconf; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table *ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; struct sock *tcp_sk ; struct sock *igmp_sk ; struct sock *mc_autojoin_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; atomic_t dev_addr_genid ; 
atomic_t fib6_sernum ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr ; }; struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; char *sctp_hmac_alg ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long max_autoclose ; }; struct netns_dccp { struct sock *v4_ctl_sk ; struct sock *v6_ctl_sk ; }; struct nf_logger; struct netns_nf { struct proc_dir_entry *proc_netfilter ; struct nf_logger const *nf_loggers[13U] ; struct ctl_table_header *nf_log_dir_header ; }; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; bool notrack_deprecated_warning ; bool clusterip_deprecated_warning ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; }; struct hlist_nulls_node; struct hlist_nulls_head { struct hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header 
*ctl_compat_header ; struct ctl_table *ctl_compat_table ; unsigned int users ; }; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ct_pcpu { spinlock_t lock ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct hlist_nulls_head tmpl ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; struct delayed_work ecache_dwork ; bool ecache_dwork_pending ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; unsigned int sysctl_log_invalid ; int sysctl_events ; int sysctl_acct ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int htable_size ; seqcount_t generation ; struct kmem_cache *nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct ct_pcpu *pcpu_lists ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; struct nf_ip_net nf_ct_proto ; unsigned int labels_used ; u8 label_words ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; }; struct nft_af_info; struct netns_nftables { struct 
list_head af_info ; struct list_head commit_list ; struct nft_af_info *ipv4 ; struct nft_af_info *ipv6 ; struct nft_af_info *inet ; struct nft_af_info *arp ; struct nft_af_info *bridge ; struct nft_af_info *netdev ; unsigned int base_seq ; u8 gencursor ; }; struct tasklet_struct { struct tasklet_struct *next ; unsigned long state ; atomic_t count ; void (*func)(unsigned long ) ; unsigned long data ; }; struct flow_cache_percpu { struct hlist_head *hash_table ; int hash_count ; u32 hash_rnd ; int hash_rnd_recalc ; struct tasklet_struct flush_tasklet ; }; struct flow_cache { u32 hash_shift ; struct flow_cache_percpu *percpu ; struct notifier_block hotcpu_notifier ; int low_watermark ; int high_watermark ; struct timer_list rnd_timer ; }; struct xfrm_policy_hash { struct hlist_head *table ; unsigned int hmask ; u8 dbits4 ; u8 sbits4 ; u8 dbits6 ; u8 sbits6 ; }; struct xfrm_policy_hthresh { struct work_struct work ; seqlock_t lock ; u8 lbits4 ; u8 rbits4 ; u8 lbits6 ; u8 rbits6 ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[3U] ; struct xfrm_policy_hash policy_bydst[3U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; struct xfrm_policy_hthresh policy_hthresh ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; spinlock_t xfrm_state_lock ; rwlock_t xfrm_policy_lock ; struct mutex xfrm_cfg_mutex ; struct flow_cache flow_cache_global ; atomic_t flow_cache_genid ; 
struct list_head flow_cache_gc_list ; spinlock_t flow_cache_gc_lock ; struct work_struct flow_cache_gc_work ; struct work_struct flow_cache_flush_work ; struct mutex flow_flush_sem ; }; struct mpls_route; struct netns_mpls { size_t platform_labels ; struct mpls_route **platform_label ; struct ctl_table_header *ctl ; }; struct proc_ns_operations; struct ns_common { atomic_long_t stashed ; struct proc_ns_operations const *ops ; unsigned int inum ; }; struct net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; atomic64_t cookie_gen ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct user_namespace *user_ns ; spinlock_t nsid_lock ; struct idr netns_ids ; struct ns_common ns ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; unsigned int dev_unreg_count ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_ieee802154_lowpan ieee802154_lowpan ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_nf nf ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nftables nft ; struct netns_nf_frag nf_frag ; struct sock *nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct netns_mpls mpls ; struct sock *diag_nlsk ; atomic_t fnhe_genid ; }; struct __anonstruct_possible_net_t_302 { struct net *net ; }; typedef struct __anonstruct_possible_net_t_302 possible_net_t; typedef unsigned long kernel_ulong_t; struct pci_device_id { __u32 vendor ; __u32 device ; __u32 
/* NOTE(review): CIL-flattened device-model and networking-hardware declarations:
 * device-ID match tables (pci/acpi/of), fwnode/device_node (OF/ACPI), MII ioctl data,
 * PHY interface/state enums (CIL-renamed ldv_28719/ldv_28773), `mii_bus`,
 * `phy_device`/`phy_driver`, and the DSA switch tree/driver vtables.
 * Generated verification artifact — declarations kept byte-identical; comments only. */
subvendor ; __u32 subdevice ; __u32 class ; __u32 class_mask ; kernel_ulong_t driver_data ; }; struct acpi_device_id { __u8 id[9U] ; kernel_ulong_t driver_data ; }; struct of_device_id { char name[32U] ; char type[32U] ; char compatible[128U] ; void const *data ; }; enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_PDATA = 3 } ; struct fwnode_handle { enum fwnode_type type ; struct fwnode_handle *secondary ; }; typedef u32 phandle; struct property { char *name ; int length ; void *value ; struct property *next ; unsigned long _flags ; unsigned int unique_id ; struct bin_attribute attr ; }; struct device_node { char const *name ; char const *type ; phandle phandle ; char const *full_name ; struct fwnode_handle fwnode ; struct property *properties ; struct property *deadprops ; struct device_node *parent ; struct device_node *child ; struct device_node *sibling ; struct kobject kobj ; unsigned long _flags ; void *data ; }; struct mii_ioctl_data { __u16 phy_id ; __u16 reg_num ; __u16 val_in ; __u16 val_out ; }; enum ldv_28719 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ; typedef enum ldv_28719 phy_interface_t; enum ldv_28773 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; struct phy_device; struct mii_bus { char const *name ; char id[17U] ; void *priv ; int (*read)(struct mii_bus * , int , int ) ; int (*write)(struct mii_bus * , int , int , u16 ) ; int (*reset)(struct mii_bus * ) ; struct mutex mdio_lock ; struct device 
*parent ; enum ldv_28773 state ; struct device dev ; struct phy_device *phy_map[32U] ; u32 phy_mask ; u32 phy_ignore_ta_mask ; int *irq ; }; enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; struct phy_c45_device_ids { u32 devices_in_package ; u32 device_ids[8U] ; }; struct phy_driver; struct phy_device { struct phy_driver *drv ; struct mii_bus *bus ; struct device dev ; u32 phy_id ; struct phy_c45_device_ids c45_ids ; bool is_c45 ; bool is_internal ; bool has_fixups ; bool suspended ; enum phy_state state ; u32 dev_flags ; phy_interface_t interface ; int addr ; int speed ; int duplex ; int pause ; int asym_pause ; int link ; u32 interrupts ; u32 supported ; u32 advertising ; u32 lp_advertising ; int autoneg ; int link_timeout ; int irq ; void *priv ; struct work_struct phy_queue ; struct delayed_work state_queue ; atomic_t irq_disable ; struct mutex lock ; struct net_device *attached_dev ; void (*adjust_link)(struct net_device * ) ; }; struct phy_driver { u32 phy_id ; char *name ; unsigned int phy_id_mask ; u32 features ; u32 flags ; void const *driver_data ; int (*soft_reset)(struct phy_device * ) ; int (*config_init)(struct phy_device * ) ; int (*probe)(struct phy_device * ) ; int (*suspend)(struct phy_device * ) ; int (*resume)(struct phy_device * ) ; int (*config_aneg)(struct phy_device * ) ; int (*aneg_done)(struct phy_device * ) ; int (*read_status)(struct phy_device * ) ; int (*ack_interrupt)(struct phy_device * ) ; int (*config_intr)(struct phy_device * ) ; int (*did_interrupt)(struct phy_device * ) ; void (*remove)(struct phy_device * ) ; int (*match_phy_device)(struct phy_device * ) ; int (*ts_info)(struct phy_device * , struct ethtool_ts_info * ) ; int (*hwtstamp)(struct phy_device * , struct ifreq * ) ; bool (*rxtstamp)(struct phy_device * , struct sk_buff * , int ) ; void (*txtstamp)(struct 
phy_device * , struct sk_buff * , int ) ; int (*set_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*get_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*link_change_notify)(struct phy_device * ) ; int (*read_mmd_indirect)(struct phy_device * , int , int , int ) ; void (*write_mmd_indirect)(struct phy_device * , int , int , int , u32 ) ; int (*module_info)(struct phy_device * , struct ethtool_modinfo * ) ; int (*module_eeprom)(struct phy_device * , struct ethtool_eeprom * , u8 * ) ; struct device_driver driver ; }; struct fixed_phy_status { int link ; int speed ; int duplex ; int pause ; int asym_pause ; }; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4 } ; struct dsa_chip_data { struct device *host_dev ; int sw_addr ; int eeprom_len ; struct device_node *of_node ; char *port_names[12U] ; struct device_node *port_dn[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; struct net_device *of_netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct packet_type; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; int (*rcv)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; enum dsa_tag_protocol tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; enum dsa_tag_protocol tag_protocol ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct device *master_dev ; char hwmon_name[24U] ; struct device *hwmon_dev ; u32 dsa_port_mask ; u32 phys_port_mask ; u32 phys_mii_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; }; struct dsa_switch_driver { struct list_head list ; enum dsa_tag_protocol 
tag_protocol ; int priv_size ; char *(*probe)(struct device * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; u32 (*get_phy_flags)(struct dsa_switch * , int ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void (*adjust_link)(struct dsa_switch * , int , struct phy_device * ) ; void (*fixed_link_update)(struct dsa_switch * , int , struct fixed_phy_status * ) ; void (*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; void (*get_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*set_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*suspend)(struct dsa_switch * ) ; int (*resume)(struct dsa_switch * ) ; int (*port_enable)(struct dsa_switch * , int , struct phy_device * ) ; void (*port_disable)(struct dsa_switch * , int , struct phy_device * ) ; int (*set_eee)(struct dsa_switch * , int , struct phy_device * , struct ethtool_eee * ) ; int (*get_eee)(struct dsa_switch * , int , struct ethtool_eee * ) ; int (*get_temp)(struct dsa_switch * , int * ) ; int (*get_temp_limit)(struct dsa_switch * , int * ) ; int (*set_temp_limit)(struct dsa_switch * , int ) ; int (*get_temp_alarm)(struct dsa_switch * , bool * ) ; int (*get_eeprom_len)(struct dsa_switch * ) ; int (*get_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*get_regs_len)(struct dsa_switch * , int ) ; void (*get_regs)(struct dsa_switch * , int , struct ethtool_regs * , void * ) ; int (*port_join_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_leave_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_stp_update)(struct dsa_switch * , int , u8 ) ; int (*fdb_add)(struct dsa_switch * , int , unsigned char const * , 
/* NOTE(review): CIL-flattened declarations: tail of the DSA fdb ops, IEEE 802.1Q/DCB
 * (`ieee_*`, `cee_*`, `dcb_app`) structs and the `dcbnl_rtnl_ops` vtable, `taskstats`
 * accounting, nsproxy, netlink message headers/callback, `ndmsg`, rtnl link stats,
 * VF info, netdev tx codes, and (continuing past this chunk) netdev HW address lists.
 * Generated verification artifact — declarations kept byte-identical; comments only. */
u16 ) ; int (*fdb_del)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_getnext)(struct dsa_switch * , int , unsigned char * , bool * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_qcn { __u8 rpg_enable[8U] ; __u32 rppp_max_rps[8U] ; __u32 rpg_time_reset[8U] ; __u32 rpg_byte_reset[8U] ; __u32 rpg_threshold[8U] ; __u32 rpg_max_rate[8U] ; __u32 rpg_ai_rate[8U] ; __u32 rpg_hai_rate[8U] ; __u32 rpg_gd[8U] ; __u32 rpg_min_dec_fac[8U] ; __u32 rpg_min_rate[8U] ; __u32 cndd_state_machine[8U] ; }; struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U] ; __u32 rppp_created_rps[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_setqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_getqcnstats)(struct net_device * , struct ieee_qcn_stats * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_getapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , 
struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 (*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; int (*setapp)(struct net_device * , u8 , u16 , u8 ) ; int (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; 
int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct xfrm_policy; struct xfrm_state; struct request_sock; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns_for_children ; struct net *net_ns ; }; struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; __u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 
rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; struct ifla_vf_stats { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 broadcast ; __u64 multicast ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 spoofchk ; __u32 linkstate ; __u32 min_tx_rate ; __u32 max_tx_rate ; __u32 rss_query_en ; }; struct netpoll_info; struct wireless_dev; struct wpan_dev; struct mpls_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct netdev_hw_addr { struct list_head list ; unsigned char addr[32U] ; unsigned char type ; bool global_use ; int sync_cnt ; int refcount ; int synced ; struct callback_head callback_head ; }; struct netdev_hw_addr_list { struct 
list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*cache)(struct neighbour const * , struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; struct napi_struct { struct list_head poll_list ; unsigned long state ; int weight ; unsigned int gro_count ; int (*poll)(struct napi_struct * , int ) ; spinlock_t poll_lock ; int poll_owner ; struct net_device *dev ; struct sk_buff *gro_list ; struct sk_buff *skb ; struct hrtimer timer ; struct list_head dev_list ; struct hlist_node napi_hash_node ; unsigned int napi_id ; }; enum gro_result { GRO_MERGED = 0, GRO_MERGED_FREE = 1, GRO_HELD = 2, GRO_NORMAL = 3, GRO_DROP = 4 } ; typedef enum gro_result gro_result_t; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff ** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; unsigned long tx_maxrate ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int mask ; struct callback_head rcu ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct 
xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct netdev_phys_item_id { unsigned char id[32U] ; unsigned char id_len ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t (*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * , void * , u16 (*)(struct net_device * , struct sk_buff * ) ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , __be16 , u16 ) ; int (*ndo_vlan_rx_kill_vid)(struct net_device * , __be16 , u16 ) ; void (*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * ) ; void (*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_busy_poll)(struct napi_struct * ) ; int 
(*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int (*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_rate)(struct net_device * , int , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_link_state)(struct net_device * , int , int ) ; int (*ndo_get_vf_stats)(struct net_device * , int , struct ifla_vf_stats * ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int (*ndo_set_vf_rss_query_en)(struct net_device * , int , bool ) ; int (*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device * ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , 
struct net_device * , int ) ; int (*ndo_bridge_setlink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_bridge_getlink)(struct sk_buff * , u32 , u32 , struct net_device * , u32 , int ) ; int (*ndo_bridge_dellink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_change_carrier)(struct net_device * , bool ) ; int (*ndo_get_phys_port_id)(struct net_device * , struct netdev_phys_item_id * ) ; int (*ndo_get_phys_port_name)(struct net_device * , char * , size_t ) ; void (*ndo_add_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void (*ndo_del_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void *(*ndo_dfwd_add_station)(struct net_device * , struct net_device * ) ; void (*ndo_dfwd_del_station)(struct net_device * , void * ) ; netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff * , struct net_device * , void * ) ; int (*ndo_get_lock_subclass)(struct net_device * ) ; netdev_features_t (*ndo_features_check)(struct sk_buff * , struct net_device * , netdev_features_t ) ; int (*ndo_set_tx_maxrate)(struct net_device * , int , u32 ) ; int (*ndo_get_iflink)(struct net_device const * ) ; }; struct __anonstruct_adj_list_315 { struct list_head upper ; struct list_head lower ; }; struct __anonstruct_all_adj_list_316 { struct list_head upper ; struct list_head lower ; }; struct iw_handler_def; struct iw_public_data; struct switchdev_ops; struct vlan_info; struct tipc_bearer; struct in_device; struct dn_dev; struct inet6_dev; struct tcf_proto; struct cpu_rmap; struct pcpu_lstats; struct pcpu_sw_netstats; struct pcpu_dstats; struct pcpu_vstats; union __anonunion____missing_field_name_317 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_sw_netstats *tstats ; struct pcpu_dstats *dstats ; struct pcpu_vstats *vstats ; }; struct garp_port; struct mrp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; int 
irq ; atomic_t carrier_changes ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; struct list_head close_list ; struct list_head ptype_all ; struct list_head ptype_specific ; struct __anonstruct_adj_list_315 adj_list ; struct __anonstruct_all_adj_list_316 all_adj_list ; netdev_features_t features ; netdev_features_t hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; netdev_features_t hw_enc_features ; netdev_features_t mpls_features ; int ifindex ; int group ; struct net_device_stats stats ; atomic_long_t rx_dropped ; atomic_long_t tx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct switchdev_ops const *switchdev_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom ; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned short neigh_priv_len ; unsigned short dev_id ; unsigned short dev_port ; spinlock_t addr_list_lock ; unsigned char name_assign_type ; bool uc_promisc ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; struct netdev_hw_addr_list dev_addrs ; struct kset *queues_kset ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; struct tipc_bearer *tipc_ptr ; void *atalk_ptr ; struct in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; struct wpan_dev *ieee802154_ptr ; struct mpls_dev *mpls_ptr ; unsigned long last_rx ; unsigned 
char *dev_addr ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; unsigned long gro_flush_timeout ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct tcf_proto *ingress_cl_list ; struct netdev_queue *ingress_queue ; struct list_head nf_hooks_ingress ; unsigned char broadcast[32U] ; struct cpu_rmap *rx_cpu_rmap ; struct hlist_node index_hlist ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; int watchdog_timeo ; struct xps_dev_maps *xps_maps ; unsigned long trans_start ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; possible_net_t nd_net ; union __anonunion____missing_field_name_317 __annonCompField94 ; struct garp_port *garp_port ; struct mrp_port *mrp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct attribute_group const *sysfs_rx_queue_group ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; u16 gso_min_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; }; struct packet_type { __be16 type ; struct net_device *dev ; int (*func)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; bool (*id_match)(struct packet_type * , struct sock * ) ; void *af_packet_priv ; struct list_head list ; }; struct pcpu_sw_netstats { u64 rx_packets ; u64 rx_bytes ; u64 tx_packets ; u64 tx_bytes ; struct u64_stats_sync syncp ; }; enum skb_free_reason { SKB_REASON_CONSUMED = 0, 
SKB_REASON_DROPPED = 1 } ; struct ipv6hdr { unsigned char priority : 4 ; unsigned char version : 4 ; __u8 flow_lbl[3U] ; __be16 payload_len ; __u8 nexthdr ; __u8 hop_limit ; struct in6_addr saddr ; struct in6_addr daddr ; }; struct ipv6_stable_secret { bool initialized ; struct in6_addr secret ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 mldv1_unsolicited_report_interval ; __s32 mldv2_unsolicited_report_interval ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 accept_ra_from_local ; __s32 optimistic_dad ; __s32 use_optimistic ; __s32 mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; __s32 ndisc_notify ; __s32 suppress_frag_ndisc ; __s32 accept_ra_mtu ; struct ipv6_stable_secret stable_secret ; void *sysctl ; }; struct page_counter { atomic_long_t count ; unsigned long limit ; struct page_counter *parent ; unsigned long watermark ; unsigned long failcnt ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct bpf_insn { __u8 code ; unsigned char dst_reg : 4 ; unsigned char src_reg : 4 ; __s16 off ; __s32 imm ; }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4 } ; struct bpf_prog_aux; struct sock_fprog_kern { u16 len ; struct sock_filter *filter ; }; union __anonunion____missing_field_name_329 { struct sock_filter insns[0U] ; struct bpf_insn insnsi[0U] ; }; struct bpf_prog { u16 pages ; bool jited ; bool 
gpl_compatible ; u32 len ; enum bpf_prog_type type ; struct bpf_prog_aux *aux ; struct sock_fprog_kern *orig_prog ; unsigned int (*bpf_func)(struct sk_buff const * , struct bpf_insn const * ) ; union __anonunion____missing_field_name_329 __annonCompField99 ; }; struct sk_filter { atomic_t refcnt ; struct callback_head rcu ; struct bpf_prog *prog ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** ) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int (*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; int slave_maxtype ; struct nla_policy const *slave_policy ; int (*slave_validate)(struct nlattr ** , struct nlattr ** ) ; int (*slave_changelink)(struct net_device * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; size_t (*get_slave_size)(struct net_device const * , struct net_device const * ) ; int (*fill_slave_info)(struct sk_buff * , struct net_device const * , struct net_device const * ) ; struct net *(*get_link_net)(struct net_device const * ) ; }; struct neigh_table; struct neigh_parms { possible_net_t net ; struct net_device *dev ; struct list_head list ; int (*neigh_setup)(struct neighbour * ) 
; void (*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int reachable_time ; int data[13U] ; unsigned long data_state[1U] ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; possible_net_t net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { int family ; int entry_size ; int key_len ; __be16 protocol ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; bool (*key_eq)(struct neighbour const * , void const * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct 
pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; struct list_head parms_list ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; struct dn_route; union __anonunion____missing_field_name_340 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; struct dst_ops *ops ; unsigned long _metrics ; unsigned long expires ; struct dst_entry *path ; struct dst_entry *from ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sock * , struct sk_buff * ) ; unsigned short flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] ; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion____missing_field_name_340 __annonCompField100 ; }; struct hwtstamp_config { int flags ; int tx_type ; int rx_filter ; }; struct __anonstruct_socket_lock_t_341 { spinlock_t slock ; int owned ; wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_341 socket_lock_t; struct proto; typedef __u32 __portpair; typedef __u64 __addrpair; struct __anonstruct____missing_field_name_343 { __be32 skc_daddr ; __be32 skc_rcv_saddr ; }; union __anonunion____missing_field_name_342 { __addrpair skc_addrpair ; struct __anonstruct____missing_field_name_343 __annonCompField101 ; }; union __anonunion____missing_field_name_344 { unsigned int skc_hash ; __u16 
skc_u16hashes[2U] ; }; struct __anonstruct____missing_field_name_346 { __be16 skc_dport ; __u16 skc_num ; }; union __anonunion____missing_field_name_345 { __portpair skc_portpair ; struct __anonstruct____missing_field_name_346 __annonCompField104 ; }; union __anonunion____missing_field_name_347 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion____missing_field_name_348 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { union __anonunion____missing_field_name_342 __annonCompField102 ; union __anonunion____missing_field_name_344 __annonCompField103 ; union __anonunion____missing_field_name_345 __annonCompField105 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse : 4 ; unsigned char skc_reuseport : 1 ; unsigned char skc_ipv6only : 1 ; unsigned char skc_net_refcnt : 1 ; int skc_bound_dev_if ; union __anonunion____missing_field_name_347 __annonCompField106 ; struct proto *skc_prot ; possible_net_t skc_net ; struct in6_addr skc_v6_daddr ; struct in6_addr skc_v6_rcv_saddr ; atomic64_t skc_cookie ; int skc_dontcopy_begin[0U] ; union __anonunion____missing_field_name_348 __annonCompField107 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_349 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_349 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; u16 sk_incoming_cpu ; __u32 sk_txhash ; unsigned int sk_napi_id ; unsigned int sk_ll_usec ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t 
sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check_tx : 1 ; unsigned char sk_no_check_rx : 1 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; u32 sk_pacing_rate ; u32 sk_max_pacing_rate ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; u32 sk_ack_backlog ; u32 sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; struct timer_list sk_timer ; ktime_t sk_stamp ; u16 sk_tsflags ; u32 sk_tskey ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct cg_proto *sk_cgrp ; void (*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; }; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_352 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void 
(*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*hash)(struct sock * ) ; void (*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; bool (*stream_memory_free)(struct sock const * ) ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_352 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { struct page_counter memory_allocated ; struct percpu_counter sockets_allocated ; int memory_pressure ; long sysctl_mem[3U] ; unsigned long flags ; struct mem_cgroup *memcg ; }; struct request_sock_ops { int family ; int obj_size ; struct 
kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct request_sock const * ) ; }; struct request_sock { struct sock_common __req_common ; struct request_sock *dl_next ; struct sock *rsk_listener ; u16 mss ; u8 num_retrans ; unsigned char cookie_ts : 1 ; unsigned char num_timeout : 7 ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; struct timer_list rsk_timer ; struct request_sock_ops const *rsk_ops ; struct sock *sk ; u32 *saved_syn ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct tcphdr { __be16 source ; __be16 dest ; __be32 seq ; __be32 ack_seq ; unsigned char res1 : 4 ; unsigned char doff : 4 ; unsigned char fin : 1 ; unsigned char syn : 1 ; unsigned char rst : 1 ; unsigned char psh : 1 ; unsigned char ack : 1 ; unsigned char urg : 1 ; unsigned char ece : 1 ; unsigned char cwr : 1 ; __be16 window ; __sum16 check ; __be16 urg_ptr ; }; struct iphdr { unsigned char ihl : 4 ; unsigned char version : 4 ; __u8 tos ; __be16 tot_len ; __be16 id ; __be16 frag_off ; __u8 ttl ; __u8 protocol ; __sum16 check ; __be32 saddr ; __be32 daddr ; }; struct ip6_sf_list { struct ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 *next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int 
mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6 ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned char mc_dad_count ; unsigned long mc_v1_seen ; unsigned long mc_qi ; unsigned long mc_qri ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct timer_list mc_dad_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct in6_addr token ; struct neigh_parms *nd_parms ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; struct timer_list rs_timer ; __u8 rs_probes ; __u8 addr_gen_mode ; unsigned long tstamp ; struct callback_head rcu ; }; union __anonunion____missing_field_name_371 { __be32 a4 ; __be32 a6[4U] ; struct in6_addr in6 ; }; struct inetpeer_addr_base { union __anonunion____missing_field_name_371 __annonCompField109 ; }; struct inetpeer_addr { struct inetpeer_addr_base addr ; __u16 family ; }; union __anonunion____missing_field_name_372 { struct list_head gc_list ; struct callback_head gc_rcu ; }; struct __anonstruct____missing_field_name_374 { atomic_t rid ; }; union __anonunion____missing_field_name_373 { struct __anonstruct____missing_field_name_374 __annonCompField111 ; struct callback_head rcu ; struct inet_peer *gc_next ; }; 
struct inet_peer { struct inet_peer *avl_left ; struct inet_peer *avl_right ; struct inetpeer_addr daddr ; __u32 avl_height ; u32 metrics[16U] ; u32 rate_tokens ; unsigned long rate_last ; union __anonunion____missing_field_name_372 __annonCompField110 ; union __anonunion____missing_field_name_373 __annonCompField112 ; __u32 dtime ; atomic_t refcnt ; }; struct inet_peer_base { struct inet_peer *root ; seqlock_t lock ; int total ; }; struct uncached_list; struct rtable { struct dst_entry dst ; int rt_genid ; unsigned int rt_flags ; __u16 rt_type ; __u8 rt_is_input ; __u8 rt_uses_gateway ; int rt_iif ; __be32 rt_gateway ; u32 rt_pmtu ; struct list_head rt_uncached ; struct uncached_list *rt_uncached_list ; }; struct vlan_hdr { __be16 h_vlan_TCI ; __be16 h_vlan_encapsulated_proto ; }; struct hotplug_slot; struct pci_slot { struct pci_bus *bus ; struct list_head list ; struct hotplug_slot *hotplug ; unsigned char number ; struct kobject kobj ; }; typedef int pci_power_t; typedef unsigned int pci_channel_state_t; enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; typedef unsigned short pci_dev_flags_t; typedef unsigned short pci_bus_flags_t; struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_ats; struct pci_driver; union __anonunion____missing_field_name_381 { struct pci_sriov *sriov ; struct pci_dev *physfn ; }; struct pci_dev { struct list_head bus_list ; struct pci_bus *bus ; struct pci_bus *subordinate ; void *sysdata ; struct proc_dir_entry *procent ; struct pci_slot *slot ; unsigned int devfn ; unsigned short vendor ; unsigned short device ; unsigned short subsystem_vendor ; unsigned short subsystem_device ; unsigned int class ; u8 revision ; u8 hdr_type ; u8 pcie_cap ; u8 msi_cap ; u8 msix_cap ; unsigned char pcie_mpss : 3 ; u8 rom_base_reg ; u8 pin ; u16 pcie_flags_reg ; u8 dma_alias_devfn ; struct pci_driver *driver ; u64 dma_mask ; struct device_dma_parameters dma_parms ; 
pci_power_t current_state ; u8 pm_cap ; unsigned char pme_support : 5 ; unsigned char pme_interrupt : 1 ; unsigned char pme_poll : 1 ; unsigned char d1_support : 1 ; unsigned char d2_support : 1 ; unsigned char no_d1d2 : 1 ; unsigned char no_d3cold : 1 ; unsigned char d3cold_allowed : 1 ; unsigned char mmio_always_on : 1 ; unsigned char wakeup_prepared : 1 ; unsigned char runtime_d3cold : 1 ; unsigned char ignore_hotplug : 1 ; unsigned int d3_delay ; unsigned int d3cold_delay ; struct pcie_link_state *link_state ; pci_channel_state_t error_state ; struct device dev ; int cfg_size ; unsigned int irq ; struct resource resource[17U] ; bool match_driver ; unsigned char transparent : 1 ; unsigned char multifunction : 1 ; unsigned char is_added : 1 ; unsigned char is_busmaster : 1 ; unsigned char no_msi : 1 ; unsigned char no_64bit_msi : 1 ; unsigned char block_cfg_access : 1 ; unsigned char broken_parity_status : 1 ; unsigned char irq_reroute_variant : 2 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char ari_enabled : 1 ; unsigned char is_managed : 1 ; unsigned char needs_freset : 1 ; unsigned char state_saved : 1 ; unsigned char is_physfn : 1 ; unsigned char is_virtfn : 1 ; unsigned char reset_fn : 1 ; unsigned char is_hotplug_bridge : 1 ; unsigned char __aer_firmware_first_valid : 1 ; unsigned char __aer_firmware_first : 1 ; unsigned char broken_intx_masking : 1 ; unsigned char io_window_1k : 1 ; unsigned char irq_managed : 1 ; unsigned char has_secondary_link : 1 ; pci_dev_flags_t dev_flags ; atomic_t enable_cnt ; u32 saved_config_space[16U] ; struct hlist_head saved_cap_space ; struct bin_attribute *rom_attr ; int rom_attr_enabled ; struct bin_attribute *res_attr[17U] ; struct bin_attribute *res_attr_wc[17U] ; struct list_head msi_list ; struct attribute_group const **msi_irq_groups ; struct pci_vpd *vpd ; union __anonunion____missing_field_name_381 __annonCompField114 ; struct pci_ats *ats ; phys_addr_t rom ; size_t romlen ; char 
*driver_override ; }; struct pci_ops; struct msi_controller; struct pci_bus { struct list_head node ; struct pci_bus *parent ; struct list_head children ; struct list_head devices ; struct pci_dev *self ; struct list_head slots ; struct resource *resource[4U] ; struct list_head resources ; struct resource busn_res ; struct pci_ops *ops ; struct msi_controller *msi ; void *sysdata ; struct proc_dir_entry *procdir ; unsigned char number ; unsigned char primary ; unsigned char max_bus_speed ; unsigned char cur_bus_speed ; char name[48U] ; unsigned short bridge_ctl ; pci_bus_flags_t bus_flags ; struct device *bridge ; struct device dev ; struct bin_attribute *legacy_io ; struct bin_attribute *legacy_mem ; unsigned char is_added : 1 ; }; struct pci_ops { void *(*map_bus)(struct pci_bus * , unsigned int , int ) ; int (*read)(struct pci_bus * , unsigned int , int , int , u32 * ) ; int (*write)(struct pci_bus * , unsigned int , int , int , u32 ) ; }; struct pci_dynids { spinlock_t lock ; struct list_head list ; }; typedef unsigned int pci_ers_result_t; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev * , enum pci_channel_state ) ; pci_ers_result_t (*mmio_enabled)(struct pci_dev * ) ; pci_ers_result_t (*link_reset)(struct pci_dev * ) ; pci_ers_result_t (*slot_reset)(struct pci_dev * ) ; void (*reset_notify)(struct pci_dev * , bool ) ; void (*resume)(struct pci_dev * ) ; }; struct pci_driver { struct list_head node ; char const *name ; struct pci_device_id const *id_table ; int (*probe)(struct pci_dev * , struct pci_device_id const * ) ; void (*remove)(struct pci_dev * ) ; int (*suspend)(struct pci_dev * , pm_message_t ) ; int (*suspend_late)(struct pci_dev * , pm_message_t ) ; int (*resume_early)(struct pci_dev * ) ; int (*resume)(struct pci_dev * ) ; void (*shutdown)(struct pci_dev * ) ; int (*sriov_configure)(struct pci_dev * , int ) ; struct pci_error_handlers const *err_handler ; struct device_driver driver ; struct pci_dynids dynids ; }; 
struct msix_entry { u32 vector ; u16 entry ; }; struct i2c_msg { __u16 addr ; __u16 flags ; __u16 len ; __u8 *buf ; }; union i2c_smbus_data { __u8 byte ; __u16 word ; __u8 block[34U] ; }; struct i2c_algorithm; struct i2c_adapter; struct i2c_client; enum i2c_slave_event; enum i2c_slave_event; struct i2c_client { unsigned short flags ; unsigned short addr ; char name[20U] ; struct i2c_adapter *adapter ; struct device dev ; int irq ; struct list_head detected ; int (*slave_cb)(struct i2c_client * , enum i2c_slave_event , u8 * ) ; }; enum i2c_slave_event { I2C_SLAVE_READ_REQUESTED = 0, I2C_SLAVE_WRITE_REQUESTED = 1, I2C_SLAVE_READ_PROCESSED = 2, I2C_SLAVE_WRITE_RECEIVED = 3, I2C_SLAVE_STOP = 4 } ; struct i2c_algorithm { int (*master_xfer)(struct i2c_adapter * , struct i2c_msg * , int ) ; int (*smbus_xfer)(struct i2c_adapter * , u16 , unsigned short , char , u8 , int , union i2c_smbus_data * ) ; u32 (*functionality)(struct i2c_adapter * ) ; int (*reg_slave)(struct i2c_client * ) ; int (*unreg_slave)(struct i2c_client * ) ; }; struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter * ) ; int (*get_scl)(struct i2c_adapter * ) ; void (*set_scl)(struct i2c_adapter * , int ) ; int (*get_sda)(struct i2c_adapter * ) ; void (*prepare_recovery)(struct i2c_adapter * ) ; void (*unprepare_recovery)(struct i2c_adapter * ) ; int scl_gpio ; int sda_gpio ; }; struct i2c_adapter_quirks { u64 flags ; int max_num_msgs ; u16 max_write_len ; u16 max_read_len ; u16 max_comb_1st_msg_len ; u16 max_comb_2nd_msg_len ; }; struct i2c_adapter { struct module *owner ; unsigned int class ; struct i2c_algorithm const *algo ; void *algo_data ; struct rt_mutex bus_lock ; int timeout ; int retries ; struct device dev ; int nr ; char name[48U] ; struct completion dev_released ; struct mutex userspace_clients_lock ; struct list_head userspace_clients ; struct i2c_bus_recovery_info *bus_recovery_info ; struct i2c_adapter_quirks const *quirks ; }; enum e1000_mac_type { e1000_undefined = 0, 
e1000_82575 = 1, e1000_82576 = 2, e1000_82580 = 3, e1000_i350 = 4, e1000_i354 = 5, e1000_i210 = 6, e1000_i211 = 7, e1000_num_macs = 8 } ; enum e1000_media_type { e1000_media_type_unknown = 0, e1000_media_type_copper = 1, e1000_media_type_fiber = 2, e1000_media_type_internal_serdes = 3, e1000_num_media_types = 4 } ; enum e1000_nvm_type { e1000_nvm_unknown = 0, e1000_nvm_none = 1, e1000_nvm_eeprom_spi = 2, e1000_nvm_flash_hw = 3, e1000_nvm_invm = 4, e1000_nvm_flash_sw = 5 } ; enum e1000_nvm_override { e1000_nvm_override_none = 0, e1000_nvm_override_spi_small = 1, e1000_nvm_override_spi_large = 2 } ; enum e1000_phy_type { e1000_phy_unknown = 0, e1000_phy_none = 1, e1000_phy_m88 = 2, e1000_phy_igp = 3, e1000_phy_igp_2 = 4, e1000_phy_gg82563 = 5, e1000_phy_igp_3 = 6, e1000_phy_ife = 7, e1000_phy_82580 = 8, e1000_phy_i210 = 9 } ; enum e1000_bus_type { e1000_bus_type_unknown = 0, e1000_bus_type_pci = 1, e1000_bus_type_pcix = 2, e1000_bus_type_pci_express = 3, e1000_bus_type_reserved = 4 } ; enum e1000_bus_speed { e1000_bus_speed_unknown = 0, e1000_bus_speed_33 = 1, e1000_bus_speed_66 = 2, e1000_bus_speed_100 = 3, e1000_bus_speed_120 = 4, e1000_bus_speed_133 = 5, e1000_bus_speed_2500 = 6, e1000_bus_speed_5000 = 7, e1000_bus_speed_reserved = 8 } ; enum e1000_bus_width { e1000_bus_width_unknown = 0, e1000_bus_width_pcie_x1 = 1, e1000_bus_width_pcie_x2 = 2, e1000_bus_width_pcie_x4 = 4, e1000_bus_width_pcie_x8 = 8, e1000_bus_width_32 = 9, e1000_bus_width_64 = 10, e1000_bus_width_reserved = 11 } ; enum e1000_1000t_rx_status { e1000_1000t_rx_status_not_ok = 0, e1000_1000t_rx_status_ok = 1, e1000_1000t_rx_status_undefined = 255 } ; enum e1000_rev_polarity { e1000_rev_polarity_normal = 0, e1000_rev_polarity_reversed = 1, e1000_rev_polarity_undefined = 255 } ; enum e1000_fc_mode { e1000_fc_none = 0, e1000_fc_rx_pause = 1, e1000_fc_tx_pause = 2, e1000_fc_full = 3, e1000_fc_default = 255 } ; struct e1000_hw_stats { u64 crcerrs ; u64 algnerrc ; u64 symerrs ; u64 rxerrc ; u64 mpc ; u64 
scc ; u64 ecol ; u64 mcc ; u64 latecol ; u64 colc ; u64 dc ; u64 tncrs ; u64 sec ; u64 cexterr ; u64 rlec ; u64 xonrxc ; u64 xontxc ; u64 xoffrxc ; u64 xofftxc ; u64 fcruc ; u64 prc64 ; u64 prc127 ; u64 prc255 ; u64 prc511 ; u64 prc1023 ; u64 prc1522 ; u64 gprc ; u64 bprc ; u64 mprc ; u64 gptc ; u64 gorc ; u64 gotc ; u64 rnbc ; u64 ruc ; u64 rfc ; u64 roc ; u64 rjc ; u64 mgprc ; u64 mgpdc ; u64 mgptc ; u64 tor ; u64 tot ; u64 tpr ; u64 tpt ; u64 ptc64 ; u64 ptc127 ; u64 ptc255 ; u64 ptc511 ; u64 ptc1023 ; u64 ptc1522 ; u64 mptc ; u64 bptc ; u64 tsctc ; u64 tsctfc ; u64 iac ; u64 icrxptc ; u64 icrxatc ; u64 ictxptc ; u64 ictxatc ; u64 ictxqec ; u64 ictxqmtc ; u64 icrxdmtc ; u64 icrxoc ; u64 cbtmpc ; u64 htdpmc ; u64 cbrdpc ; u64 cbrmpc ; u64 rpthc ; u64 hgptc ; u64 htcbdpc ; u64 hgorc ; u64 hgotc ; u64 lenerrs ; u64 scvpc ; u64 hrmpc ; u64 doosync ; u64 o2bgptc ; u64 o2bspc ; u64 b2ospc ; u64 b2ogprc ; }; struct e1000_host_mng_dhcp_cookie { u32 signature ; u8 status ; u8 reserved0 ; u16 vlan_id ; u32 reserved1 ; u16 reserved2 ; u8 reserved3 ; u8 checksum ; }; enum e1000_ms_type { e1000_ms_hw_default = 0, e1000_ms_force_master = 1, e1000_ms_force_slave = 2, e1000_ms_auto = 3 } ; enum e1000_smart_speed { e1000_smart_speed_default = 0, e1000_smart_speed_on = 1, e1000_smart_speed_off = 2 } ; struct e1000_sfp_flags { unsigned char e1000_base_sx : 1 ; unsigned char e1000_base_lx : 1 ; unsigned char e1000_base_cx : 1 ; unsigned char e1000_base_t : 1 ; unsigned char e100_base_lx : 1 ; unsigned char e100_base_fx : 1 ; unsigned char e10_base_bx10 : 1 ; unsigned char e10_base_px : 1 ; }; struct e1000_fw_version { u32 etrack_id ; u16 eep_major ; u16 eep_minor ; u16 eep_build ; u8 invm_major ; u8 invm_minor ; u8 invm_img_type ; bool or_valid ; u16 or_major ; u16 or_build ; u16 or_patch ; }; struct e1000_mac_operations { s32 (*check_for_link)(struct e1000_hw * ) ; s32 (*reset_hw)(struct e1000_hw * ) ; s32 (*init_hw)(struct e1000_hw * ) ; bool (*check_mng_mode)(struct e1000_hw * ) 
; s32 (*setup_physical_interface)(struct e1000_hw * ) ; void (*rar_set)(struct e1000_hw * , u8 * , u32 ) ; s32 (*read_mac_addr)(struct e1000_hw * ) ; s32 (*get_speed_and_duplex)(struct e1000_hw * , u16 * , u16 * ) ; s32 (*acquire_swfw_sync)(struct e1000_hw * , u16 ) ; void (*release_swfw_sync)(struct e1000_hw * , u16 ) ; s32 (*get_thermal_sensor_data)(struct e1000_hw * ) ; s32 (*init_thermal_sensor_thresh)(struct e1000_hw * ) ; }; struct e1000_phy_operations { s32 (*acquire)(struct e1000_hw * ) ; s32 (*check_polarity)(struct e1000_hw * ) ; s32 (*check_reset_block)(struct e1000_hw * ) ; s32 (*force_speed_duplex)(struct e1000_hw * ) ; s32 (*get_cfg_done)(struct e1000_hw * ) ; s32 (*get_cable_length)(struct e1000_hw * ) ; s32 (*get_phy_info)(struct e1000_hw * ) ; s32 (*read_reg)(struct e1000_hw * , u32 , u16 * ) ; void (*release)(struct e1000_hw * ) ; s32 (*reset)(struct e1000_hw * ) ; s32 (*set_d0_lplu_state)(struct e1000_hw * , bool ) ; s32 (*set_d3_lplu_state)(struct e1000_hw * , bool ) ; s32 (*write_reg)(struct e1000_hw * , u32 , u16 ) ; s32 (*read_i2c_byte)(struct e1000_hw * , u8 , u8 , u8 * ) ; s32 (*write_i2c_byte)(struct e1000_hw * , u8 , u8 , u8 ) ; }; struct e1000_nvm_operations { s32 (*acquire)(struct e1000_hw * ) ; s32 (*read)(struct e1000_hw * , u16 , u16 , u16 * ) ; void (*release)(struct e1000_hw * ) ; s32 (*write)(struct e1000_hw * , u16 , u16 , u16 * ) ; s32 (*update)(struct e1000_hw * ) ; s32 (*validate)(struct e1000_hw * ) ; s32 (*valid_led_default)(struct e1000_hw * , u16 * ) ; }; struct e1000_thermal_diode_data { u8 location ; u8 temp ; u8 caution_thresh ; u8 max_op_thresh ; }; struct e1000_thermal_sensor_data { struct e1000_thermal_diode_data sensor[3U] ; }; struct e1000_info { s32 (*get_invariants)(struct e1000_hw * ) ; struct e1000_mac_operations *mac_ops ; struct e1000_phy_operations *phy_ops ; struct e1000_nvm_operations *nvm_ops ; }; struct e1000_mac_info { struct e1000_mac_operations ops ; u8 addr[6U] ; u8 perm_addr[6U] ; enum 
e1000_mac_type type ; u32 ledctl_default ; u32 ledctl_mode1 ; u32 ledctl_mode2 ; u32 mc_filter_type ; u32 txcw ; u16 mta_reg_count ; u16 uta_reg_count ; u32 mta_shadow[128U] ; u16 rar_entry_count ; u8 forced_speed_duplex ; bool adaptive_ifs ; bool arc_subsystem_valid ; bool asf_firmware_present ; bool autoneg ; bool autoneg_failed ; bool disable_hw_init_bits ; bool get_link_status ; bool ifs_params_forced ; bool in_ifs_mode ; bool report_tx_early ; bool serdes_has_link ; bool tx_pkt_filtering ; struct e1000_thermal_sensor_data thermal_sensor_data ; }; struct e1000_phy_info { struct e1000_phy_operations ops ; enum e1000_phy_type type ; enum e1000_1000t_rx_status local_rx ; enum e1000_1000t_rx_status remote_rx ; enum e1000_ms_type ms_type ; enum e1000_ms_type original_ms_type ; enum e1000_rev_polarity cable_polarity ; enum e1000_smart_speed smart_speed ; u32 addr ; u32 id ; u32 reset_delay_us ; u32 revision ; enum e1000_media_type media_type ; u16 autoneg_advertised ; u16 autoneg_mask ; u16 cable_length ; u16 max_cable_length ; u16 min_cable_length ; u8 mdix ; bool disable_polarity_correction ; bool is_mdix ; bool polarity_correction ; bool reset_disable ; bool speed_downgraded ; bool autoneg_wait_to_complete ; }; struct e1000_nvm_info { struct e1000_nvm_operations ops ; enum e1000_nvm_type type ; enum e1000_nvm_override override ; u32 flash_bank_size ; u32 flash_base_addr ; u16 word_size ; u16 delay_usec ; u16 address_bits ; u16 opcode_bits ; u16 page_size ; }; struct e1000_bus_info { enum e1000_bus_type type ; enum e1000_bus_speed speed ; enum e1000_bus_width width ; u32 snoop ; u16 func ; u16 pci_cmd_word ; }; struct e1000_fc_info { u32 high_water ; u32 low_water ; u16 pause_time ; bool send_xon ; bool strict_ieee ; enum e1000_fc_mode current_mode ; enum e1000_fc_mode requested_mode ; }; struct e1000_mbx_operations { s32 (*init_params)(struct e1000_hw * ) ; s32 (*read)(struct e1000_hw * , u32 * , u16 , u16 ) ; s32 (*write)(struct e1000_hw * , u32 * , u16 , u16 ) ; 
s32 (*read_posted)(struct e1000_hw * , u32 * , u16 , u16 ) ; s32 (*write_posted)(struct e1000_hw * , u32 * , u16 , u16 ) ; s32 (*check_for_msg)(struct e1000_hw * , u16 ) ; s32 (*check_for_ack)(struct e1000_hw * , u16 ) ; s32 (*check_for_rst)(struct e1000_hw * , u16 ) ; }; struct e1000_mbx_stats { u32 msgs_tx ; u32 msgs_rx ; u32 acks ; u32 reqs ; u32 rsts ; }; struct e1000_mbx_info { struct e1000_mbx_operations ops ; struct e1000_mbx_stats stats ; u32 timeout ; u32 usec_delay ; u16 size ; }; struct e1000_dev_spec_82575 { bool sgmii_active ; bool global_device_reset ; bool eee_disable ; bool clear_semaphore_once ; struct e1000_sfp_flags eth_flags ; bool module_plugged ; u8 media_port ; bool media_changed ; bool mas_capable ; }; union __anonunion_dev_spec_386 { struct e1000_dev_spec_82575 _82575 ; }; struct e1000_hw { void *back ; u8 *hw_addr ; u8 *flash_address ; unsigned long io_base ; struct e1000_mac_info mac ; struct e1000_fc_info fc ; struct e1000_phy_info phy ; struct e1000_nvm_info nvm ; struct e1000_bus_info bus ; struct e1000_mbx_info mbx ; struct e1000_host_mng_dhcp_cookie mng_cookie ; union __anonunion_dev_spec_386 dev_spec ; u16 device_id ; u16 subsystem_vendor_id ; u16 subsystem_device_id ; u16 vendor_id ; u8 revision_id ; }; struct __anonstruct_read_387 { __le64 pkt_addr ; __le64 hdr_addr ; }; struct __anonstruct_lo_dword_390 { __le16 pkt_info ; __le16 hdr_info ; }; struct __anonstruct_csum_ip_392 { __le16 ip_id ; __le16 csum ; }; union __anonunion_hi_dword_391 { __le32 rss ; struct __anonstruct_csum_ip_392 csum_ip ; }; struct __anonstruct_lower_389 { struct __anonstruct_lo_dword_390 lo_dword ; union __anonunion_hi_dword_391 hi_dword ; }; struct __anonstruct_upper_393 { __le32 status_error ; __le16 length ; __le16 vlan ; }; struct __anonstruct_wb_388 { struct __anonstruct_lower_389 lower ; struct __anonstruct_upper_393 upper ; }; union e1000_adv_rx_desc { struct __anonstruct_read_387 read ; struct __anonstruct_wb_388 wb ; }; struct __anonstruct_read_394 
{ __le64 buffer_addr ; __le32 cmd_type_len ; __le32 olinfo_status ; }; struct __anonstruct_wb_395 { __le64 rsvd ; __le32 nxtseq_seed ; __le32 status ; }; union e1000_adv_tx_desc { struct __anonstruct_read_394 read ; struct __anonstruct_wb_395 wb ; }; struct e1000_adv_tx_context_desc { __le32 vlan_macip_lens ; __le32 seqnum_seed ; __le32 type_tucmd_mlhl ; __le32 mss_l4len_idx ; }; struct cyclecounter { cycle_t (*read)(struct cyclecounter const * ) ; cycle_t mask ; u32 mult ; u32 shift ; }; struct timecounter { struct cyclecounter const *cc ; cycle_t cycle_last ; u64 nsec ; u64 mask ; u64 frac ; }; struct cdev { struct kobject kobj ; struct module *owner ; struct file_operations const *ops ; struct list_head list ; dev_t dev ; unsigned int count ; }; struct pps_event_time { struct timespec ts_real ; }; struct ptp_clock_time { __s64 sec ; __u32 nsec ; __u32 reserved ; }; struct ptp_extts_request { unsigned int index ; unsigned int flags ; unsigned int rsv[2U] ; }; struct ptp_perout_request { struct ptp_clock_time start ; struct ptp_clock_time period ; unsigned int index ; unsigned int flags ; unsigned int rsv[4U] ; }; enum ptp_pin_function { PTP_PF_NONE = 0, PTP_PF_EXTTS = 1, PTP_PF_PEROUT = 2, PTP_PF_PHYSYNC = 3 } ; struct ptp_pin_desc { char name[64U] ; unsigned int index ; unsigned int func ; unsigned int chan ; unsigned int rsv[5U] ; }; enum ldv_36915 { PTP_CLK_REQ_EXTTS = 0, PTP_CLK_REQ_PEROUT = 1, PTP_CLK_REQ_PPS = 2 } ; union __anonunion____missing_field_name_396 { struct ptp_extts_request extts ; struct ptp_perout_request perout ; }; struct ptp_clock_request { enum ldv_36915 type ; union __anonunion____missing_field_name_396 __annonCompField115 ; }; struct ptp_clock_info { struct module *owner ; char name[16U] ; s32 max_adj ; int n_alarm ; int n_ext_ts ; int n_per_out ; int n_pins ; int pps ; struct ptp_pin_desc *pin_config ; int (*adjfreq)(struct ptp_clock_info * , s32 ) ; int (*adjtime)(struct ptp_clock_info * , s64 ) ; int (*gettime64)(struct ptp_clock_info 
* , struct timespec * ) ; int (*settime64)(struct ptp_clock_info * , struct timespec const * ) ; int (*enable)(struct ptp_clock_info * , struct ptp_clock_request * , int ) ; int (*verify)(struct ptp_clock_info * , unsigned int , enum ptp_pin_function , unsigned int ) ; }; struct ptp_clock; union __anonunion____missing_field_name_397 { u64 timestamp ; struct pps_event_time pps_times ; }; struct ptp_clock_event { int type ; int index ; union __anonunion____missing_field_name_397 __annonCompField116 ; }; struct i2c_algo_bit_data { void *data ; void (*setsda)(void * , int ) ; void (*setscl)(void * , int ) ; int (*getsda)(void * ) ; int (*getscl)(void * ) ; int (*pre_xfer)(struct i2c_adapter * ) ; void (*post_xfer)(struct i2c_adapter * ) ; int udelay ; int timeout ; }; struct igb_adapter; struct vf_data_storage { unsigned char vf_mac_addresses[6U] ; u16 vf_mc_hashes[30U] ; u16 num_vf_mc_hashes ; u16 vlans_enabled ; u32 flags ; unsigned long last_nack ; u16 pf_vlan ; u16 pf_qos ; u16 tx_rate ; bool spoofchk_enabled ; }; struct igb_tx_buffer { union e1000_adv_tx_desc *next_to_watch ; unsigned long time_stamp ; struct sk_buff *skb ; unsigned int bytecount ; u16 gso_segs ; __be16 protocol ; dma_addr_t dma ; __u32 len ; u32 tx_flags ; }; struct igb_rx_buffer { dma_addr_t dma ; struct page *page ; unsigned int page_offset ; }; struct igb_tx_queue_stats { u64 packets ; u64 bytes ; u64 restart_queue ; u64 restart_queue2 ; }; struct igb_rx_queue_stats { u64 packets ; u64 bytes ; u64 drops ; u64 csum_err ; u64 alloc_failed ; }; struct igb_ring; struct igb_ring_container { struct igb_ring *ring ; unsigned int total_bytes ; unsigned int total_packets ; u16 work_limit ; u8 count ; u8 itr ; }; struct igb_q_vector; union __anonunion____missing_field_name_398 { struct igb_tx_buffer *tx_buffer_info ; struct igb_rx_buffer *rx_buffer_info ; }; struct __anonstruct____missing_field_name_400 { struct igb_tx_queue_stats tx_stats ; struct u64_stats_sync tx_syncp ; struct u64_stats_sync 
tx_syncp2 ; }; struct __anonstruct____missing_field_name_401 { struct sk_buff *skb ; struct igb_rx_queue_stats rx_stats ; struct u64_stats_sync rx_syncp ; }; union __anonunion____missing_field_name_399 { struct __anonstruct____missing_field_name_400 __annonCompField118 ; struct __anonstruct____missing_field_name_401 __annonCompField119 ; }; struct igb_ring { struct igb_q_vector *q_vector ; struct net_device *netdev ; struct device *dev ; union __anonunion____missing_field_name_398 __annonCompField117 ; void *desc ; unsigned long flags ; void *tail ; dma_addr_t dma ; unsigned int size ; u16 count ; u8 queue_index ; u8 reg_idx ; u16 next_to_clean ; u16 next_to_use ; u16 next_to_alloc ; union __anonunion____missing_field_name_399 __annonCompField120 ; }; struct igb_q_vector { struct igb_adapter *adapter ; int cpu ; u32 eims_value ; u16 itr_val ; u8 set_itr ; void *itr_register ; struct igb_ring_container rx ; struct igb_ring_container tx ; struct napi_struct napi ; struct callback_head rcu ; char name[25U] ; struct igb_ring ring[0U] ; }; struct hwmon_attr { struct device_attribute dev_attr ; struct e1000_hw *hw ; struct e1000_thermal_diode_data *sensor ; char name[12U] ; }; struct hwmon_buff { struct attribute_group group ; struct attribute_group const *groups[2U] ; struct attribute *attrs[13U] ; struct hwmon_attr hwmon_list[12U] ; unsigned int n_hwmon ; }; struct __anonstruct_perout_402 { struct timespec start ; struct timespec period ; }; struct igb_adapter { unsigned long active_vlans[64U] ; struct net_device *netdev ; unsigned long state ; unsigned int flags ; unsigned int num_q_vectors ; struct msix_entry msix_entries[10U] ; u32 rx_itr_setting ; u32 tx_itr_setting ; u16 tx_itr ; u16 rx_itr ; u16 tx_work_limit ; u32 tx_timeout_count ; int num_tx_queues ; struct igb_ring *tx_ring[16U] ; int num_rx_queues ; struct igb_ring *rx_ring[16U] ; u32 max_frame_size ; u32 min_frame_size ; struct timer_list watchdog_timer ; struct timer_list phy_info_timer ; u16 mng_vlan_id ; 
u32 bd_number ; u32 wol ; u32 en_mng_pt ; u16 link_speed ; u16 link_duplex ; struct work_struct reset_task ; struct work_struct watchdog_task ; bool fc_autoneg ; u8 tx_timeout_factor ; struct timer_list blink_timer ; unsigned long led_status ; struct pci_dev *pdev ; spinlock_t stats64_lock ; struct rtnl_link_stats64 stats64 ; struct e1000_hw hw ; struct e1000_hw_stats stats ; struct e1000_phy_info phy_info ; u32 test_icr ; struct igb_ring test_tx_ring ; struct igb_ring test_rx_ring ; int msg_enable ; struct igb_q_vector *q_vector[8U] ; u32 eims_enable_mask ; u32 eims_other ; u16 tx_ring_count ; u16 rx_ring_count ; unsigned int vfs_allocated_count ; struct vf_data_storage *vf_data ; int vf_rate_link_speed ; u32 rss_queues ; u32 wvbr ; u32 *shadow_vfta ; struct ptp_clock *ptp_clock ; struct ptp_clock_info ptp_caps ; struct delayed_work ptp_overflow_work ; struct work_struct ptp_tx_work ; struct sk_buff *ptp_tx_skb ; struct hwtstamp_config tstamp_config ; unsigned long ptp_tx_start ; unsigned long last_rx_ptp_check ; unsigned long last_rx_timestamp ; spinlock_t tmreg_lock ; struct cyclecounter cc ; struct timecounter tc ; u32 tx_hwtstamp_timeouts ; u32 rx_hwtstamp_cleared ; struct ptp_pin_desc sdp_config[4U] ; struct __anonstruct_perout_402 perout[2U] ; char fw_version[32U] ; struct hwmon_buff *igb_hwmon_buff ; bool ets ; struct i2c_algo_bit_data i2c_algo ; struct i2c_adapter i2c_adap ; struct i2c_client *i2c_client ; u32 rss_indir_tbl_init ; u8 rss_indir_tbl[128U] ; unsigned long link_check_timeout ; int copper_tries ; struct e1000_info ei ; u16 eee_advert ; }; struct igb_reg_info { u32 ofs ; char *name ; }; struct my_u0 { u64 a ; u64 b ; }; typedef bool ldv_func_ret_type; typedef bool ldv_func_ret_type___0; typedef bool ldv_func_ret_type___1; typedef bool ldv_func_ret_type___2; typedef int ldv_func_ret_type___3; typedef int ldv_func_ret_type___4; typedef int ldv_func_ret_type___5; typedef int ldv_func_ret_type___6; typedef int ldv_func_ret_type___7; typedef int 
ldv_func_ret_type___8; typedef int ldv_func_ret_type___9; typedef int ldv_func_ret_type___10; typedef int ldv_func_ret_type___11; typedef int ldv_func_ret_type___12; typedef int ldv_func_ret_type___13; typedef bool ldv_func_ret_type___14; typedef bool ldv_func_ret_type___15; typedef int ldv_func_ret_type___16; typedef int ldv_func_ret_type___17; typedef int ldv_func_ret_type___18; typedef int ldv_func_ret_type___19; typedef int ldv_func_ret_type___20; typedef int ldv_func_ret_type___21; typedef int ldv_func_ret_type___22; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct igb_stats { char stat_string[32U] ; int sizeof_stat ; int stat_offset ; }; struct igb_reg_test { u16 reg ; u16 reg_offset ; u16 array_len ; u16 test_type ; u32 mask ; u32 write ; }; enum hrtimer_restart; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum tk_offsets { TK_OFFS_REAL = 0, TK_OFFS_BOOT = 1, TK_OFFS_TAI = 2, TK_OFFS_MAX = 3 } ; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; typedef bool ldv_func_ret_type___23; typedef bool ldv_func_ret_type___24; enum hrtimer_restart; struct i2c_board_info; enum i2c_slave_event; enum i2c_slave_event; struct i2c_board_info { char type[20U] ; unsigned short flags ; unsigned short addr ; void *platform_data ; struct dev_archdata *archdata ; struct device_node *of_node ; struct fwnode_handle *fwnode ; int irq ; }; void __builtin_prefetch(void const * , ...) 
; __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct module __this_module ;
/* ---- x86 bit operations (CIL-flattened kernel inline helpers) ---- */
/* Atomically set bit 'nr' in the bitmap at 'addr' ("lock bts"). */
__inline static void set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } }
/* Atomically clear bit 'nr' ("lock btr"); note: no "memory" clobber here, unlike set_bit. */
__inline static void clear_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } }
/* Atomically set bit 'nr' and return its previous value (0/1), captured via setc. */
__inline static int test_and_set_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } }
/* "lock" variant: on x86 this is just a call to test_and_set_bit. */
__inline static int test_and_set_bit_lock(long nr , unsigned long volatile *addr ) { int tmp ; { tmp = test_and_set_bit(nr, addr); return (tmp); } }
/* Atomically clear bit 'nr' and return its previous value (0/1). */
__inline static int test_and_clear_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } }
/* Non-atomic bit test: word index nr>>6, bit index nr&63 (64-bit longs). */
__inline static int constant_test_bit(long nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr >> 6)) >> ((int )nr & 63)) & 1); } }
/* Non-atomic bit test via "bt"; "sbb %0,%0" broadcasts CF, so the result is 0 (clear) or -1 (set). */
__inline static int variable_test_bit(long nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" (nr)); return (oldbit); } }
extern unsigned long find_next_bit(unsigned long const * , unsigned long , unsigned long ) ; extern unsigned long find_first_bit(unsigned long const * , unsigned long ) ;
/* Byte-swap a 16-bit value (endianness conversion). */
__inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } }
extern int printk(char const * , ...) ; extern void __dynamic_dev_dbg(struct _ddebug * , struct device const * , char const * , ...) ; extern void print_hex_dump(char const * , char const * , int , int , int , void const * , size_t , bool ) ; extern int sprintf(char * , char const * , ...) ; extern int snprintf(char * , size_t , char const * , ...) ; extern enum system_states system_state ; extern void __bad_percpu_size(void) ; extern void __bad_size_call_parameter(void) ;
/* Make 'list' an empty circular list: both links point back at itself. */
__inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } }
extern unsigned long __phys_addr(unsigned long ) ; extern void *memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern char *strcpy(char * , char const * ) ; extern char *strncpy(char * , char const * , __kernel_size_t ) ; extern size_t strlcpy(char * , char const * , size_t ) ; extern int __bitmap_weight(unsigned long const * , unsigned int ) ;
/* Population count over the first 'nbits' bits of 'src' (delegates to __bitmap_weight). */
__inline static int bitmap_weight(unsigned long const *src , unsigned int nbits ) { int tmp___0 ; { tmp___0 = __bitmap_weight(src, nbits); return (tmp___0); } }
extern void warn_slowpath_fmt(char const * , int const , char const * , ...)
; extern void warn_slowpath_null(char const * , int const ) ; extern int nr_cpu_ids ; extern struct cpumask const * const cpu_online_mask ;
/* Number of set bits (CPUs) in the mask, scanning up to nr_cpu_ids bits. */
__inline static unsigned int cpumask_weight(struct cpumask const *srcp ) { int tmp ; { tmp = bitmap_weight((unsigned long const *)(& srcp->bits), (unsigned int )nr_cpu_ids); return ((unsigned int )tmp); } }
/* Prefetch the cache line holding *x for writing; the .altinstructions sections let the kernel patch prefetcht0 -> prefetchw at boot on capable CPUs. */
__inline static void prefetchw(void const *x ) { { __asm__ volatile ("661:\n\tprefetcht0 %P1\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 6*32+ 8)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\tprefetchw %P1\n6651:\n\t.popsection": : "i" (0), "m" (*((char const *)x))); return; } }
extern void __cmpxchg_wrong_size(void) ;
/* Volatile read of an atomic_t counter. */
__inline static int atomic_read(atomic_t const *v ) { int __var ; { __var = 0; return ((int )*((int const volatile *)(& v->counter))); } }
/* Atomically increment the counter ("lock incl"). */
__inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } }
/* Atomic compare-and-exchange: if *v == old, store new; returns the prior value. The switch is on the constant 4UL, so only the 4-byte (cmpxchgl) case is reachable; the other widths are dead branches of the size-generic expansion. */
__inline static int atomic_cmpxchg(atomic_t *v , int old , int new ) { int __ret ; int __old ; int __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 volatile *__ptr___2 ; { __old = old; __new = new; switch (4UL) { case 1UL: __ptr = (u8 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_5616; case 2UL: __ptr___0 = (u16 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_5616; case 4UL: __ptr___1 = (u32 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_5616; case 8UL: __ptr___2 = (u64 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_5616; default: __cmpxchg_wrong_size(); } ldv_5616: ; return (__ret); } }
/* CAS loop: add 'a' to *v unless the current value equals 'u'; returns the old value (the goto labels encode the loop/break structure after CIL flattening). */
__inline static int __atomic_add_unless(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5645: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5644; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5644; } else { } c = old; goto ldv_5645; ldv_5644: ; return (c); } }
/* Returns nonzero iff the add was performed (old value differed from 'u'). */
__inline static int atomic_add_unless(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless(v, a, u); return (tmp != u); } }
extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; extern void lock_acquire(struct lockdep_map * , unsigned int , int , int , int , struct lockdep_map * , unsigned long ) ; extern void lock_release(struct lockdep_map * , int , unsigned long ) ; extern void lockdep_rcu_suspicious(char const * , int const , char const * ) ; extern int mutex_trylock(struct mutex * ) ;
/* LDV-instrumented mutex wrappers; bodies are supplied elsewhere by the harness. */
int ldv_mutex_trylock_15(struct mutex *ldv_func_arg1 ) ; extern void mutex_unlock(struct mutex * ) ; void ldv_mutex_unlock_11(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_12(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_16(struct mutex *ldv_func_arg1 ) ;
/* Verifier primitives: real allocators plus SV-COMP nondeterministic value sources. */
extern void *malloc(size_t ) ; extern void *calloc(size_t , size_t ) ; extern int __VERIFIER_nondet_int(void) ; extern unsigned long __VERIFIER_nondet_ulong(void) ; extern void
*__VERIFIER_nondet_pointer(void) ; extern void __VERIFIER_assume(int ) ; void *ldv_malloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = malloc(size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_zalloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_init_zalloc(size_t size ) { void *p ; void *tmp ; { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } void *ldv_memset(void *s , int c , size_t n ) { void *tmp ; { tmp = memset(s, c, n); return (tmp); } } int ldv_undef_int(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); return (tmp); } } void *ldv_undef_ptr(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); return (tmp); } } unsigned long ldv_undef_ulong(void) { unsigned long tmp ; { tmp = __VERIFIER_nondet_ulong(); return (tmp); } } __inline static void ldv_stop(void) { { LDV_STOP: ; goto LDV_STOP; } } __inline static long ldv__builtin_expect(long exp , long c ) { { return (exp); } } extern void mutex_lock(struct mutex * ) ; void ldv_mutex_lock_10(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_13(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_14(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock ) ; void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock ) ; void ldv_mutex_lock_lock(struct mutex *lock ) ; void ldv_mutex_unlock_lock(struct mutex *lock ) ; void ldv_mutex_lock_mutex_of_device(struct mutex *lock ) ; int ldv_mutex_trylock_mutex_of_device(struct mutex *lock ) ; void ldv_mutex_unlock_mutex_of_device(struct mutex *lock ) ; extern int __preempt_count ; __inline 
static int preempt_count(void) { int pfo_ret__ ; { switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (__preempt_count)); goto ldv_6474; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6474; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6474; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6474; default: __bad_percpu_size(); } ldv_6474: ; return (pfo_ret__ & 2147483647); } } __inline static void __preempt_count_add(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (val)); } goto ldv_6531; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6531; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6531; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (val)); } goto ldv_6531; default: __bad_percpu_size(); } ldv_6531: ; return; } } __inline static void __preempt_count_sub(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": 
"+m" (__preempt_count): "qi" (- val)); } goto ldv_6543; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6543; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6543; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (- val)); } goto ldv_6543; default: __bad_percpu_size(); } ldv_6543: ; return; } } extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->__annonCompField17.rlock); } } __inline static void spin_lock(spinlock_t *lock ) { { _raw_spin_lock(& lock->__annonCompField17.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { _raw_spin_unlock(& lock->__annonCompField17.rlock); return; } } extern void set_normalized_timespec(struct timespec * , time_t , s64 ) ; __inline static struct timespec timespec_add(struct timespec lhs , struct timespec rhs ) { struct timespec ts_delta ; { set_normalized_timespec(& ts_delta, lhs.tv_sec + rhs.tv_sec, (s64 )(lhs.tv_nsec + rhs.tv_nsec)); return (ts_delta); } } extern unsigned long volatile jiffies ; extern int mod_timer(struct timer_list * , unsigned long ) ; int ldv_mod_timer_38(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_mod_timer_39(struct timer_list *ldv_func_arg1 , unsigned long 
ldv_func_arg2 ) ; int ldv_mod_timer_40(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_mod_timer_41(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_mod_timer_42(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_mod_timer_43(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_mod_timer_44(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; extern int del_timer_sync(struct timer_list * ) ; int ldv_del_timer_sync_28(struct timer_list *ldv_func_arg1 ) ; int ldv_del_timer_sync_29(struct timer_list *ldv_func_arg1 ) ; int ldv_del_timer_sync_32(struct timer_list *ldv_func_arg1 ) ; int ldv_del_timer_sync_33(struct timer_list *ldv_func_arg1 ) ; __inline static void __rcu_read_lock(void) { { __preempt_count_add(1); __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock(void) { { __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } extern void kfree_call_rcu(struct callback_head * , void (*)(struct callback_head * ) ) ; extern bool rcu_is_watching(void) ; __inline static void rcu_lock_acquire(struct lockdep_map *map ) { { lock_acquire(map, 0U, 0, 2, 0, (struct lockdep_map *)0, 0UL); return; } } __inline static void rcu_lock_release(struct lockdep_map *map ) { { lock_release(map, 1, 0UL); return; } } extern struct lockdep_map rcu_lock_map ; extern int debug_lockdep_rcu_enabled(void) ; __inline static void rcu_read_lock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __rcu_read_lock(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 849, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 900, "rcu_read_unlock() used illegally while idle"); } else { } } else { } __rcu_read_unlock(); rcu_lock_release(& rcu_lock_map); return; } } extern unsigned long round_jiffies(unsigned long ) ; extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *system_wq ; extern bool queue_work_on(int , struct workqueue_struct * , struct work_struct * ) ; bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; extern bool queue_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; extern void flush_workqueue(struct workqueue_struct * ) ; void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) ; extern bool cancel_work_sync(struct work_struct * ) ; bool ldv_cancel_work_sync_34(struct work_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_35(struct work_struct *ldv_func_arg1 ) ; __inline static bool 
queue_work(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_5(8192, wq, work); return (tmp); } } __inline static bool schedule_work(struct work_struct *work ) { bool tmp ; { tmp = queue_work(system_wq, work); return (tmp); } } extern pg_data_t *node_data[] ; __inline static unsigned int readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr)): "memory"); return (ret); } } __inline static void writel(unsigned int val , void volatile *addr ) { { __asm__ volatile ("movl %0,%1": : "r" (val), "m" (*((unsigned int volatile *)addr)): "memory"); return; } } extern void iounmap(void volatile * ) ; extern void pci_iounmap(struct pci_dev * , void * ) ; extern void *pci_iomap(struct pci_dev * , int , unsigned long ) ; extern int cpu_number ; extern int numa_node ; __inline static int numa_node_id(void) { int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (numa_node)); goto ldv_13647; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (numa_node)); goto ldv_13647; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (numa_node)); goto ldv_13647; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (numa_node)); goto ldv_13647; default: __bad_percpu_size(); } ldv_13647: pscr_ret__ = pfo_ret__; goto ldv_13653; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (numa_node)); goto ldv_13657; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (numa_node)); goto ldv_13657; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (numa_node)); goto ldv_13657; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (numa_node)); goto ldv_13657; default: 
__bad_percpu_size(); } ldv_13657: pscr_ret__ = pfo_ret_____0; goto ldv_13653; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (numa_node)); goto ldv_13666; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (numa_node)); goto ldv_13666; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (numa_node)); goto ldv_13666; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (numa_node)); goto ldv_13666; default: __bad_percpu_size(); } ldv_13666: pscr_ret__ = pfo_ret_____1; goto ldv_13653; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (numa_node)); goto ldv_13675; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (numa_node)); goto ldv_13675; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (numa_node)); goto ldv_13675; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (numa_node)); goto ldv_13675; default: __bad_percpu_size(); } ldv_13675: pscr_ret__ = pfo_ret_____2; goto ldv_13653; default: __bad_size_call_parameter(); goto ldv_13653; } ldv_13653: ; return (pscr_ret__); } } __inline static int numa_mem_id(void) { int tmp ; { tmp = numa_node_id(); return (tmp); } } __inline static int gfp_zonelist(gfp_t flags ) { long tmp ; { tmp = ldv__builtin_expect((flags & 262144U) != 0U, 0L); if (tmp != 0L) { return (1); } else { } return (0); } } __inline static struct zonelist *node_zonelist(int nid , gfp_t flags ) { int tmp ; { tmp = gfp_zonelist(flags); return ((struct zonelist *)(& (node_data[nid])->node_zonelists) + (unsigned long )tmp); } } extern struct page *__alloc_pages_nodemask(gfp_t , unsigned int , struct zonelist * , nodemask_t * ) ; __inline static struct page *__alloc_pages(gfp_t gfp_mask , unsigned int order , struct zonelist *zonelist ) { struct page *tmp ; { tmp = __alloc_pages_nodemask(gfp_mask, order, zonelist, (nodemask_t *)0); return (tmp); } } __inline static struct page *alloc_pages_node(int 
nid, gfp_t gfp_mask, unsigned int order)
{
  struct zonelist *zl;
  struct page *pg;

  /* A negative node id means "use the current NUMA node". */
  if (nid < 0) {
    nid = numa_node_id();
  }
  zl = node_zonelist(nid, gfp_mask);
  pg = __alloc_pages(gfp_mask, order, zl);
  return pg;
}
extern void __free_pages(struct page *, unsigned int);
extern void kfree(void const *);
extern void *__kmalloc(size_t, gfp_t);

/* Plain kernel heap allocation: defer straight to __kmalloc(). */
__inline static void *kmalloc(size_t size, gfp_t flags)
{
  return __kmalloc(size, flags);
}

/* Array allocation with an n*size overflow guard: NULL when the product would wrap. */
__inline static void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
  if (size != 0UL && 0xffffffffffffffffUL / size < n) {
    return (void *)0;
  }
  return __kmalloc(n * size, flags);
}

/* Array allocation with extra flag 32768U ORed in (0x8000; presumably __GFP_ZERO — confirm against this kernel's gfp.h). */
__inline static void *kcalloc(size_t n, size_t size, gfp_t flags)
{
  return kmalloc_array(n, size, flags | 32768U);
}

/* Single-object allocation with the same extra flag 32768U ORed in. */
__inline static void *kzalloc(size_t size, gfp_t flags)
{
  return kmalloc(size, flags | 32768U);
}

/* LDV environment-model state (generated by the verifier harness). */
struct work_struct *ldv_work_struct_9_2 ;
int ldv_state_variable_20 ;
int ldv_work_12_3 ;
int ldv_irq_5_1 = 0;
int ldv_irq_line_4_2 ;
int ldv_irq_line_7_1 ;
int ldv_irq_3_2 = 0;
int ldv_work_9_3 ;
int ldv_irq_6_1 = 0;
struct timer_list *ldv_timer_list_14_3 ;
int ldv_state_variable_14 ;
int ldv_irq_line_6_2 ;
int ldv_state_variable_17 ;
struct work_struct *ldv_work_struct_10_1 ;
int ldv_irq_8_2 = 0;
void *ldv_irq_data_6_0 ;
void *ldv_irq_data_2_3 ;
int ldv_state_variable_19 ;
int ldv_state_variable_9 ;
struct e1000_hw *e1000_nvm_ops_82575_group0 ;
int ldv_irq_line_7_2 ;
int ldv_irq_7_3 = 0;
void *ldv_irq_data_2_2 ;
struct work_struct *ldv_work_struct_11_0 ;
int ldv_state_variable_7 ;
int ldv_irq_line_8_1 ;
int ldv_irq_1_3 = 0;
int ldv_irq_line_2_2 ;
struct ethtool_ringparam *igb_ethtool_ops_group1 ;
void *ldv_irq_data_5_2 ;
void *ldv_irq_data_1_0 ;
void *ldv_irq_data_3_0 ;
struct work_struct *ldv_work_struct_12_3 ;
int LDV_IN_INTERRUPT = 1;
int ldv_irq_1_1 = 0;
void *ldv_irq_data_7_2 ; int ldv_irq_line_3_1 ; struct ethtool_pauseparam *igb_ethtool_ops_group4 ; int ldv_irq_4_1 = 0; int ldv_timer_13_3 ; int ldv_irq_8_3 = 0; int ldv_state_variable_8 ; void *ldv_irq_data_8_2 ; int ldv_state_variable_15 ; int ldv_irq_line_5_0 ; int ldv_irq_line_7_3 ; int ldv_irq_line_8_2 ; int ldv_state_variable_21 ; struct device *igb_pm_ops_group1 ; struct net_device *igb_netdev_ops_group1 ; void *ldv_irq_data_4_0 ; int ldv_irq_8_1 = 0; int ldv_irq_line_6_3 ; struct work_struct *ldv_work_struct_11_1 ; int ldv_irq_4_0 = 0; int ldv_work_10_0 ; int ldv_work_12_2 ; int ldv_irq_2_2 = 0; int ldv_irq_line_2_0 ; int ldv_irq_line_4_0 ; int ldv_irq_line_6_1 ; int ldv_irq_line_3_0 ; void *ldv_irq_data_7_0 ; void *ldv_irq_data_6_1 ; void *ldv_irq_data_3_3 ; int ldv_irq_line_3_2 ; int ldv_state_variable_10 ; int ldv_irq_1_0 = 0; struct work_struct *ldv_work_struct_12_0 ; struct timer_list *ldv_timer_list_14_2 ; int ldv_timer_13_2 ; int ldv_work_10_1 ; int ldv_irq_line_2_1 ; void *ldv_irq_data_6_2 ; int ldv_state_variable_2 ; int ldv_state_variable_25 ; struct timer_list *ldv_timer_list_14_1 ; struct timer_list *ldv_timer_list_13_0 ; struct work_struct *ldv_work_struct_10_0 ; void *ldv_irq_data_2_0 ; struct pci_dev *igb_driver_group1 ; int ldv_state_variable_11 ; int ldv_timer_14_1 ; int ldv_irq_1_2 = 0; int ldv_irq_4_3 = 0; int ldv_state_variable_18 ; int ldv_irq_6_0 = 0; struct ethtool_wolinfo *igb_ethtool_ops_group8 ; int ldv_irq_line_4_1 ; struct work_struct *ldv_work_struct_9_1 ; void *ldv_irq_data_8_1 ; int ldv_irq_line_5_3 ; struct ethtool_eeprom *igb_ethtool_ops_group3 ; int ldv_work_11_3 ; struct ethtool_coalesce *igb_ethtool_ops_group6 ; int ldv_work_11_2 ; int ldv_irq_line_8_3 ; struct pci_dev *igb_err_handler_group0 ; int pci_counter ; int ldv_irq_line_6_0 ; int ldv_irq_7_2 = 0; int ldv_state_variable_0 ; void *ldv_irq_data_5_3 ; int ldv_irq_2_0 = 0; int ldv_state_variable_12 ; struct net_device *igb_ethtool_ops_group9 ; int ldv_irq_line_4_3 ; 
int ldv_state_variable_22 ; struct ethtool_eee *igb_ethtool_ops_group0 ; int ldv_work_9_0 ; int ref_cnt ; int ldv_irq_line_1_1 ; int ldv_irq_6_3 = 0; struct work_struct *ldv_work_struct_10_3 ; int ldv_state_variable_23 ; int ldv_irq_5_2 = 0; struct ethtool_cmd *igb_ethtool_ops_group2 ; int ldv_irq_2_1 = 0; int ldv_irq_3_0 = 0; void *ldv_irq_data_2_1 ; int ldv_state_variable_6 ; void *ldv_irq_data_7_1 ; void *ldv_irq_data_1_3 ; int ldv_irq_8_0 = 0; void *ldv_irq_data_5_0 ; struct e1000_hw *e1000_phy_ops_82575_group0 ; int ldv_irq_7_1 = 0; struct timer_list *ldv_timer_list_13_3 ; void *ldv_irq_data_4_1 ; struct e1000_hw *e1000_mac_ops_82575_group0 ; int ldv_state_variable_3 ; int ldv_irq_line_1_0 ; void *ldv_irq_data_3_2 ; void *ldv_irq_data_6_3 ; struct work_struct *ldv_work_struct_12_1 ; int ldv_work_11_0 ; struct work_struct *ldv_work_struct_11_2 ; int ldv_state_variable_4 ; struct work_struct *ldv_work_struct_9_0 ; int ldv_timer_14_2 ; struct work_struct *ldv_work_struct_9_3 ; int ldv_work_10_2 ; struct ethtool_rxnfc *igb_ethtool_ops_group7 ; int ldv_irq_line_3_3 ; int ldv_work_9_2 ; struct ethtool_channels *igb_ethtool_ops_group5 ; int ldv_timer_14_3 ; int ldv_work_9_1 ; int ldv_state_variable_5 ; int ldv_state_variable_13 ; int ldv_irq_3_1 = 0; int ldv_irq_line_7_0 ; int ldv_irq_line_5_2 ; void *ldv_irq_data_4_3 ; int ldv_irq_4_2 = 0; void *ldv_irq_data_8_0 ; int ldv_work_11_1 ; int ldv_state_variable_24 ; int ldv_irq_6_2 = 0; int ldv_work_12_0 ; struct timer_list *ldv_timer_list_13_1 ; int ldv_timer_13_0 ; int ldv_state_variable_1 ; int ldv_irq_line_1_2 ; int ldv_timer_14_0 ; int ldv_irq_line_2_3 ; int ldv_work_12_1 ; struct timer_list *ldv_timer_list_14_0 ; void *ldv_irq_data_1_1 ; struct work_struct *ldv_work_struct_10_2 ; void *ldv_irq_data_4_2 ; void *ldv_irq_data_3_1 ; void *ldv_irq_data_5_1 ; int ldv_irq_line_8_0 ; int ldv_state_variable_16 ; struct work_struct *ldv_work_struct_12_2 ; void *ldv_irq_data_1_2 ; int ldv_irq_5_3 = 0; int ldv_irq_line_5_1 ; 
int ldv_irq_7_0 = 0; int ldv_irq_2_3 = 0; int ldv_irq_line_1_3 ; int ldv_irq_5_0 = 0; void *ldv_irq_data_8_3 ; struct timer_list *ldv_timer_list_13_2 ; int ldv_irq_3_3 = 0; int ldv_work_10_3 ; int ldv_timer_13_1 ; void *ldv_irq_data_7_3 ; struct work_struct *ldv_work_struct_11_3 ; void activate_suitable_irq_4(int line , void *data ) ; int ldv_irq_3(int state , int line , void *data ) ; void choose_timer_13(void) ; void disable_suitable_irq_2(int line , void *data ) ; void ldv_initialize_e1000_nvm_operations_16(void) ; void disable_suitable_irq_7(int line , void *data ) ; void activate_suitable_irq_3(int line , void *data ) ; int reg_check_1(irqreturn_t (*handler)(int , void * ) ) ; void choose_interrupt_4(void) ; void call_and_disable_work_10(struct work_struct *work ) ; void work_init_9(void) ; void ldv_initialize_e1000_mac_operations_18(void) ; void ldv_initialize_pci_error_handlers_23(void) ; void invoke_work_10(void) ; void call_and_disable_all_11(int state ) ; void activate_pending_timer_13(struct timer_list *timer , unsigned long data , int pending_flag ) ; void activate_suitable_irq_2(int line , void *data ) ; void ldv_timer_14(int state , struct timer_list *timer ) ; void call_and_disable_all_9(int state ) ; void choose_interrupt_1(void) ; void work_init_10(void) ; int reg_check_2(irqreturn_t (*handler)(int , void * ) ) ; void activate_suitable_irq_7(int line , void *data ) ; int reg_check_3(irqreturn_t (*handler)(int , void * ) ) ; void invoke_work_9(void) ; void activate_work_9(struct work_struct *work , int state ) ; void activate_pending_timer_14(struct timer_list *timer , unsigned long data , int pending_flag ) ; int reg_check_7(irqreturn_t (*handler)(int , void * ) ) ; void ldv_initialize_e1000_phy_operations_17(void) ; void activate_suitable_timer_13(struct timer_list *timer , unsigned long data ) ; void ldv_timer_13(int state , struct timer_list *timer ) ; void disable_suitable_timer_13(struct timer_list *timer ) ; int ldv_irq_4(int state , int line 
, void *data ) ; void work_init_11(void) ; void ldv_net_device_ops_20(void) ; void activate_work_11(struct work_struct *work , int state ) ; void disable_work_11(struct work_struct *work ) ; void ldv_initialize_ethtool_ops_19(void) ; void call_and_disable_work_9(struct work_struct *work ) ; void disable_suitable_irq_1(int line , void *data ) ; void activate_suitable_irq_1(int line , void *data ) ; int reg_check_4(irqreturn_t (*handler)(int , void * ) ) ; void ldv_dev_pm_ops_25(void) ; void timer_init_13(void) ; int ldv_irq_2(int state , int line , void *data ) ; int reg_timer_14(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) ; void choose_interrupt_2(void) ; void disable_work_9(struct work_struct *work ) ; void disable_suitable_irq_4(int line , void *data ) ; void disable_suitable_timer_14(struct timer_list *timer ) ; void disable_work_10(struct work_struct *work ) ; void ldv_pci_driver_22(void) ; void work_init_12(void) ; void activate_work_10(struct work_struct *work , int state ) ; void activate_suitable_timer_14(struct timer_list *timer , unsigned long data ) ; void timer_init_14(void) ; void disable_suitable_irq_3(int line , void *data ) ; int reg_timer_13(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) ; void call_and_disable_all_10(int state ) ; void choose_timer_14(void) ; int ldv_irq_1(int state , int line , void *data ) ; void choose_interrupt_3(void) ; __inline static char const *kobject_name(struct kobject const *kobj ) { { return ((char const *)kobj->name); } } extern void *vzalloc(unsigned long ) ; extern void vfree(void const * ) ; __inline static int PageTail(struct page const *page ) { int tmp ; { tmp = constant_test_bit(15L, (unsigned long const volatile *)(& page->flags)); return (tmp); } } __inline static struct page *compound_head_by_tail(struct page *tail ) { struct page *head ; int tmp ; long tmp___0 ; { head = tail->__annonCompField46.first_page; __asm__ volatile ("": : : 
"memory"); tmp = PageTail((struct page const *)tail); tmp___0 = ldv__builtin_expect(tmp != 0, 1L); if (tmp___0 != 0L) { return (head); } else { } return (tail); } } __inline static struct page *compound_head(struct page *page ) { struct page *tmp ; int tmp___0 ; long tmp___1 ; { tmp___0 = PageTail((struct page const *)page); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { tmp = compound_head_by_tail(page); return (tmp); } else { } return (page); } } __inline static int page_count(struct page *page ) { struct page *tmp ; int tmp___0 ; { tmp = compound_head(page); tmp___0 = atomic_read((atomic_t const *)(& tmp->__annonCompField42.__annonCompField41.__annonCompField40._count)); return (tmp___0); } } __inline static int page_to_nid(struct page const *page ) { { return ((int )(page->flags >> 54)); } } __inline static void *lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) + 0xffff880000000000UL)); } } extern void synchronize_irq(unsigned int ) ; extern void __const_udelay(unsigned long ) ; extern void msleep(unsigned int ) ; extern void usleep_range(unsigned long , unsigned long ) ; extern int driver_for_each_device(struct device_driver * , struct device * , void * , int (*)(struct device * , void * ) ) ; __inline static bool device_can_wakeup(struct device *dev ) { { return ((int )dev->power.can_wakeup != 0); } } extern int device_set_wakeup_enable(struct device * , bool ) ; __inline static char const *dev_name(struct device const *dev ) { char const *tmp ; { if ((unsigned long )dev->init_name != (unsigned long )((char const */* const */)0)) { return ((char const *)dev->init_name); } else { } tmp = kobject_name(& dev->kobj); return (tmp); } } __inline static void *dev_get_drvdata(struct device const *dev ) { { return ((void *)dev->driver_data); } } __inline static void dev_set_drvdata(struct device *dev , void *data ) { { dev->driver_data = data; 
return; } } extern void dev_err(struct device const * , char const * , ...) ; extern void dev_warn(struct device const * , char const * , ...) ; extern void _dev_info(struct device const * , char const * , ...) ; __inline static void dql_queued(struct dql *dql , unsigned int count ) { long tmp ; { tmp = ldv__builtin_expect(count > 268435455U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/dynamic_queue_limits.h"), "i" (74), "i" (12UL)); ldv_31449: ; goto ldv_31449; } else { } dql->last_obj_cnt = count; __asm__ volatile ("": : : "memory"); dql->num_queued = dql->num_queued + count; return; } } __inline static int dql_avail(struct dql const *dql ) { unsigned int __var ; unsigned int __var___0 ; { __var = 0U; __var___0 = 0U; return ((int )((unsigned int )*((unsigned int const volatile *)(& dql->adj_limit)) - (unsigned int )*((unsigned int const volatile *)(& dql->num_queued)))); } } extern void dql_completed(struct dql * , unsigned int ) ; extern void dql_reset(struct dql * ) ; __inline static void kmemcheck_mark_initialized(void *address , unsigned int n ) { { return; } } extern int net_ratelimit(void) ; __inline static __sum16 csum_fold(__wsum sum ) { { __asm__ (" addl %1,%0\n adcl $0xffff,%0": "=r" (sum): "r" (sum << 16), "0" (sum & 4294901760U)); return ((__sum16 )(~ sum >> 16)); } } __inline static __wsum csum_tcpudp_nofold(__be32 saddr , __be32 daddr , unsigned short len , unsigned short proto , __wsum sum ) { { __asm__ (" addl %1, %0\n adcl %2, %0\n adcl %3, %0\n adcl $0, %0\n": "=r" (sum): "g" (daddr), "g" (saddr), "g" (((int )len + (int )proto) << 8), "0" (sum)); return (sum); } } __inline static __sum16 csum_tcpudp_magic(__be32 saddr , __be32 daddr , unsigned short len , unsigned short proto , __wsum sum ) { __wsum tmp ; __sum16 tmp___0 ; { tmp = csum_tcpudp_nofold(saddr, daddr, (int )len, (int )proto, sum); tmp___0 = 
csum_fold(tmp); return (tmp___0); } } extern __sum16 csum_ipv6_magic(struct in6_addr const * , struct in6_addr const * , __u32 , unsigned short , __wsum ) ; __inline static int valid_dma_direction(int dma_direction ) { { return ((dma_direction == 0 || dma_direction == 1) || dma_direction == 2); } } extern void debug_dma_map_page(struct device * , struct page * , size_t , size_t , int , dma_addr_t , bool ) ; extern void debug_dma_mapping_error(struct device * , dma_addr_t ) ; extern void debug_dma_unmap_page(struct device * , dma_addr_t , size_t , int , bool ) ; extern void debug_dma_sync_single_range_for_cpu(struct device * , dma_addr_t , unsigned long , size_t , int ) ; extern void debug_dma_sync_single_range_for_device(struct device * , dma_addr_t , unsigned long , size_t , int ) ; extern struct dma_map_ops *dma_ops ; __inline static struct dma_map_ops *get_dma_ops(struct device *dev ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )dev == (unsigned long )((struct device *)0), 0L); if (tmp != 0L || (unsigned long )dev->archdata.dma_ops == (unsigned long )((struct dma_map_ops *)0)) { return (dma_ops); } else { return (dev->archdata.dma_ops); } } } __inline static dma_addr_t dma_map_single_attrs(struct device *dev , void *ptr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; dma_addr_t addr ; int tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; kmemcheck_mark_initialized(ptr, (unsigned int )size); tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (19), "i" (12UL)); ldv_33759: ; goto ldv_33759; } else { } tmp___2 = __phys_addr((unsigned long )ptr); addr = 
(*(ops->map_page))(dev, (struct page *)-24189255811072L + (tmp___2 >> 12), (unsigned long )ptr & 4095UL, size, dir, attrs); tmp___3 = __phys_addr((unsigned long )ptr); debug_dma_map_page(dev, (struct page *)-24189255811072L + (tmp___3 >> 12), (unsigned long )ptr & 4095UL, size, (int )dir, addr, 1); return (addr); } } __inline static void dma_unmap_single_attrs(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (36), "i" (12UL)); ldv_33768: ; goto ldv_33768; } else { } if ((unsigned long )ops->unmap_page != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_page))(dev, addr, size, dir, attrs); } else { } debug_dma_unmap_page(dev, addr, size, (int )dir, 1); return; } } __inline static dma_addr_t dma_map_page(struct device *dev , struct page *page , size_t offset , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; dma_addr_t addr ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = lowmem_page_address((struct page const *)page); kmemcheck_mark_initialized(tmp___0 + offset, (unsigned int )size); tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (84), "i" (12UL)); 
ldv_33803: ; goto ldv_33803; } else { } addr = (*(ops->map_page))(dev, page, offset, size, dir, (struct dma_attrs *)0); debug_dma_map_page(dev, page, offset, size, (int )dir, addr, 0); return (addr); } } __inline static void dma_unmap_page(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (96), "i" (12UL)); ldv_33811: ; goto ldv_33811; } else { } if ((unsigned long )ops->unmap_page != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_page))(dev, addr, size, dir, (struct dma_attrs *)0); } else { } debug_dma_unmap_page(dev, addr, size, (int )dir, 0); return; } } __inline static void dma_sync_single_range_for_cpu(struct device *dev , dma_addr_t addr , unsigned long offset , size_t size , enum dma_data_direction dir ) { struct dma_map_ops const *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = (struct dma_map_ops const *)tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (134), "i" (12UL)); ldv_33836: ; goto ldv_33836; } else { } if ((unsigned long )ops->sync_single_for_cpu != (unsigned long )((void (*/* const */)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_cpu))(dev, addr + 
(unsigned long long )offset, size, dir); } else { } debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, (int )dir); return; } } __inline static void dma_sync_single_range_for_device(struct device *dev , dma_addr_t addr , unsigned long offset , size_t size , enum dma_data_direction dir ) { struct dma_map_ops const *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = (struct dma_map_ops const *)tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (148), "i" (12UL)); ldv_33845: ; goto ldv_33845; } else { } if ((unsigned long )ops->sync_single_for_device != (unsigned long )((void (*/* const */)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_device))(dev, addr + (unsigned long long )offset, size, dir); } else { } debug_dma_sync_single_range_for_device(dev, addr, offset, size, (int )dir); return; } } __inline static int dma_mapping_error(struct device *dev , dma_addr_t dma_addr ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; { tmp = get_dma_ops(dev); ops = tmp; debug_dma_mapping_error(dev, dma_addr); if ((unsigned long )ops->mapping_error != (unsigned long )((int (*)(struct device * , dma_addr_t ))0)) { tmp___0 = (*(ops->mapping_error))(dev, dma_addr); return (tmp___0); } else { } return (dma_addr == 0ULL); } } extern int dma_supported(struct device * , u64 ) ; extern int dma_set_mask(struct device * , u64 ) ; extern void *dma_alloc_attrs(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; extern void dma_free_attrs(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; __inline static int dma_set_coherent_mask(struct device *dev , u64 mask ) { int tmp ; { 
/* Remainder of dma_set_coherent_mask(): fail with -5 (-EIO) if the mask is
 * unsupported, otherwise record it on the device. */
tmp = dma_supported(dev, mask); if (tmp == 0) { return (-5); } else { } dev->coherent_dma_mask = mask; return (0); } }

/* Set the streaming mask and, on success, the coherent mask to the same
 * value; returns the streaming-mask result either way. */
__inline static int dma_set_mask_and_coherent(struct device *dev , u64 mask ) { int rc ; int tmp ; { tmp = dma_set_mask(dev, mask); rc = tmp; if (rc == 0) { dma_set_coherent_mask(dev, mask); } else { } return (rc); } }

/* ---- sk_buff fragment / header accessors (CIL-flattened skbuff.h) ---- */

/* Size in bytes of one paged fragment. */
__inline static unsigned int skb_frag_size(skb_frag_t const *frag ) { { return ((unsigned int )frag->size); } }

/* Shrink a fragment by delta bytes (caller guarantees delta <= size). */
__inline static void skb_frag_size_sub(skb_frag_t *frag , int delta ) { { frag->size = frag->size - (__u32 )delta; return; } }

extern void consume_skb(struct sk_buff * ) ;
extern int pskb_expand_head(struct sk_buff * , int , int , gfp_t ) ;
extern int skb_pad(struct sk_buff * , int ) ;

/* Record a flow hash on the skb; type 3 (PKT_HASH_TYPE_L4) marks it as an
 * L4 hash, and any software-hash flag is cleared. */
__inline static void skb_set_hash(struct sk_buff *skb , __u32 hash , enum pkt_hash_types type ) { { skb->l4_hash = (unsigned int )type == 3U; skb->sw_hash = 0U; skb->hash = hash; return; } }

/* End of the skb data area, where struct skb_shared_info lives. */
__inline static unsigned char *skb_end_pointer(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->end); } }

/* Take an extra reference on the skb and return it. */
__inline static struct sk_buff *skb_get(struct sk_buff *skb ) { { atomic_inc(& skb->users); return (skb); } }

/* Nonzero iff the skb header is shared with a clone: checks the cloned
 * bitfield (read via raw byte offset 142 — CIL's rendering of the bitfield
 * access) and then whether dataref's payload/header halves sum to != 1. */
__inline static int skb_header_cloned(struct sk_buff const *skb ) { int dataref ; unsigned char *tmp ; { if ((unsigned int )*((unsigned char *)skb + 142UL) == 0U) { return (0); } else { } tmp = skb_end_pointer(skb); dataref = atomic_read((atomic_t const *)(& ((struct skb_shared_info *)tmp)->dataref)); dataref = (dataref & 65535) - (dataref >> 16); return (dataref != 1); } }

/* True when some of the data lives in paged fragments. */
__inline static bool skb_is_nonlinear(struct sk_buff const *skb ) { { return ((unsigned int )skb->data_len != 0U); } }

/* Bytes in the linear (non-paged) part of the skb. */
__inline static unsigned int skb_headlen(struct sk_buff const *skb ) { { return ((unsigned int )skb->len - (unsigned int )skb->data_len); } }

extern void skb_add_rx_frag(struct sk_buff * , int , struct page * , int , int , unsigned int ) ;

/* Current tail of the linear data area. */
__inline static unsigned char *skb_tail_pointer(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->tail); } }

/* Reserve len bytes at the tail and return the old tail.  BUG()s (ud2 +
 * self-loop) if the skb is nonlinear, since the tail would be meaningless. */
__inline static unsigned char *__skb_put(struct sk_buff *skb , unsigned int len ) { unsigned char *tmp ; unsigned char *tmp___0 ; bool tmp___1 ; long tmp___2 ; { tmp___0 = skb_tail_pointer((struct sk_buff const *)skb); tmp = tmp___0; tmp___1 = skb_is_nonlinear((struct sk_buff const *)skb); tmp___2 = ldv__builtin_expect((long )tmp___1, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/skbuff.h"), "i" (1696), "i" (12UL)); ldv_35107: ; goto ldv_35107; } else { } skb->tail = skb->tail + len; skb->len = skb->len + len; return (tmp); } }

extern unsigned char *__pskb_pull_tail(struct sk_buff * , int ) ;

/* Ensure len bytes are linear: 1 if already so, 0 if the skb is too short,
 * otherwise pull the shortfall from the fragments and report success. */
__inline static int pskb_may_pull(struct sk_buff *skb , unsigned int len ) { unsigned int tmp ; long tmp___0 ; long tmp___1 ; unsigned int tmp___2 ; unsigned char *tmp___3 ; { tmp = skb_headlen((struct sk_buff const *)skb); tmp___0 = ldv__builtin_expect(tmp >= len, 1L); if (tmp___0 != 0L) { return (1); } else { } tmp___1 = ldv__builtin_expect(skb->len < len, 0L); if (tmp___1 != 0L) { return (0); } else { } tmp___2 = skb_headlen((struct sk_buff const *)skb); tmp___3 = __pskb_pull_tail(skb, (int )(len - tmp___2)); return ((unsigned long )tmp___3 != (unsigned long )((unsigned char *)0U)); } }

/* Free space before the current data pointer. */
__inline static unsigned int skb_headroom(struct sk_buff const *skb ) { { return ((unsigned int )((long )skb->data) - (unsigned int )((long )skb->head)); } }

/* Transport (L4) header pointer. */
__inline static unsigned char *skb_transport_header(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->transport_header); } }

/* Network (L3) header pointer. */
__inline static unsigned char *skb_network_header(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->network_header); } }

/* Byte offset of the transport header from skb->data. */
__inline static int skb_transport_offset(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_transport_header(skb); return ((int )((unsigned int )((long )tmp) - (unsigned int )((long )skb->data))); } }

/* Length of the network header (transport offset minus network offset). */
__inline static u32 skb_network_header_len(struct sk_buff const *skb ) { { return ((u32 )((int )skb->transport_header - (int )skb->network_header)); } }

/* Byte offset of the network header from skb->data. */
__inline static int skb_network_offset(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_network_header(skb); return ((int )((unsigned int )((long )tmp) - (unsigned int )((long )skb->data))); } }

extern struct sk_buff *__napi_alloc_skb(struct napi_struct * , unsigned int , gfp_t ) ;

/* Allocate an skb in NAPI context with GFP_ATOMIC (32U). */
__inline static struct sk_buff *napi_alloc_skb(struct napi_struct *napi , unsigned int length ) { struct sk_buff *tmp ; { tmp = __napi_alloc_skb(napi, length, 32U); return (tmp); } }

/* Allocate pages for RX buffers; 24832U ORs in the __GFP_COLD|__GFP_COMP|
 * __GFP_MEMALLOC-style flag set used by __dev_alloc_pages in this kernel —
 * exact flag decomposition not visible here. */
__inline static struct page *__dev_alloc_pages(gfp_t gfp_mask , unsigned int order ) { struct page *tmp ; { gfp_mask = gfp_mask | 24832U; tmp = alloc_pages_node(-1, gfp_mask, order); return (tmp); } }

/* Single-page (order-0) variant. */
__inline static struct page *__dev_alloc_page(gfp_t gfp_mask ) { struct page *tmp ; { tmp = __dev_alloc_pages(gfp_mask, 0U); return (tmp); } }

/* GFP_ATOMIC single-page allocation. */
__inline static struct page *dev_alloc_page(void) { struct page *tmp ; { tmp = __dev_alloc_page(32U); return (tmp); } }

/* Page backing a paged fragment. */
__inline static struct page *skb_frag_page(skb_frag_t const *frag ) { { return ((struct page *)frag->page.p); } }

/* Kernel-virtual address of a fragment's data. */
__inline static void *skb_frag_address(skb_frag_t const *frag ) { struct page *tmp ; void *tmp___0 ; { tmp = skb_frag_page(frag); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )frag->page_offset); } }

/* DMA-map a fragment sub-range (offset is relative to the fragment). */
__inline static dma_addr_t skb_frag_dma_map(struct device *dev , skb_frag_t const *frag , size_t offset , size_t size , enum dma_data_direction dir ) { struct page *tmp ; dma_addr_t tmp___0 ; { tmp = skb_frag_page(frag); tmp___0 = dma_map_page(dev, tmp, (size_t )frag->page_offset + offset, size, dir); return (tmp___0); } }

/* Copy-on-write helper: if headroom is short or the skb is cloned, grow the
 * head.  The _max1/_max2 pairs are CIL's expansion of two max(32,64) uses:
 * the requested delta is rounded with (64-1+delta) & -64, i.e. aligned up
 * to the 64-byte NET_SKB_PAD boundary. */
__inline static int __skb_cow(struct sk_buff *skb , unsigned int headroom , int cloned ) { int delta ;
unsigned int tmp ; unsigned int tmp___0 ; int _max1 ; int _max2 ; int _max1___0 ; int _max2___0 ; int tmp___1 ; { delta = 0; tmp___0 = skb_headroom((struct sk_buff const *)skb); if (tmp___0 < headroom) { tmp = skb_headroom((struct sk_buff const *)skb); delta = (int )(headroom - tmp); } else { } if (delta != 0 || cloned != 0) { _max1 = 32; _max2 = 64; _max1___0 = 32; _max2___0 = 64; tmp___1 = pskb_expand_head(skb, (((_max1 > _max2 ? _max1 : _max2) + -1) + delta) & - (_max1___0 > _max2___0 ? _max1___0 : _max2___0), 0, 32U); return (tmp___1); } else { } return (0); } }

/* COW the header area, expanding when cloned. */
__inline static int skb_cow_head(struct sk_buff *skb , unsigned int headroom ) { int tmp ; int tmp___0 ; { tmp = skb_header_cloned((struct sk_buff const *)skb); tmp___0 = __skb_cow(skb, headroom, tmp); return (tmp___0); } }

/* Pad the skb with zeroes up to len bytes; -12 is -ENOMEM on failure. */
__inline static int skb_put_padto(struct sk_buff *skb , unsigned int len ) { unsigned int size ; int tmp ; long tmp___0 ; { size = skb->len; tmp___0 = ldv__builtin_expect(size < len, 0L); if (tmp___0 != 0L) { len = len - size; tmp = skb_pad(skb, (int )len); if (tmp != 0) { return (-12); } else { } __skb_put(skb, len); } else { } return (0); } }

/* memcpy into the linear data area (caller guarantees space). */
__inline static void skb_copy_to_linear_data(struct sk_buff *skb , void const *from , unsigned int const len ) { { memcpy((void *)skb->data, from, (size_t )len); return; } }

extern void skb_clone_tx_timestamp(struct sk_buff * ) ;
extern void skb_tstamp_tx(struct sk_buff * , struct skb_shared_hwtstamps * ) ;

/* Software TX timestamp: emit one only if requested (tx_flags bit 1) and a
 * hardware timestamp is not already in progress (tx_flags bit 2 clear). */
__inline static void sw_tx_timestamp(struct sk_buff *skb ) { unsigned char *tmp ; unsigned char *tmp___0 ; { tmp = skb_end_pointer((struct sk_buff const *)skb); if (((int )((struct skb_shared_info *)tmp)->tx_flags & 2) != 0) { tmp___0 = skb_end_pointer((struct sk_buff const *)skb); if (((int )((struct skb_shared_info *)tmp___0)->tx_flags & 4) == 0) { skb_tstamp_tx(skb, (struct skb_shared_hwtstamps *)0); } else { } } else { } return; } }

/* Driver TX-timestamp hook: clone for HW stamping, then SW stamp.
 * (Body continues past this block boundary.) */
__inline static void skb_tx_timestamp(struct sk_buff *skb ) { { skb_clone_tx_timestamp(skb);
/* Remainder of skb_tx_timestamp(). */
sw_tx_timestamp(skb); return; } }

/* Record the RX queue an skb arrived on (stored 1-based; 0 means unset). */
__inline static void skb_record_rx_queue(struct sk_buff *skb , u16 rx_queue ) { { skb->queue_mapping = (unsigned int )rx_queue + 1U; return; } }

/* True iff the skb carries GSO state (nonzero gso_size in shared info). */
__inline static bool skb_is_gso(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_end_pointer(skb); return ((unsigned int )((struct skb_shared_info *)tmp)->gso_size != 0U); } }

/* True iff the GSO type has bit 4 set (SKB_GSO_TCPV6 in this kernel). */
__inline static bool skb_is_gso_v6(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_end_pointer(skb); return (((int )((struct skb_shared_info *)tmp)->gso_type & 16) != 0); } }

/* Debug-only assertion, compiled out here. */
__inline static void skb_checksum_none_assert(struct sk_buff const *skb ) { { return; } }

/* u64_stats seqcount helpers: no-ops / trivial on 64-bit SMP-less config. */
__inline static void u64_stats_init(struct u64_stats_sync *syncp ) { { return; } }
__inline static unsigned int u64_stats_fetch_begin_irq(struct u64_stats_sync const *syncp ) { { return (0U); } }
__inline static bool u64_stats_fetch_retry_irq(struct u64_stats_sync const *syncp , unsigned int start ) { { return (0); } }

extern int request_threaded_irq(unsigned int , irqreturn_t (*)(int , void * ) , irqreturn_t (*)(int , void * ) , unsigned long , char const * , void * ) ;

/* Non-threaded IRQ request: thread_fn is NULL. */
__inline static int request_irq(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { int tmp ; { tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int , void * ))0, flags, name, dev); return (tmp); } }

/* LDV call-site wrappers for request_irq (one per instrumented call). */
__inline static int ldv_request_irq_19(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ;
__inline static int ldv_request_irq_20(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ;
__inline static int ldv_request_irq_23(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ;
__inline static int ldv_request_irq_24(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void
*dev ) ;

extern void free_irq(unsigned int , void * ) ;

/* LDV call-site wrappers for free_irq. */
void ldv_free_irq_21(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ;
void ldv_free_irq_22(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ;
void ldv_free_irq_25(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ;
void ldv_free_irq_26(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ;
void ldv_free_irq_27(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ;

/* View an ifreq's union as MII ioctl data. */
__inline static struct mii_ioctl_data *if_mii(struct ifreq *rq ) { { return ((struct mii_ioctl_data *)(& rq->ifr_ifru)); } }

extern void __napi_schedule(struct napi_struct * ) ;

/* NAPI state bit 1 = DISABLE pending. */
__inline static bool napi_disable_pending(struct napi_struct *n ) { int tmp ; { tmp = constant_test_bit(1L, (unsigned long const volatile *)(& n->state)); return (tmp != 0); } }

/* Claim the SCHED bit (bit 0) if no disable is pending; true on success. */
__inline static bool napi_schedule_prep(struct napi_struct *n ) { bool tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = napi_disable_pending(n); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { tmp___1 = test_and_set_bit(0L, (unsigned long volatile *)(& n->state)); if (tmp___1 == 0) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } return ((bool )tmp___2); } }

/* Schedule the poll routine if we won the SCHED bit. */
__inline static void napi_schedule(struct napi_struct *n ) { bool tmp ; { tmp = napi_schedule_prep(n); if ((int )tmp) { __napi_schedule(n); } else { } return; } }

/* LDV stub: real napi_complete body abstracted away for verification. */
__inline static void napi_complete(struct napi_struct *n ) { { return; } }

extern void napi_disable(struct napi_struct * ) ;

/* Re-enable NAPI: BUG()s if SCHED was not held, then barrier + clear. */
__inline static void napi_enable(struct napi_struct *n ) { int tmp ; long tmp___0 ; { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& n->state)); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/netdevice.h"), "i" (507), "i" (12UL)); ldv_42805: ; goto ldv_42805; } else { } __asm__ volatile ("": : : "memory"); clear_bit(0L, (unsigned
long volatile *)(& n->state)); return; } }

/* Busy-wait (msleep(1) per iteration) until the SCHED bit clears. */
__inline static void napi_synchronize(struct napi_struct const *n ) { int tmp ; { goto ldv_42810; ldv_42809: msleep(1U); ldv_42810: tmp = constant_test_bit(0L, (unsigned long const volatile *)(& n->state)); if (tmp != 0) { goto ldv_42809; } else { } return; } }

/* Index into the device's TX queue array. */
__inline static struct netdev_queue *netdev_get_tx_queue(struct net_device const *dev , unsigned int index ) { { return ((struct netdev_queue *)dev->_tx + (unsigned long )index); } }

/* Driver-private area lives 3008 bytes past struct net_device here
 * (CIL-computed ALIGN(sizeof(struct net_device), NETDEV_ALIGN)). */
__inline static void *netdev_priv(struct net_device const *dev ) { { return ((void *)dev + 3008U); } }

extern void netif_napi_add(struct net_device * , struct napi_struct * , int (*)(struct napi_struct * , int ) , int ) ;
extern void netif_napi_del(struct napi_struct * ) ;
extern void free_netdev(struct net_device * ) ;
void ldv_free_netdev_31(struct net_device *dev ) ;
void ldv_free_netdev_37(struct net_device *dev ) ;
extern void netif_schedule_queue(struct netdev_queue * ) ;

/* Queue state bit 0 = DRV_XOFF; clearing it starts the queue. */
__inline static void netif_tx_start_queue(struct netdev_queue *dev_queue ) { { clear_bit(0L, (unsigned long volatile *)(& dev_queue->state)); return; } }

/* Start every real TX queue on the device. */
__inline static void netif_tx_start_all_queues(struct net_device *dev ) { unsigned int i ; struct netdev_queue *txq ; struct netdev_queue *tmp ; { i = 0U; goto ldv_43879; ldv_43878: tmp = netdev_get_tx_queue((struct net_device const *)dev, i); txq = tmp; netif_tx_start_queue(txq); i = i + 1U; ldv_43879: ; if (dev->num_tx_queues > i) { goto ldv_43878; } else { } return; } }

/* Stop one TX queue (set DRV_XOFF). */
__inline static void netif_tx_stop_queue(struct netdev_queue *dev_queue ) { { set_bit(0L, (unsigned long volatile *)(& dev_queue->state)); return; } }

extern void netif_tx_stop_all_queues(struct net_device * ) ;

/* Queue stopped by the driver? */
__inline static bool netif_tx_queue_stopped(struct netdev_queue const *dev_queue ) { int tmp ; { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& dev_queue->state)); return (tmp != 0); } }

/* Queue stopped by the driver OR by byte-queue-limits (bits 0|1 = 3UL). */
__inline static bool netif_xmit_stopped(struct netdev_queue const *dev_queue ) { {
return (((unsigned long )dev_queue->state & 3UL) != 0UL); } }

/* BQL accounting on transmit: record queued bytes; if the limit is hit set
 * STACK_XOFF (bit 1), fence, and re-check to close the race with the
 * completion path. */
__inline static void netdev_tx_sent_queue(struct netdev_queue *dev_queue , unsigned int bytes ) { int tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; { dql_queued(& dev_queue->dql, bytes); tmp = dql_avail((struct dql const *)(& dev_queue->dql)); tmp___0 = ldv__builtin_expect(tmp >= 0, 1L); if (tmp___0 != 0L) { return; } else { } set_bit(1L, (unsigned long volatile *)(& dev_queue->state)); __asm__ volatile ("mfence": : : "memory"); tmp___1 = dql_avail((struct dql const *)(& dev_queue->dql)); tmp___2 = ldv__builtin_expect(tmp___1 >= 0, 0L); if (tmp___2 != 0L) { clear_bit(1L, (unsigned long volatile *)(& dev_queue->state)); } else { } return; } }

/* BQL accounting on completion: credit completed bytes and, if the queue
 * was stopped by BQL and capacity is back, wake it. */
__inline static void netdev_tx_completed_queue(struct netdev_queue *dev_queue , unsigned int pkts , unsigned int bytes ) { long tmp ; int tmp___0 ; int tmp___1 ; { tmp = ldv__builtin_expect(bytes == 0U, 0L); if (tmp != 0L) { return; } else { } dql_completed(& dev_queue->dql, bytes); __asm__ volatile ("mfence": : : "memory"); tmp___0 = dql_avail((struct dql const *)(& dev_queue->dql)); if (tmp___0 < 0) { return; } else { } tmp___1 = test_and_clear_bit(1L, (unsigned long volatile *)(& dev_queue->state)); if (tmp___1 != 0) { netif_schedule_queue(dev_queue); } else { } return; } }

/* Reset BQL state for a queue (e.g. on ring teardown). */
__inline static void netdev_tx_reset_queue(struct netdev_queue *q ) { { clear_bit(1L, (unsigned long volatile *)(& q->state)); dql_reset(& q->dql); return; } }

/* Device state bit 0 = __LINK_STATE_START. */
__inline static bool netif_running(struct net_device const *dev ) { int tmp ; { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& dev->state)); return (tmp != 0); } }

/* Stop a specific subqueue by index. */
__inline static void netif_stop_subqueue(struct net_device *dev , u16 queue_index ) { struct netdev_queue *txq ; struct netdev_queue *tmp ; { tmp = netdev_get_tx_queue((struct net_device const *)dev, (unsigned int )queue_index); txq = tmp; netif_tx_stop_queue(txq); return; } }

/* Is the given subqueue driver-stopped?
 * (Body continues past this block boundary.) */
__inline static bool __netif_subqueue_stopped(struct net_device const *dev , u16
queue_index ) { struct netdev_queue *txq ; struct netdev_queue *tmp ; bool tmp___0 ; { tmp = netdev_get_tx_queue(dev, (unsigned int )queue_index); txq = tmp; tmp___0 = netif_tx_queue_stopped((struct netdev_queue const *)txq); return (tmp___0); } }

extern void netif_wake_subqueue(struct net_device * , u16 ) ;
extern int netif_set_real_num_tx_queues(struct net_device * , unsigned int ) ;
extern int netif_set_real_num_rx_queues(struct net_device * , unsigned int ) ;
extern void __dev_kfree_skb_any(struct sk_buff * , enum skb_free_reason ) ;

/* Free an skb from any context; reason 1 = dropped. */
__inline static void dev_kfree_skb_any(struct sk_buff *skb ) { { __dev_kfree_skb_any(skb, 1); return; } }

/* Free an skb from any context; reason 0 = consumed (not a drop). */
__inline static void dev_consume_skb_any(struct sk_buff *skb ) { { __dev_kfree_skb_any(skb, 0); return; } }

extern gro_result_t napi_gro_receive(struct napi_struct * , struct sk_buff * ) ;

/* Carrier is OK when __LINK_STATE_NOCARRIER (bit 2) is clear. */
__inline static bool netif_carrier_ok(struct net_device const *dev ) { int tmp ; { tmp = constant_test_bit(2L, (unsigned long const volatile *)(& dev->state)); return (tmp == 0); } }

extern void netif_carrier_on(struct net_device * ) ;
extern void netif_carrier_off(struct net_device * ) ;
extern void netif_device_detach(struct net_device * ) ;
extern void netif_device_attach(struct net_device * ) ;

/* Translate a module 'debug' parameter into a msg_enable bitmap:
 * out-of-range -> defaults, 0 -> none, else low `debug_value` bits set. */
__inline static u32 netif_msg_init(int debug_value , int default_msg_enable_bits ) { { if (debug_value < 0 || (unsigned int )debug_value > 31U) { return ((u32 )default_msg_enable_bits); } else { } if (debug_value == 0) { return (0U); } else { } return ((u32 )((1 << debug_value) + -1)); } }

extern int register_netdev(struct net_device * ) ;
int ldv_register_netdev_30(struct net_device *dev ) ;
extern void unregister_netdev(struct net_device * ) ;
void ldv_unregister_netdev_36(struct net_device *dev ) ;
extern void netdev_rss_key_fill(void * , size_t ) ;
extern netdev_features_t passthru_features_check(struct sk_buff * , struct net_device * , netdev_features_t ) ;
extern void netdev_err(struct net_device const * , char const * , ...)
/* Continuation of the netdev_err declaration, then more logging externs. */
; extern void netdev_warn(struct net_device const * , char const * , ...) ;
extern void netdev_info(struct net_device const * , char const * , ...) ;
extern void rtnl_lock(void) ;
extern void rtnl_unlock(void) ;

/* Transport header viewed as a TCP header. */
__inline static struct tcphdr *tcp_hdr(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_transport_header(skb); return ((struct tcphdr *)tmp); } }

/* TCP header length in bytes (doff is in 32-bit words). */
__inline static unsigned int tcp_hdrlen(struct sk_buff const *skb ) { struct tcphdr *tmp ; { tmp = tcp_hdr(skb); return ((unsigned int )((int )tmp->doff * 4)); } }

/* Network header viewed as IPv6 / IPv4 headers. */
__inline static struct ipv6hdr *ipv6_hdr(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_network_header(skb); return ((struct ipv6hdr *)tmp); } }
__inline static struct iphdr *ip_hdr(struct sk_buff const *skb ) { unsigned char *tmp ; { tmp = skb_network_header(skb); return ((struct iphdr *)tmp); } }

extern u32 eth_get_headlen(void * , unsigned int ) ;
extern __be16 eth_type_trans(struct sk_buff * , struct net_device * ) ;
extern int eth_validate_addr(struct net_device * ) ;
extern struct net_device *alloc_etherdev_mqs(int , unsigned int , unsigned int ) ;

/* All-zero MAC address test, done as one u32 + one u16 compare over the
 * 6 bytes (assumes 2-byte alignment, as upstream does). */
__inline static bool is_zero_ether_addr(u8 const *addr ) { { return (((unsigned int )*((u32 const *)addr) | (unsigned int )*((u16 const *)addr + 4U)) == 0U); } }

/* Multicast = lowest bit of the first octet set (little-endian u32 load). */
__inline static bool is_multicast_ether_addr(u8 const *addr ) { u32 a ; { a = *((u32 const *)addr); return ((a & 1U) != 0U); } }

/* Valid unicast address: neither multicast nor all-zero. */
__inline static bool is_valid_ether_addr(u8 const *addr ) { bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = is_multicast_ether_addr(addr); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { tmp___1 = is_zero_ether_addr(addr); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp___3 = 1; } else { tmp___3 = 0; } } else { tmp___3 = 0; } return ((bool )tmp___3); } }

/* Zero a 6-byte MAC address. */
__inline static void eth_zero_addr(u8 *addr ) { { memset((void *)addr, 0, 6UL); return; } }

/* Pad an Ethernet frame to the 60-byte minimum (ETH_ZLEN). */
__inline static int eth_skb_pad(struct sk_buff *skb ) { int tmp ; { tmp =
skb_put_padto(skb, 60U); return (tmp); } }

/* Record a hardware-accelerated VLAN tag; 4096U is VLAN_TAG_PRESENT. */
__inline static void __vlan_hwaccel_put_tag(struct sk_buff *skb , __be16 vlan_proto , u16 vlan_tci ) { { skb->vlan_proto = vlan_proto; skb->vlan_tci = (__u16 )((unsigned int )vlan_tci | 4096U); return; } }

/* Walk nested VLAN headers to the encapsulated ethertype.  129U/43144U are
 * big-endian ETH_P_8021Q/ETH_P_8021AD as host u16s; each hop pulls 4 more
 * bytes (VLAN_HLEN).  Returns 0 on malformed input; optionally reports the
 * final header depth through *depth. */
__inline static __be16 __vlan_get_protocol(struct sk_buff *skb , __be16 type , int *depth ) { unsigned int vlan_depth ; int __ret_warn_on ; long tmp ; long tmp___0 ; struct vlan_hdr *vh ; int tmp___1 ; long tmp___2 ; { vlan_depth = (unsigned int )skb->mac_len; if ((unsigned int )type == 129U || (unsigned int )type == 43144U) { if (vlan_depth != 0U) { __ret_warn_on = vlan_depth <= 3U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/if_vlan.h", 492); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (0U); } else { } vlan_depth = vlan_depth - 4U; } else { vlan_depth = 14U; } ldv_52065: tmp___1 = pskb_may_pull(skb, vlan_depth + 4U); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { return (0U); } else { } vh = (struct vlan_hdr *)skb->data + (unsigned long )vlan_depth; type = vh->h_vlan_encapsulated_proto; vlan_depth = vlan_depth + 4U; if ((unsigned int )type == 129U || (unsigned int )type == 43144U) { goto ldv_52065; } else { } } else { } if ((unsigned long )depth != (unsigned long )((int *)0)) { *depth = (int )vlan_depth; } else { } return (type); } }

/* Convenience wrapper starting from skb->protocol. */
__inline static __be16 vlan_get_protocol(struct sk_buff *skb ) { __be16 tmp ; { tmp = __vlan_get_protocol(skb, (int )skb->protocol, (int *)0); return (tmp); } }

/* Nonzero when the PCI channel is NOT in normal state
 * (pci_channel_io_normal == 1). */
__inline static int pci_channel_offline(struct pci_dev *pdev ) { { return (pdev->error_state != 1U); } }

extern int pci_bus_read_config_word(struct pci_bus * , unsigned int , int , u16 * ) ;
extern int pci_bus_write_config_word(struct pci_bus * , unsigned int , int , u16 ) ;

/* Config-space word read/write wrappers routed through the device's bus. */
__inline static int pci_read_config_word(struct pci_dev const *dev , int where , u16 *val ) { int tmp ; { tmp =
pci_bus_read_config_word(dev->bus, dev->devfn, where, val); return (tmp); } }
__inline static int pci_write_config_word(struct pci_dev const *dev , int where , u16 val ) { int tmp ; { tmp = pci_bus_write_config_word(dev->bus, dev->devfn, where, (int )val); return (tmp); } }

extern int pcie_capability_read_word(struct pci_dev * , int , u16 * ) ;
extern int pcie_capability_write_word(struct pci_dev * , int , u16 ) ;
extern int pci_enable_device_mem(struct pci_dev * ) ;
extern void pci_disable_device(struct pci_dev * ) ;
extern void pci_set_master(struct pci_dev * ) ;
extern int pci_select_bars(struct pci_dev * , unsigned long ) ;
extern bool pci_device_is_present(struct pci_dev * ) ;
extern int pci_save_state(struct pci_dev * ) ;
extern void pci_restore_state(struct pci_dev * ) ;
extern int pci_set_power_state(struct pci_dev * , pci_power_t ) ;
extern int __pci_enable_wake(struct pci_dev * , pci_power_t , bool , bool ) ;
extern int pci_wake_from_d3(struct pci_dev * , bool ) ;
extern int pci_prepare_to_sleep(struct pci_dev * ) ;

/* Non-runtime wake enable (runtime=false). */
__inline static int pci_enable_wake(struct pci_dev *dev , pci_power_t state , bool enable ) { int tmp ; { tmp = __pci_enable_wake(dev, state, 0, (int )enable); return (tmp); } }

extern int pci_request_selected_regions(struct pci_dev * , int , char const * ) ;
extern void pci_release_selected_regions(struct pci_dev * , int ) ;
extern int __pci_register_driver(struct pci_driver * , struct module * , char const * ) ;
int ldv___pci_register_driver_17(struct pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 ) ;
extern void pci_unregister_driver(struct pci_driver * ) ;
void ldv_pci_unregister_driver_18(struct pci_driver *ldv_func_arg1 ) ;
extern void pci_disable_msi(struct pci_dev * ) ;
extern void pci_disable_msix(struct pci_dev * ) ;
extern int pci_enable_msi_range(struct pci_dev * , int , int ) ;

/* Request exactly nvec MSI vectors; 0 on success, negative errno through. */
__inline static int pci_enable_msi_exact(struct pci_dev *dev , int nvec ) { int rc ; int tmp ; { tmp =
pci_enable_msi_range(dev, nvec, nvec); rc = tmp; if (rc < 0) { return (rc); } else { } return (0); } }

extern int pci_enable_msix_range(struct pci_dev * , struct msix_entry * , int , int ) ;

/* drvdata accessors via the embedded struct device. */
__inline static void *pci_get_drvdata(struct pci_dev *pdev ) { void *tmp ; { tmp = dev_get_drvdata((struct device const *)(& pdev->dev)); return (tmp); } }
__inline static void pci_set_drvdata(struct pci_dev *pdev , void *data ) { { dev_set_drvdata(& pdev->dev, data); return; } }

/* Device name string of a PCI device. */
__inline static char const *pci_name(struct pci_dev const *pdev ) { char const *tmp ; { tmp = dev_name(& pdev->dev); return (tmp); } }

extern int pci_enable_sriov(struct pci_dev * , int ) ;
extern void pci_disable_sriov(struct pci_dev * ) ;
extern int pci_num_vf(struct pci_dev * ) ;
extern int pci_vfs_assigned(struct pci_dev * ) ;
extern int pci_sriov_set_totalvfs(struct pci_dev * , u16 ) ;
extern int pci_enable_pcie_error_reporting(struct pci_dev * ) ;
extern int pci_disable_pcie_error_reporting(struct pci_dev * ) ;
extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev * ) ;
extern int __pm_runtime_idle(struct device * , int ) ;
extern int __pm_runtime_resume(struct device * , int ) ;
extern int pm_schedule_suspend(struct device * , unsigned int ) ;

/* Runtime-PM usage-count helpers; the magic ints (0, 4, 5) are the RPM_*
 * flag combinations used by the upstream wrappers. */
__inline static void pm_runtime_get_noresume(struct device *dev ) { { atomic_inc(& dev->power.usage_count); return; } }
__inline static void pm_runtime_put_noidle(struct device *dev ) { { atomic_add_unless(& dev->power.usage_count, -1, 0); return; } }
__inline static int pm_runtime_resume(struct device *dev ) { int tmp ; { tmp = __pm_runtime_resume(dev, 0); return (tmp); } }
__inline static int pm_runtime_get_sync(struct device *dev ) { int tmp ; { tmp = __pm_runtime_resume(dev, 4); return (tmp); } }
__inline static int pm_runtime_put(struct device *dev ) { int tmp ; { tmp = __pm_runtime_idle(dev, 5); return (tmp); } }
__inline static int pm_runtime_put_sync(struct device *dev ) { int tmp ; { tmp = __pm_runtime_idle(dev, 4); return (tmp); } }
/* ---- DCA / I2C externs and igb driver prototypes ---- */
extern void dca_register_notify(struct notifier_block * ) ;
extern void dca_unregister_notify(struct notifier_block * ) ;
extern int dca_add_requester(struct device * ) ;
extern int dca_remove_requester(struct device * ) ;
extern u8 dca3_get_tag(struct device * , int ) ;
extern s32 i2c_smbus_read_byte_data(struct i2c_client const * , u8 ) ;
extern s32 i2c_smbus_write_byte_data(struct i2c_client const * , u8 , u8 ) ;
extern void i2c_del_adapter(struct i2c_adapter * ) ;

/* e1000/igb hardware-layer entry points (defined in sibling objects). */
u32 igb_rd32(struct e1000_hw *hw , u32 reg ) ;
s32 igb_check_downshift(struct e1000_hw *hw ) ;
s32 igb_check_reset_block(struct e1000_hw *hw ) ;
void igb_power_up_phy_copper(struct e1000_hw *hw ) ;
s32 igb_read_part_string(struct e1000_hw *hw , u8 *part_num , u32 part_num_size ) ;
void igb_get_fw_version(struct e1000_hw *hw , struct e1000_fw_version *fw_vers ) ;
s32 igb_read_mbx(struct e1000_hw *hw , u32 *msg , u16 size , u16 mbx_id ) ;
s32 igb_write_mbx(struct e1000_hw *hw , u32 *msg , u16 size , u16 mbx_id ) ;
s32 igb_check_for_msg(struct e1000_hw *hw , u16 mbx_id ) ;
s32 igb_check_for_ack(struct e1000_hw *hw , u16 mbx_id ) ;
s32 igb_check_for_rst(struct e1000_hw *hw , u16 mbx_id ) ;
struct e1000_info const e1000_82575_info ;
struct net_device *igb_get_hw_dev(struct e1000_hw *hw ) ;
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw , u32 reg , u16 *value ) ;
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw , u32 reg , u16 *value ) ;
void igb_read_pci_cfg(struct e1000_hw *hw , u32 reg , u16 *value ) ;
void igb_write_pci_cfg(struct e1000_hw *hw , u32 reg , u16 *value ) ;
bool igb_get_flash_presence_i210(struct e1000_hw *hw ) ;
s32 igb_disable_pcie_master(struct e1000_hw *hw ) ;
s32 igb_force_mac_fc(struct e1000_hw *hw ) ;
s32 igb_get_bus_info_pcie(struct e1000_hw *hw ) ;
void igb_update_mc_addr_list(struct e1000_hw *hw , u8 *mc_addr_list , u32 mc_addr_count ) ;
s32 igb_setup_link(struct e1000_hw *hw ) ;
s32 igb_validate_mdi_setting(struct e1000_hw *hw ) ;
s32 igb_vfta_set(struct e1000_hw *hw , u32 vid ,
bool add ) ;
void igb_config_collision_dist(struct e1000_hw *hw ) ;
void igb_mta_set(struct e1000_hw *hw , u32 hash_value ) ;
bool igb_enable_mng_pass_thru(struct e1000_hw *hw ) ;
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw ) ;
void igb_power_up_serdes_link_82575(struct e1000_hw *hw ) ;
void igb_power_down_phy_copper_82575(struct e1000_hw *hw ) ;
void igb_rx_fifo_flush_82575(struct e1000_hw *hw ) ;
s32 igb_read_i2c_byte(struct e1000_hw *hw , u8 byte_offset , u8 dev_addr , u8 *data ) ;
s32 igb_write_i2c_byte(struct e1000_hw *hw , u8 byte_offset , u8 dev_addr , u8 data ) ;
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw , bool enable , int pf ) ;
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw , bool enable ) ;
void igb_vmdq_set_replication_pf(struct e1000_hw *hw , bool enable ) ;
u16 igb_rxpbs_adjust_82580(u32 data ) ;
s32 igb_set_eee_i350(struct e1000_hw *hw , bool adv1G , bool adv100M ) ;
s32 igb_set_eee_i354(struct e1000_hw *hw , bool adv1G , bool adv100M ) ;
extern void ptp_clock_event(struct ptp_clock * , struct ptp_clock_event * ) ;
extern int i2c_bit_add_bus(struct i2c_adapter * ) ;

/* Mask the advanced RX descriptor's writeback status/error field against
 * the requested bits (already little-endian on x86). */
__inline static __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc , u32 const stat_err_bits ) { { return (rx_desc->wb.upper.status_error & (__le32 )stat_err_bits); } }

/* Free descriptors remaining in a ring: clean-to-use distance minus one,
 * wrapping by ring->count when use has passed clean. */
__inline static int igb_desc_unused(struct igb_ring *ring ) { { if ((int )ring->next_to_clean > (int )ring->next_to_use) { return (((int )ring->next_to_clean - (int )ring->next_to_use) + -1); } else { } return ((((int )ring->count + (int )ring->next_to_clean) - (int )ring->next_to_use) + -1); } }

/* Module-wide name/version strings ("igb" fits the 4-byte array with NUL;
 * version string sized elsewhere). */
char igb_driver_name[4U] ;
char igb_driver_version[9U] ;

/* igb core entry points (defined later in this file / sibling objects). */
int igb_up(struct igb_adapter *adapter ) ;
void igb_down(struct igb_adapter *adapter ) ;
void igb_reinit_locked(struct igb_adapter *adapter ) ;
void igb_reset(struct igb_adapter *adapter ) ;
int igb_reinit_queues(struct igb_adapter *adapter ) ;
void igb_write_rss_indir_tbl(struct igb_adapter *adapter ) ;
int igb_set_spd_dplx(struct
igb_adapter *adapter , u32 spd , u8 dplx ) ;
int igb_setup_tx_resources(struct igb_ring *tx_ring ) ;
int igb_setup_rx_resources(struct igb_ring *rx_ring ) ;
void igb_free_tx_resources(struct igb_ring *tx_ring ) ;
void igb_free_rx_resources(struct igb_ring *rx_ring ) ;
void igb_configure_tx_ring(struct igb_adapter *adapter , struct igb_ring *ring ) ;
void igb_configure_rx_ring(struct igb_adapter *adapter , struct igb_ring *ring ) ;
void igb_setup_tctl(struct igb_adapter *adapter ) ;
void igb_setup_rctl(struct igb_adapter *adapter ) ;
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb , struct igb_ring *tx_ring ) ;
void igb_unmap_and_free_tx_resource(struct igb_ring *ring , struct igb_tx_buffer *tx_buffer ) ;
void igb_alloc_rx_buffers(struct igb_ring *rx_ring , u16 cleaned_count ) ;
void igb_update_stats(struct igb_adapter *adapter , struct rtnl_link_stats64 *net_stats ) ;
bool igb_has_link(struct igb_adapter *adapter ) ;
void igb_set_ethtool_ops(struct net_device *netdev ) ;
void igb_power_up_link(struct igb_adapter *adapter ) ;
void igb_set_fw_version(struct igb_adapter *adapter ) ;
void igb_ptp_init(struct igb_adapter *adapter ) ;
void igb_ptp_stop(struct igb_adapter *adapter ) ;
void igb_ptp_reset(struct igb_adapter *adapter ) ;
void igb_ptp_rx_hang(struct igb_adapter *adapter ) ;
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector , struct sk_buff *skb ) ;
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector , unsigned char *va , struct sk_buff *skb ) ;
int igb_ptp_set_ts_config(struct net_device *netdev , struct ifreq *ifr ) ;
int igb_ptp_get_ts_config(struct net_device *netdev , struct ifreq *ifr ) ;
void igb_sysfs_exit(struct igb_adapter *adapter ) ;
int igb_sysfs_init(struct igb_adapter *adapter ) ;

/* Reset the PHY through the ops table when a reset op is installed;
 * a missing op is treated as success (0). */
__inline static s32 igb_reset_phy(struct e1000_hw *hw ) { s32 tmp ; { if ((unsigned long )hw->phy.ops.reset != (unsigned long )((s32 (*)(struct e1000_hw * ))0)) { tmp = (*(hw->phy.ops.reset))(hw); return (tmp); } else { } return (0); } }

/* Next definition is cut off at the chunk boundary. */
__inline static s32
igb_read_phy_reg(struct e1000_hw *hw , u32 offset , u16 *data ) { s32 tmp ; { if ((unsigned long )hw->phy.ops.read_reg != (unsigned long )((s32 (*)(struct e1000_hw * , u32 , u16 * ))0)) { tmp = (*(hw->phy.ops.read_reg))(hw, offset, data); return (tmp); } else { } return (0); } } __inline static s32 igb_get_phy_info(struct e1000_hw *hw ) { s32 tmp ; { if ((unsigned long )hw->phy.ops.get_phy_info != (unsigned long )((s32 (*)(struct e1000_hw * ))0)) { tmp = (*(hw->phy.ops.get_phy_info))(hw); return (tmp); } else { } return (0); } } __inline static struct netdev_queue *txring_txq(struct igb_ring const *tx_ring ) { struct netdev_queue *tmp ; { tmp = netdev_get_tx_queue((struct net_device const *)tx_ring->netdev, (unsigned int )tx_ring->queue_index); return (tmp); } } char igb_driver_name[4U] = { 'i', 'g', 'b', '\000'}; char igb_driver_version[9U] = { '5', '.', '2', '.', '1', '8', '-', 'k', '\000'}; static char const igb_driver_string[41U] = { 'I', 'n', 't', 'e', 'l', '(', 'R', ')', ' ', 'G', 'i', 'g', 'a', 'b', 'i', 't', ' ', 'E', 't', 'h', 'e', 'r', 'n', 'e', 't', ' ', 'N', 'e', 't', 'w', 'o', 'r', 'k', ' ', 'D', 'r', 'i', 'v', 'e', 'r', '\000'}; static char const igb_copyright[43U] = { 'C', 'o', 'p', 'y', 'r', 'i', 'g', 'h', 't', ' ', '(', 'c', ')', ' ', '2', '0', '0', '7', '-', '2', '0', '1', '4', ' ', 'I', 'n', 't', 'e', 'l', ' ', 'C', 'o', 'r', 'p', 'o', 'r', 'a', 't', 'i', 'o', 'n', '.', '\000'}; static struct e1000_info const *igb_info_tbl[1U] = { & e1000_82575_info}; static struct pci_device_id const igb_pci_tbl[36U] = { {32902U, 8000U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 8001U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 8005U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5433U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5427U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5430U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5431U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5432U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
{32902U, 5499U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5500U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5409U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5410U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5411U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5412U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5390U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5391U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5415U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5392U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5393U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5398U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 1080U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 1082U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 1084U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 1088U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4297U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5386U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5400U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4326U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4327U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5389U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 5414U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4328U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4263U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4265U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {32902U, 4310U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {0U, 0U, 0U, 0U, 0U, 0U, 0UL}}; struct pci_device_id const __mod_pci__igb_pci_tbl_device_table[36U] ; static int igb_setup_all_tx_resources(struct igb_adapter *adapter ) ; static int igb_setup_all_rx_resources(struct igb_adapter *adapter ) ; static void igb_free_all_tx_resources(struct igb_adapter *adapter ) ; static void igb_free_all_rx_resources(struct igb_adapter *adapter ) ; static void igb_setup_mrqc(struct igb_adapter *adapter ) ; static int igb_probe(struct pci_dev *pdev , 
struct pci_device_id const *ent ) ; static void igb_remove(struct pci_dev *pdev ) ; static int igb_sw_init(struct igb_adapter *adapter ) ; static int igb_open(struct net_device *netdev ) ; static int igb_close(struct net_device *netdev ) ; static void igb_configure(struct igb_adapter *adapter ) ; static void igb_configure_tx(struct igb_adapter *adapter ) ; static void igb_configure_rx(struct igb_adapter *adapter ) ; static void igb_clean_all_tx_rings(struct igb_adapter *adapter ) ; static void igb_clean_all_rx_rings(struct igb_adapter *adapter ) ; static void igb_clean_tx_ring(struct igb_ring *tx_ring ) ; static void igb_clean_rx_ring(struct igb_ring *rx_ring ) ; static void igb_set_rx_mode(struct net_device *netdev ) ; static void igb_update_phy_info(unsigned long data ) ; static void igb_watchdog(unsigned long data ) ; static void igb_watchdog_task(struct work_struct *work ) ; static netdev_tx_t igb_xmit_frame(struct sk_buff *skb , struct net_device *netdev ) ; static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev , struct rtnl_link_stats64 *stats ) ; static int igb_change_mtu(struct net_device *netdev , int new_mtu ) ; static int igb_set_mac(struct net_device *netdev , void *p ) ; static void igb_set_uta(struct igb_adapter *adapter ) ; static irqreturn_t igb_intr(int irq , void *data ) ; static irqreturn_t igb_intr_msi(int irq , void *data ) ; static irqreturn_t igb_msix_other(int irq , void *data ) ; static irqreturn_t igb_msix_ring(int irq , void *data ) ; static void igb_update_dca(struct igb_q_vector *q_vector ) ; static void igb_setup_dca(struct igb_adapter *adapter ) ; static int igb_poll(struct napi_struct *napi , int budget ) ; static bool igb_clean_tx_irq(struct igb_q_vector *q_vector ) ; static bool igb_clean_rx_irq(struct igb_q_vector *q_vector , int const budget ) ; static int igb_ioctl(struct net_device *netdev , struct ifreq *ifr , int cmd ) ; static void igb_tx_timeout(struct net_device *netdev ) ; static void 
igb_reset_task(struct work_struct *work ) ; static void igb_vlan_mode(struct net_device *netdev , netdev_features_t features ) ; static int igb_vlan_rx_add_vid(struct net_device *netdev , __be16 proto , u16 vid ) ; static int igb_vlan_rx_kill_vid(struct net_device *netdev , __be16 proto , u16 vid ) ; static void igb_restore_vlan(struct igb_adapter *adapter ) ; static void igb_rar_set_qsel(struct igb_adapter *adapter , u8 *addr , u32 index , u8 qsel ) ; static void igb_ping_all_vfs(struct igb_adapter *adapter ) ; static void igb_msg_task(struct igb_adapter *adapter ) ; static void igb_vmm_control(struct igb_adapter *adapter ) ; static int igb_set_vf_mac(struct igb_adapter *adapter , int vf , unsigned char *mac_addr ) ; static void igb_restore_vf_multicasts(struct igb_adapter *adapter ) ; static int igb_ndo_set_vf_mac(struct net_device *netdev , int vf , u8 *mac ) ; static int igb_ndo_set_vf_vlan(struct net_device *netdev , int vf , u16 vlan , u8 qos ) ; static int igb_ndo_set_vf_bw(struct net_device *netdev , int vf , int min_tx_rate , int max_tx_rate ) ; static int igb_ndo_set_vf_spoofchk(struct net_device *netdev , int vf , bool setting ) ; static int igb_ndo_get_vf_config(struct net_device *netdev , int vf , struct ifla_vf_info *ivi ) ; static void igb_check_vf_rate_limit(struct igb_adapter *adapter ) ; static int igb_vf_configure(struct igb_adapter *adapter , int vf ) ; static int igb_pci_enable_sriov(struct pci_dev *dev , int num_vfs ) ; static int igb_suspend(struct device *dev ) ; static int igb_resume(struct device *dev ) ; static int igb_runtime_suspend(struct device *dev ) ; static int igb_runtime_resume(struct device *dev ) ; static int igb_runtime_idle(struct device *dev ) ; static struct dev_pm_ops const igb_pm_ops = {0, 0, & igb_suspend, & igb_resume, & igb_suspend, & igb_resume, & igb_suspend, & igb_resume, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & igb_runtime_suspend, & igb_runtime_resume, & igb_runtime_idle}; static void igb_shutdown(struct pci_dev 
*pdev ) ; static int igb_pci_sriov_configure(struct pci_dev *dev , int num_vfs ) ; static int igb_notify_dca(struct notifier_block *nb , unsigned long event , void *p ) ; static struct notifier_block dca_notifier = {& igb_notify_dca, (struct notifier_block *)0, 0}; static void igb_netpoll(struct net_device *netdev ) ; static unsigned int max_vfs ; static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev , pci_channel_state_t state ) ; static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev ) ; static void igb_io_resume(struct pci_dev *pdev ) ; static struct pci_error_handlers const igb_err_handler = {(pci_ers_result_t (*)(struct pci_dev * , enum pci_channel_state ))(& igb_io_error_detected), 0, 0, & igb_io_slot_reset, 0, & igb_io_resume}; static void igb_init_dmac(struct igb_adapter *adapter , u32 pba ) ; static struct pci_driver igb_driver = {{0, 0}, (char const *)(& igb_driver_name), (struct pci_device_id const *)(& igb_pci_tbl), & igb_probe, & igb_remove, 0, 0, 0, 0, & igb_shutdown, & igb_pci_sriov_configure, & igb_err_handler, {0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & igb_pm_ops, 0}, {{{{{{0}}, 0U, 0U, 0, {0, {0, 0}, 0, 0, 0UL}}}}, {0, 0}}}; static int debug = -1; static struct igb_reg_info const igb_reg_info_tbl[23U] = { {0U, (char *)"CTRL"}, {8U, (char *)"STATUS"}, {24U, (char *)"CTRL_EXT"}, {192U, (char *)"ICR"}, {256U, (char *)"RCTL"}, {10248U, (char *)"RDLEN"}, {10256U, (char *)"RDH"}, {10264U, (char *)"RDT"}, {10280U, (char *)"RXDCTL"}, {10240U, (char *)"RDBAL"}, {10244U, (char *)"RDBAH"}, {1024U, (char *)"TCTL"}, {14336U, (char *)"TDBAL"}, {14340U, (char *)"TDBAH"}, {14344U, (char *)"TDLEN"}, {14352U, (char *)"TDH"}, {14360U, (char *)"TDT"}, {14376U, (char *)"TXDCTL"}, {13328U, (char *)"TDFH"}, {13336U, (char *)"TDFT"}, {13344U, (char *)"TDFHS"}, {13360U, (char *)"TDFPC"}}; static void igb_regdump(struct e1000_hw *hw , struct igb_reg_info *reginfo ) { int n ; char rname[16U] ; u32 regs[8U] ; u32 tmp ; { n = 0; switch 
(reginfo->ofs) { case 10248U: n = 0; goto ldv_56637; ldv_56636: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 10248 : n * 64 + 49160)); n = n + 1; ldv_56637: ; if (n <= 3) { goto ldv_56636; } else { } goto ldv_56639; case 10256U: n = 0; goto ldv_56642; ldv_56641: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 10256 : n * 64 + 49168)); n = n + 1; ldv_56642: ; if (n <= 3) { goto ldv_56641; } else { } goto ldv_56639; case 10264U: n = 0; goto ldv_56646; ldv_56645: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 10264 : n * 64 + 49176)); n = n + 1; ldv_56646: ; if (n <= 3) { goto ldv_56645; } else { } goto ldv_56639; case 10280U: n = 0; goto ldv_56650; ldv_56649: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 10280 : n * 64 + 49192)); n = n + 1; ldv_56650: ; if (n <= 3) { goto ldv_56649; } else { } goto ldv_56639; case 10240U: n = 0; goto ldv_56654; ldv_56653: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? (n + 40) * 256 : (n + 768) * 64)); n = n + 1; ldv_56654: ; if (n <= 3) { goto ldv_56653; } else { } goto ldv_56639; case 10244U: n = 0; goto ldv_56658; ldv_56657: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 10244 : n * 64 + 49156)); n = n + 1; ldv_56658: ; if (n <= 3) { goto ldv_56657; } else { } goto ldv_56639; case 14336U: n = 0; goto ldv_56662; ldv_56661: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? (n + 40) * 256 : (n + 768) * 64)); n = n + 1; ldv_56662: ; if (n <= 3) { goto ldv_56661; } else { } goto ldv_56639; case 14340U: n = 0; goto ldv_56666; ldv_56665: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 14340 : n * 64 + 57348)); n = n + 1; ldv_56666: ; if (n <= 3) { goto ldv_56665; } else { } goto ldv_56639; case 14344U: n = 0; goto ldv_56670; ldv_56669: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 14344 : n * 64 + 57352)); n = n + 1; ldv_56670: ; if (n <= 3) { goto ldv_56669; } else { } goto ldv_56639; case 14352U: n = 0; goto ldv_56674; ldv_56673: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? 
n * 256 + 14352 : n * 64 + 57360)); n = n + 1; ldv_56674: ; if (n <= 3) { goto ldv_56673; } else { } goto ldv_56639; case 14360U: n = 0; goto ldv_56678; ldv_56677: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 14360 : n * 64 + 57368)); n = n + 1; ldv_56678: ; if (n <= 3) { goto ldv_56677; } else { } goto ldv_56639; case 14376U: n = 0; goto ldv_56682; ldv_56681: regs[n] = igb_rd32(hw, (u32 )(n <= 3 ? n * 256 + 14376 : n * 64 + 57384)); n = n + 1; ldv_56682: ; if (n <= 3) { goto ldv_56681; } else { } goto ldv_56639; default: tmp = igb_rd32(hw, reginfo->ofs); printk("\016igb: %-15s %08x\n", reginfo->name, tmp); return; } ldv_56639: snprintf((char *)(& rname), 16UL, "%s%s", reginfo->name, (char *)"[0-3]"); printk("\016igb: %-15s %08x %08x %08x %08x\n", (char *)(& rname), regs[0], regs[1], regs[2], regs[3]); return; } } static void igb_dump(struct igb_adapter *adapter ) { struct net_device *netdev ; struct e1000_hw *hw ; struct igb_reg_info *reginfo ; struct igb_ring *tx_ring ; union e1000_adv_tx_desc *tx_desc ; struct my_u0 *u0 ; struct igb_ring *rx_ring ; union e1000_adv_rx_desc *rx_desc ; u32 staterr ; u16 i ; u16 n ; bool tmp ; int tmp___0 ; struct igb_tx_buffer *buffer_info ; char const *next_desc ; struct igb_tx_buffer *buffer_info___0 ; char const *next_desc___0 ; struct igb_rx_buffer *buffer_info___1 ; void *tmp___1 ; { netdev = adapter->netdev; hw = & adapter->hw; if ((adapter->msg_enable & 8192) == 0) { return; } else { } if ((unsigned long )netdev != (unsigned long )((struct net_device *)0)) { _dev_info((struct device const *)(& (adapter->pdev)->dev), "Net device Info\n"); printk("\016igb: Device Name state trans_start last_rx\n"); printk("\016igb: %-15s %016lX %016lX %016lX\n", (char *)(& netdev->name), netdev->state, netdev->trans_start, netdev->last_rx); } else { } _dev_info((struct device const *)(& (adapter->pdev)->dev), "Register Dump\n"); printk("\016igb: Register Name Value\n"); reginfo = (struct igb_reg_info *)(& igb_reg_info_tbl); goto ldv_56703; 
ldv_56702: igb_regdump(hw, reginfo); reginfo = reginfo + 1; ldv_56703: ; if ((unsigned long )reginfo->name != (unsigned long )((char *)0)) { goto ldv_56702; } else { } if ((unsigned long )netdev == (unsigned long )((struct net_device *)0)) { goto exit; } else { tmp = netif_running((struct net_device const *)netdev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto exit; } else { } } _dev_info((struct device const *)(& (adapter->pdev)->dev), "TX Rings Summary\n"); printk("\016igb: Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); n = 0U; goto ldv_56708; ldv_56707: tx_ring = adapter->tx_ring[(int )n]; buffer_info = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )tx_ring->next_to_clean; printk("\016igb: %5d %5X %5X %016llX %04X %p %016llX\n", (int )n, (int )tx_ring->next_to_use, (int )tx_ring->next_to_clean, buffer_info->dma, buffer_info->len, buffer_info->next_to_watch, (unsigned long long )buffer_info->time_stamp); n = (u16 )((int )n + 1); ldv_56708: ; if ((int )n < adapter->num_tx_queues) { goto ldv_56707; } else { } if ((adapter->msg_enable & 1024) == 0) { goto rx_ring_summary; } else { } _dev_info((struct device const *)(& (adapter->pdev)->dev), "TX Rings Dump\n"); n = 0U; goto ldv_56717; ldv_56716: tx_ring = adapter->tx_ring[(int )n]; printk("\016igb: ------------------------------------\n"); printk("\016igb: TX QUEUE INDEX = %d\n", (int )tx_ring->queue_index); printk("\016igb: ------------------------------------\n"); printk("\016igb: T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); i = 0U; goto ldv_56714; ldv_56713: tx_desc = (union e1000_adv_tx_desc *)tx_ring->desc + (unsigned long )i; buffer_info___0 = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )i; u0 = (struct my_u0 *)tx_desc; if ((int )tx_ring->next_to_use == (int )i && (int )tx_ring->next_to_clean == (int )i) { next_desc = " NTC/U"; } else if ((int )tx_ring->next_to_use == (int )i) { next_desc = " NTU"; } else 
if ((int )tx_ring->next_to_clean == (int )i) { next_desc = " NTC"; } else { next_desc = ""; } printk("\016igb: T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", (int )i, u0->a, u0->b, buffer_info___0->dma, buffer_info___0->len, buffer_info___0->next_to_watch, (unsigned long long )buffer_info___0->time_stamp, buffer_info___0->skb, next_desc); if ((adapter->msg_enable & 4096) != 0 && (unsigned long )buffer_info___0->skb != (unsigned long )((struct sk_buff *)0)) { print_hex_dump("\016", "", 1, 16, 1, (void const *)(buffer_info___0->skb)->data, (size_t )buffer_info___0->len, 1); } else { } i = (u16 )((int )i + 1); ldv_56714: ; if ((unsigned long )tx_ring->desc != (unsigned long )((void *)0) && (int )tx_ring->count > (int )i) { goto ldv_56713; } else { } n = (u16 )((int )n + 1); ldv_56717: ; if ((int )n < adapter->num_tx_queues) { goto ldv_56716; } else { } rx_ring_summary: _dev_info((struct device const *)(& (adapter->pdev)->dev), "RX Rings Summary\n"); printk("\016igb: Queue [NTU] [NTC]\n"); n = 0U; goto ldv_56720; ldv_56719: rx_ring = adapter->rx_ring[(int )n]; printk("\016igb: %5d %5X %5X\n", (int )n, (int )rx_ring->next_to_use, (int )rx_ring->next_to_clean); n = (u16 )((int )n + 1); ldv_56720: ; if ((int )n < adapter->num_rx_queues) { goto ldv_56719; } else { } if ((adapter->msg_enable & 2048) == 0) { goto exit; } else { } _dev_info((struct device const *)(& (adapter->pdev)->dev), "RX Rings Dump\n"); n = 0U; goto ldv_56728; ldv_56727: rx_ring = adapter->rx_ring[(int )n]; printk("\016igb: ------------------------------------\n"); printk("\016igb: RX QUEUE INDEX = %d\n", (int )rx_ring->queue_index); printk("\016igb: ------------------------------------\n"); printk("\016igb: R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); printk("\016igb: RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); i = 0U; goto ldv_56725; ldv_56724: buffer_info___1 = 
rx_ring->__annonCompField117.rx_buffer_info + (unsigned long )i; rx_desc = (union e1000_adv_rx_desc *)rx_ring->desc + (unsigned long )i; u0 = (struct my_u0 *)rx_desc; staterr = rx_desc->wb.upper.status_error; if ((int )rx_ring->next_to_use == (int )i) { next_desc___0 = " NTU"; } else if ((int )rx_ring->next_to_clean == (int )i) { next_desc___0 = " NTC"; } else { next_desc___0 = ""; } if ((int )staterr & 1) { printk("\016igb: %s[0x%03X] %016llX %016llX ---------------- %s\n", (char *)"RWB", (int )i, u0->a, u0->b, next_desc___0); } else { printk("\016igb: %s[0x%03X] %016llX %016llX %016llX %s\n", (char *)"R ", (int )i, u0->a, u0->b, buffer_info___1->dma, next_desc___0); if (((adapter->msg_enable & 4096) != 0 && buffer_info___1->dma != 0ULL) && (unsigned long )buffer_info___1->page != (unsigned long )((struct page *)0)) { tmp___1 = lowmem_page_address((struct page const *)buffer_info___1->page); print_hex_dump("\016", "", 1, 16, 1, (void const *)tmp___1 + (unsigned long )buffer_info___1->page_offset, 2048UL, 1); } else { } } i = (u16 )((int )i + 1); ldv_56725: ; if ((int )rx_ring->count > (int )i) { goto ldv_56724; } else { } n = (u16 )((int )n + 1); ldv_56728: ; if ((int )n < adapter->num_rx_queues) { goto ldv_56727; } else { } exit: ; return; } } static int igb_get_i2c_data(void *data ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; s32 i2cctl ; u32 tmp ; { adapter = (struct igb_adapter *)data; hw = & adapter->hw; tmp = igb_rd32(hw, 4140U); i2cctl = (s32 )tmp; return ((i2cctl & 4096) != 0); } } static void igb_set_i2c_data(void *data , int state ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; s32 i2cctl ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { adapter = (struct igb_adapter *)data; hw = & adapter->hw; tmp = igb_rd32(hw, 4140U); i2cctl = (s32 )tmp; if (state != 0) { i2cctl = i2cctl | 1024; } else { i2cctl = i2cctl & -1025; } i2cctl = i2cctl & -2049; i2cctl = i2cctl | 8192; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); 
/* Tail of igb_set_i2c_data (continued from the previous line): write the
 * updated I2CCTL value back only if the BAR mapping (hw_addr) is non-NULL
 * (device may have been surprise-removed), then read register 8 (STATUS)
 * to flush the posted MMIO write. 4140U is presumably the I2CPARAMS/I2CCTL
 * register offset (0x102C) -- confirm against e1000_regs.h. */
tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )i2cctl, (void volatile *)hw_addr + 4140U); } else { } igb_rd32(hw, 8U); return; } }
/* igb_set_i2c_clk - bit-banged I2C: drive the SCL line.
 * state != 0: set bit 0x200 (clock-out high); state == 0: clear it.
 * Both paths clear bit 0x2000 (presumably the clock output-enable-N bit,
 * i.e. enable driving the line -- TODO confirm bit names against hw.h).
 * The write-back is guarded against a NULL hw_addr and flushed with a
 * STATUS read, same pattern as igb_set_i2c_data above. */
static void igb_set_i2c_clk(void *data , int state ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; s32 i2cctl ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { adapter = (struct igb_adapter *)data; hw = & adapter->hw; tmp = igb_rd32(hw, 4140U); i2cctl = (s32 )tmp; if (state != 0) { i2cctl = i2cctl | 512; i2cctl = i2cctl & -8193; } else { i2cctl = i2cctl & -513; i2cctl = i2cctl & -8193; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )i2cctl, (void volatile *)hw_addr + 4140U); } else { } igb_rd32(hw, 8U); return; } }
/* igb_get_i2c_clk - bit-banged I2C: sample the SCL line.
 * Returns nonzero iff bit 0x4000 (clock-in) of I2CCTL is set. */
static int igb_get_i2c_clk(void *data ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; s32 i2cctl ; u32 tmp ; { adapter = (struct igb_adapter *)data; hw = & adapter->hw; tmp = igb_rd32(hw, 4140U); i2cctl = (s32 )tmp; return ((i2cctl & 16384) != 0); } }
/* Bit-bang algorithm descriptor handed to the i2c-algo-bit core.
 * Positional initializers correspond to struct i2c_algo_bit_data fields:
 * data=0, setsda, setscl, getsda, getscl, pre_xfer=0, post_xfer=0,
 * udelay=5 (us, ~100 kHz), timeout=20 -- verify field order against
 * include/linux/i2c-algo-bit.h for this kernel version. */
static struct i2c_algo_bit_data const igb_i2c_algo = {0, & igb_set_i2c_data, & igb_set_i2c_clk, & igb_get_i2c_data, & igb_get_i2c_clk, 0, 0, 5, 20};
/* igb_get_hw_dev - map an e1000_hw back to its net_device via hw->back
 * (which holds the owning igb_adapter). Used for logging/diagnostics. */
struct net_device *igb_get_hw_dev(struct e1000_hw *hw ) { struct igb_adapter *adapter ; { adapter = (struct igb_adapter *)hw->back; return (adapter->netdev); } }
/* igb_init_module - driver entry: print the banner, register the DCA
 * notifier, then register the PCI driver (via the LDV wrapper around
 * __pci_register_driver). Returns the registration result. */
static int igb_init_module(void) { int ret ; { printk("\016igb: %s - version %s\n", (char const *)(& igb_driver_string), (char *)(& igb_driver_version)); printk("\016igb: %s\n", (char const *)(& igb_copyright)); dca_register_notify(& dca_notifier); ret = ldv___pci_register_driver_17(& igb_driver, & __this_module, "igb"); return (ret); } }
/* igb_exit_module - driver exit: undo igb_init_module in reverse order. */
static void igb_exit_module(void) { { dca_unregister_notify(& dca_notifier); ldv_pci_unregister_driver_18(& igb_driver); return; } }
/* igb_cache_ring_register - assign hardware register indices (reg_idx) to
 * each rx/tx ring. rbase_offset = number of VFs, so PF queues sit after
 * the VF queues. For mac.type == 2U (82576) with SR-IOV active, rx queues
 * 0..rss_queues-1 get the interleaved mapping ((i&1)<<3) + (i>>1) + base;
 * control then FALLS THROUGH (no break -- intentional, as in the upstream
 * driver) into the shared loop, which continues from the current i with a
 * plain linear base+i mapping; tx rings are always mapped linearly.
 * (Loops are expressed as CIL goto/label pairs ldv_567xx.) */
static void igb_cache_ring_register(struct igb_adapter *adapter ) { int i ; int j ; u32 rbase_offset ; { i = 0; j = 0; rbase_offset = adapter->vfs_allocated_count; switch ((unsigned int )adapter->hw.mac.type) { case 2U: ; if (adapter->vfs_allocated_count != 0U) { goto ldv_56792; ldv_56791: (adapter->rx_ring[i])->reg_idx = ((((unsigned int )((u8 )i) & 1U) << 3U) + (unsigned int )((u8 )(i >> 1))) + (unsigned int )((u8 )rbase_offset); i = i + 1; ldv_56792: ; if ((u32 )i < adapter->rss_queues) { goto ldv_56791; } else { } } else { } case 1U: ; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: ; default: ; goto ldv_56802; ldv_56801: (adapter->rx_ring[i])->reg_idx = (int )((u8 )rbase_offset) + (int )((u8 )i); i = i + 1; ldv_56802: ; if (adapter->num_rx_queues > i) { goto ldv_56801; } else { } goto ldv_56805; ldv_56804: (adapter->tx_ring[j])->reg_idx = (int )((u8 )rbase_offset) + (int )((u8 )j); j = j + 1; ldv_56805: ; if (adapter->num_tx_queues > j) { goto ldv_56804; } else { } goto ldv_56807; } ldv_56807: ; return; } }
/* igb_rd32 - MMIO register read with PCIe surprise-removal detection.
 * Recovers the owning igb_adapter by container_of (__mptr + the negative
 * constant 0xfffffffffffff920UL is CIL's rendering of the hw-member
 * offset). hw_addr is sampled once through a volatile pointer; if the BAR
 * mapping is already NULL the read returns all-ones (~0). If a real read
 * returns 0xFFFFFFFF, the device may be gone: for reg 0 (CTRL) that alone
 * is taken as removal; otherwise register 0 is re-read to confirm. On
 * removal, hw_addr is cleared and the netdev is detached so subsequent
 * reads short-circuit. */
u32 igb_rd32(struct e1000_hw *hw , u32 reg ) { struct igb_adapter *igb ; struct e1000_hw const *__mptr ; u8 *hw_addr ; u8 *__var ; u32 value ; long tmp ; struct net_device *netdev ; unsigned int tmp___0 ; { __mptr = (struct e1000_hw const *)hw; igb = (struct igb_adapter *)__mptr + 0xfffffffffffff920UL; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); value = 0U; tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp != 0L) { return (~ value); } else { } value = readl((void const volatile *)hw_addr + (unsigned long )reg); if (value == 4294967295U) { if (reg == 0U) { netdev = igb->netdev; hw->hw_addr = (u8 *)0U; netif_device_detach(netdev); netdev_err((struct net_device const *)netdev, "PCIe link lost, device now detached\n"); } else { tmp___0 = readl((void const volatile *)hw_addr); if (tmp___0 == 4294967295U) { netdev = igb->netdev; hw->hw_addr = (u8 *)0U; netif_device_detach(netdev); netdev_err((struct net_device const *)netdev, "PCIe link lost, device now detached\n"); } else { } } } else { } return (value); } }
/* igb_write_ivar - read-modify-write one 8-bit field of an IVAR entry.
 * Entry address = base 5888 (0x1700) + index*4; the 8-bit field at bit
 * position `offset` is cleared, then (msix_vector | 0x80) is OR-ed in --
 * 0x80 is presumably the IVAR "valid" bit (confirm against the 82575/
 * 82576 datasheet). The write-back is skipped if hw_addr is NULL. */
static void igb_write_ivar(struct e1000_hw *hw , int msix_vector , int index , int offset ) { u32 ivar ; unsigned int tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = readl((void const volatile *)(hw->hw_addr + ((unsigned long )(index << 2) + 5888UL))); ivar = tmp; ivar = ~ (255U << offset) & ivar; ivar = (u32 )((msix_vector | 128) << offset) | ivar; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ivar, (void volatile *)hw_addr + (unsigned long )((index << 2) + 5888)); } else { } return; } }
/* igb_assign_vector - route a q_vector's rx/tx queues to an MSI-X vector
 * (continues past this chunk). For mac.type 1U (82575) it builds an MSIXBM
 * bitmap: bit (1<<rx_queue) for rx, (0x100<<tx_queue) for tx, plus bit 31
 * ("other" causes) when this is vector 0 and flag 0x2000 (presumably
 * IGB_FLAG_HAS_MSIX -- confirm) is clear, written at 5632 (0x1600) +
 * vector*4. Later mac types use igb_write_ivar instead. */
static void igb_assign_vector(struct igb_q_vector *q_vector , int msix_vector ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; int rx_queue ; int tx_queue ; u32 msixbm ; u8 *hw_addr ; u8 *__var ; long tmp ; { adapter = q_vector->adapter; hw = & adapter->hw; rx_queue = -1; tx_queue = -1; msixbm = 0U; if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0)) { rx_queue = (int )(q_vector->rx.ring)->reg_idx; } else { } if ((unsigned long )q_vector->tx.ring != (unsigned long )((struct igb_ring *)0)) { tx_queue = (int )(q_vector->tx.ring)->reg_idx; } else { } switch ((unsigned int )hw->mac.type) { case 1U: ; if (rx_queue >= 0) { msixbm = (u32 )(1 << rx_queue); } else { } if (tx_queue >= 0) { msixbm = (u32 )(256 << tx_queue) | msixbm; } else { } if ((adapter->flags & 8192U) == 0U && msix_vector == 0) { msixbm = msixbm | 2147483648U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(msixbm, (void volatile *)hw_addr + (unsigned long )((msix_vector << 2) + 5632)); } else { } q_vector->eims_value = msixbm; goto ldv_56843; case 2U: ; if (rx_queue >= 0) { igb_write_ivar(hw, msix_vector, rx_queue & 7, (rx_queue & 8)
<< 1); } else { } if (tx_queue >= 0) { igb_write_ivar(hw, msix_vector, tx_queue & 7, ((tx_queue & 8) << 1) + 8); } else { } q_vector->eims_value = (u32 )(1 << msix_vector); goto ldv_56843; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: ; if (rx_queue >= 0) { igb_write_ivar(hw, msix_vector, rx_queue >> 1, (rx_queue & 1) << 4); } else { } if (tx_queue >= 0) { igb_write_ivar(hw, msix_vector, tx_queue >> 1, ((tx_queue & 1) << 4) + 8); } else { } q_vector->eims_value = (u32 )(1 << msix_vector); goto ldv_56843; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_main.c"), "i" (860), "i" (12UL)); ldv_56851: ; goto ldv_56851; } ldv_56843: adapter->eims_enable_mask = adapter->eims_enable_mask | q_vector->eims_value; q_vector->set_itr = 1U; return; } } static void igb_configure_msix(struct igb_adapter *adapter ) { u32 tmp ; int i ; int vector ; struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; int tmp___1 ; long tmp___2 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___3 ; int tmp___4 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___5 ; int tmp___6 ; { vector = 0; hw = & adapter->hw; adapter->eims_enable_mask = 0U; switch ((unsigned int )hw->mac.type) { case 1U: tmp = igb_rd32(hw, 24U); tmp = tmp | 2147483648U; tmp = tmp | 16777216U; tmp = tmp | 1U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(tmp, (void volatile *)hw_addr + 24U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___0 == 
(unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { tmp___1 = vector; vector = vector + 1; writel(2147483648U, (void volatile *)hw_addr___0 + (unsigned long )((tmp___1 << 2) + 5632)); } else { } adapter->eims_other = 2147483648U; goto ldv_56866; case 2U: ; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(3221225489U, (void volatile *)hw_addr___1 + 5396U); } else { } adapter->eims_other = (u32 )(1 << vector); tmp___4 = vector; vector = vector + 1; tmp = (u32 )((tmp___4 | 128) << 8); __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(tmp, (void volatile *)hw_addr___2 + 5952U); } else { } goto ldv_56866; default: ; goto ldv_56866; } ldv_56866: adapter->eims_enable_mask = adapter->eims_enable_mask | adapter->eims_other; i = 0; goto ldv_56881; ldv_56880: tmp___6 = vector; vector = vector + 1; igb_assign_vector(adapter->q_vector[i], tmp___6); i = i + 1; ldv_56881: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_56880; } else { } igb_rd32(hw, 8U); return; } } static int igb_request_msix(struct igb_adapter *adapter ) { struct net_device *netdev ; struct e1000_hw *hw ; int i ; int err ; int vector ; int free_vector ; struct igb_q_vector *q_vector ; int tmp ; int tmp___0 ; { netdev = adapter->netdev; hw = & adapter->hw; err = 0; vector = 0; free_vector = 0; err = ldv_request_irq_19(adapter->msix_entries[vector].vector, & igb_msix_other, 0UL, (char const *)(& netdev->name), (void *)adapter); if (err != 0) { goto err_out; } else { } i = 0; goto ldv_56896; ldv_56895: q_vector = adapter->q_vector[i]; vector = vector + 1; q_vector->itr_register = (void *)hw->hw_addr + (unsigned long )((vector + 1440) * 4); if ((unsigned long 
)q_vector->rx.ring != (unsigned long )((struct igb_ring *)0) && (unsigned long )q_vector->tx.ring != (unsigned long )((struct igb_ring *)0)) { sprintf((char *)(& q_vector->name), "%s-TxRx-%u", (char *)(& netdev->name), (int )(q_vector->rx.ring)->queue_index); } else if ((unsigned long )q_vector->tx.ring != (unsigned long )((struct igb_ring *)0)) { sprintf((char *)(& q_vector->name), "%s-tx-%u", (char *)(& netdev->name), (int )(q_vector->tx.ring)->queue_index); } else if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0)) { sprintf((char *)(& q_vector->name), "%s-rx-%u", (char *)(& netdev->name), (int )(q_vector->rx.ring)->queue_index); } else { sprintf((char *)(& q_vector->name), "%s-unused", (char *)(& netdev->name)); } err = ldv_request_irq_20(adapter->msix_entries[vector].vector, & igb_msix_ring, 0UL, (char const *)(& q_vector->name), (void *)q_vector); if (err != 0) { goto err_free; } else { } i = i + 1; ldv_56896: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_56895; } else { } igb_configure_msix(adapter); return (0); err_free: tmp = free_vector; free_vector = free_vector + 1; ldv_free_irq_21(adapter->msix_entries[tmp].vector, (void *)adapter); vector = vector - 1; i = 0; goto ldv_56899; ldv_56898: tmp___0 = free_vector; free_vector = free_vector + 1; ldv_free_irq_22(adapter->msix_entries[tmp___0].vector, (void *)adapter->q_vector[i]); i = i + 1; ldv_56899: ; if (i < vector) { goto ldv_56898; } else { } err_out: ; return (err); } } static void igb_free_q_vector(struct igb_adapter *adapter , int v_idx ) { struct igb_q_vector *q_vector ; { q_vector = adapter->q_vector[v_idx]; adapter->q_vector[v_idx] = (struct igb_q_vector *)0; if ((unsigned long )q_vector != (unsigned long )((struct igb_q_vector *)0)) { kfree_call_rcu(& q_vector->rcu, (void (*)(struct callback_head * ))360); } else { } return; } } static void igb_reset_q_vector(struct igb_adapter *adapter , int v_idx ) { struct igb_q_vector *q_vector ; { q_vector = 
adapter->q_vector[v_idx]; if ((unsigned long )q_vector == (unsigned long )((struct igb_q_vector *)0)) { return; } else { } if ((unsigned long )q_vector->tx.ring != (unsigned long )((struct igb_ring *)0)) { adapter->tx_ring[(int )(q_vector->tx.ring)->queue_index] = (struct igb_ring *)0; } else { } if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0)) { adapter->rx_ring[(int )(q_vector->rx.ring)->queue_index] = (struct igb_ring *)0; } else { } netif_napi_del(& q_vector->napi); return; } } static void igb_reset_interrupt_capability(struct igb_adapter *adapter ) { int v_idx ; int tmp ; { v_idx = (int )adapter->num_q_vectors; if ((adapter->flags & 8192U) != 0U) { pci_disable_msix(adapter->pdev); } else if ((int )adapter->flags & 1) { pci_disable_msi(adapter->pdev); } else { } goto ldv_56917; ldv_56916: igb_reset_q_vector(adapter, v_idx); ldv_56917: tmp = v_idx; v_idx = v_idx - 1; if (tmp != 0) { goto ldv_56916; } else { } return; } } static void igb_free_q_vectors(struct igb_adapter *adapter ) { int v_idx ; int tmp ; { v_idx = (int )adapter->num_q_vectors; adapter->num_tx_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0U; goto ldv_56924; ldv_56923: igb_reset_q_vector(adapter, v_idx); igb_free_q_vector(adapter, v_idx); ldv_56924: tmp = v_idx; v_idx = v_idx - 1; if (tmp != 0) { goto ldv_56923; } else { } return; } } static void igb_clear_interrupt_scheme(struct igb_adapter *adapter ) { { igb_free_q_vectors(adapter); igb_reset_interrupt_capability(adapter); return; } } static void igb_set_interrupt_capability(struct igb_adapter *adapter , bool msix ) { int err ; int numvecs ; int i ; struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp ; int tmp___0 ; { if (! 
msix) { goto msi_only; } else { } adapter->flags = adapter->flags | 8192U; adapter->num_rx_queues = (int )adapter->rss_queues; if (adapter->vfs_allocated_count != 0U) { adapter->num_tx_queues = 1; } else { adapter->num_tx_queues = (int )adapter->rss_queues; } numvecs = adapter->num_rx_queues; if ((adapter->flags & 8U) == 0U) { numvecs = adapter->num_tx_queues + numvecs; } else { } adapter->num_q_vectors = (unsigned int )numvecs; numvecs = numvecs + 1; i = 0; goto ldv_56938; ldv_56937: adapter->msix_entries[i].entry = (u16 )i; i = i + 1; ldv_56938: ; if (i < numvecs) { goto ldv_56937; } else { } err = pci_enable_msix_range(adapter->pdev, (struct msix_entry *)(& adapter->msix_entries), numvecs, numvecs); if (err > 0) { return; } else { } igb_reset_interrupt_capability(adapter); msi_only: adapter->flags = adapter->flags & 4294959103U; if ((unsigned long )adapter->vf_data != (unsigned long )((struct vf_data_storage *)0)) { hw = & adapter->hw; pci_disable_sriov(adapter->pdev); msleep(500U); kfree((void const *)adapter->vf_data); adapter->vf_data = (struct vf_data_storage *)0; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(1U, (void volatile *)hw_addr + 23484U); } else { } igb_rd32(hw, 8U); msleep(100U); _dev_info((struct device const *)(& (adapter->pdev)->dev), "IOV Disabled\n"); } else { } adapter->vfs_allocated_count = 0U; adapter->rss_queues = 1U; adapter->flags = adapter->flags | 8U; adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_q_vectors = 1U; tmp___0 = pci_enable_msi_exact(adapter->pdev, 1); if (tmp___0 == 0) { adapter->flags = adapter->flags | 1U; } else { } return; } } static void igb_add_ring(struct igb_ring *ring , struct igb_ring_container *head ) { { head->ring = ring; head->count = (u8 )((int )head->count + 1); return; } } static int igb_alloc_q_vector(struct igb_adapter *adapter , int v_count , int v_idx , 
int txr_count , int txr_idx , int rxr_count , int rxr_idx ) { struct igb_q_vector *q_vector ; struct igb_ring *ring ; int ring_count ; int size ; void *tmp ; { if (txr_count > 1 || rxr_count > 1) { return (-12); } else { } ring_count = txr_count + rxr_count; size = (int )((unsigned int )((unsigned long )ring_count + 1UL) * 4096U); q_vector = adapter->q_vector[v_idx]; if ((unsigned long )q_vector == (unsigned long )((struct igb_q_vector *)0)) { tmp = kzalloc((size_t )size, 208U); q_vector = (struct igb_q_vector *)tmp; } else { memset((void *)q_vector, 0, (size_t )size); } if ((unsigned long )q_vector == (unsigned long )((struct igb_q_vector *)0)) { return (-12); } else { } netif_napi_add(adapter->netdev, & q_vector->napi, & igb_poll, 64); adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; q_vector->tx.work_limit = adapter->tx_work_limit; q_vector->itr_register = (void *)adapter->hw.hw_addr + 5760U; q_vector->itr_val = 648U; ring = (struct igb_ring *)(& q_vector->ring); if (rxr_count != 0) { if (adapter->rx_itr_setting == 0U || adapter->rx_itr_setting > 3U) { q_vector->itr_val = (u16 )adapter->rx_itr_setting; } else { } } else if (adapter->tx_itr_setting == 0U || adapter->tx_itr_setting > 3U) { q_vector->itr_val = (u16 )adapter->tx_itr_setting; } else { } if (txr_count != 0) { ring->dev = & (adapter->pdev)->dev; ring->netdev = adapter->netdev; ring->q_vector = q_vector; igb_add_ring(ring, & q_vector->tx); if ((unsigned int )adapter->hw.mac.type == 1U) { set_bit(2L, (unsigned long volatile *)(& ring->flags)); } else { } ring->count = adapter->tx_ring_count; ring->queue_index = (u8 )txr_idx; u64_stats_init(& ring->__annonCompField120.__annonCompField118.tx_syncp); u64_stats_init(& ring->__annonCompField120.__annonCompField118.tx_syncp2); adapter->tx_ring[txr_idx] = ring; ring = ring + 1; } else { } if (rxr_count != 0) { ring->dev = & (adapter->pdev)->dev; ring->netdev = adapter->netdev; ring->q_vector = q_vector; igb_add_ring(ring, & q_vector->rx); if 
((unsigned int )adapter->hw.mac.type > 1U) { set_bit(0L, (unsigned long volatile *)(& ring->flags)); } else { } if ((unsigned int )adapter->hw.mac.type > 3U) { set_bit(1L, (unsigned long volatile *)(& ring->flags)); } else { } ring->count = adapter->rx_ring_count; ring->queue_index = (u8 )rxr_idx; u64_stats_init(& ring->__annonCompField120.__annonCompField119.rx_syncp); adapter->rx_ring[rxr_idx] = ring; } else { } return (0); } } static int igb_alloc_q_vectors(struct igb_adapter *adapter ) { int q_vectors ; int rxr_remaining ; int txr_remaining ; int rxr_idx ; int txr_idx ; int v_idx ; int err ; int rqpv ; int tqpv ; int tmp ; { q_vectors = (int )adapter->num_q_vectors; rxr_remaining = adapter->num_rx_queues; txr_remaining = adapter->num_tx_queues; rxr_idx = 0; txr_idx = 0; v_idx = 0; if (rxr_remaining + txr_remaining <= q_vectors) { goto ldv_56973; ldv_56972: err = igb_alloc_q_vector(adapter, q_vectors, v_idx, 0, 0, 1, rxr_idx); if (err != 0) { goto err_out; } else { } rxr_remaining = rxr_remaining - 1; rxr_idx = rxr_idx + 1; v_idx = v_idx + 1; ldv_56973: ; if (rxr_remaining != 0) { goto ldv_56972; } else { } } else { } goto ldv_56978; ldv_56977: rqpv = (((q_vectors - v_idx) + rxr_remaining) + -1) / (q_vectors - v_idx); tqpv = (((q_vectors - v_idx) + txr_remaining) + -1) / (q_vectors - v_idx); err = igb_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, rqpv, rxr_idx); if (err != 0) { goto err_out; } else { } rxr_remaining = rxr_remaining - rqpv; txr_remaining = txr_remaining - tqpv; rxr_idx = rxr_idx + 1; txr_idx = txr_idx + 1; v_idx = v_idx + 1; ldv_56978: ; if (v_idx < q_vectors) { goto ldv_56977; } else { } return (0); err_out: adapter->num_tx_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0U; goto ldv_56981; ldv_56980: igb_free_q_vector(adapter, v_idx); ldv_56981: tmp = v_idx; v_idx = v_idx - 1; if (tmp != 0) { goto ldv_56980; } else { } return (-12); } } static int igb_init_interrupt_scheme(struct igb_adapter *adapter , bool msix ) { 
struct pci_dev *pdev ; int err ; { pdev = adapter->pdev; igb_set_interrupt_capability(adapter, (int )msix); err = igb_alloc_q_vectors(adapter); if (err != 0) { dev_err((struct device const *)(& pdev->dev), "Unable to allocate memory for vectors\n"); goto err_alloc_q_vectors; } else { } igb_cache_ring_register(adapter); return (0); err_alloc_q_vectors: igb_reset_interrupt_capability(adapter); return (err); } } static int igb_request_irq(struct igb_adapter *adapter ) { struct net_device *netdev ; struct pci_dev *pdev ; int err ; { netdev = adapter->netdev; pdev = adapter->pdev; err = 0; if ((adapter->flags & 8192U) != 0U) { err = igb_request_msix(adapter); if (err == 0) { goto request_done; } else { } igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); igb_clear_interrupt_scheme(adapter); err = igb_init_interrupt_scheme(adapter, 0); if (err != 0) { goto request_done; } else { } igb_setup_all_tx_resources(adapter); igb_setup_all_rx_resources(adapter); igb_configure(adapter); } else { } igb_assign_vector(adapter->q_vector[0], 0); if ((int )adapter->flags & 1) { err = ldv_request_irq_23(pdev->irq, & igb_intr_msi, 0UL, (char const *)(& netdev->name), (void *)adapter); if (err == 0) { goto request_done; } else { } igb_reset_interrupt_capability(adapter); adapter->flags = adapter->flags & 4294967294U; } else { } err = ldv_request_irq_24(pdev->irq, & igb_intr, 128UL, (char const *)(& netdev->name), (void *)adapter); if (err != 0) { dev_err((struct device const *)(& pdev->dev), "Error %d getting interrupt\n", err); } else { } request_done: ; return (err); } } static void igb_free_irq(struct igb_adapter *adapter ) { int vector ; int i ; int tmp ; int tmp___0 ; { if ((adapter->flags & 8192U) != 0U) { vector = 0; tmp = vector; vector = vector + 1; ldv_free_irq_25(adapter->msix_entries[tmp].vector, (void *)adapter); i = 0; goto ldv_57003; ldv_57002: tmp___0 = vector; vector = vector + 1; ldv_free_irq_26(adapter->msix_entries[tmp___0].vector, (void 
*)adapter->q_vector[i]); i = i + 1; ldv_57003: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_57002; } else { } } else { ldv_free_irq_27((adapter->pdev)->irq, (void *)adapter); } return; } } static void igb_irq_disable(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 regval ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___3 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___4 ; int i ; { hw = & adapter->hw; if ((adapter->flags & 8192U) != 0U) { tmp = igb_rd32(hw, 5424U); regval = tmp; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(~ adapter->eims_enable_mask & regval, (void volatile *)hw_addr + 5424U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(adapter->eims_enable_mask, (void volatile *)hw_addr___0 + 5416U); } else { } regval = igb_rd32(hw, 5420U); __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(~ adapter->eims_enable_mask & regval, (void volatile *)hw_addr___1 + 5420U); } else { } } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(0U, (void volatile *)hw_addr___2 + 224U); } else { } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(4294967295U, (void volatile 
*)hw_addr___3 + 216U); } else { } igb_rd32(hw, 8U); if ((adapter->flags & 8192U) != 0U) { i = 0; goto ldv_57027; ldv_57026: synchronize_irq(adapter->msix_entries[i].vector); i = i + 1; ldv_57027: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_57026; } else { } } else { synchronize_irq((adapter->pdev)->irq); } return; } } static void igb_irq_enable(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 ims ; u32 regval ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___3 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___4 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___5 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___6 ; { hw = & adapter->hw; if ((adapter->flags & 8192U) != 0U) { ims = 1342177284U; tmp = igb_rd32(hw, 5420U); regval = tmp; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(adapter->eims_enable_mask | regval, (void volatile *)hw_addr + 5420U); } else { } regval = igb_rd32(hw, 5424U); __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(adapter->eims_enable_mask | regval, (void volatile *)hw_addr___0 + 5424U); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(adapter->eims_enable_mask, (void volatile *)hw_addr___1 + 5412U); } else { } if (adapter->vfs_allocated_count != 0U) { __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(255U, 
(void volatile *)hw_addr___2 + 3204U); } else { } ims = ims | 256U; } else { } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(ims, (void volatile *)hw_addr___3 + 208U); } else { } } else { __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(1342177437U, (void volatile *)hw_addr___4 + 208U); } else { } __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(1342177437U, (void volatile *)hw_addr___5 + 224U); } else { } } return; } } static void igb_update_mng_vlan(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u16 vid ; u16 old_vid ; int tmp ; { hw = & adapter->hw; vid = adapter->hw.mng_cookie.vlan_id; old_vid = adapter->mng_vlan_id; if (((int )hw->mng_cookie.status & 2) != 0) { igb_vfta_set(hw, (u32 )vid, 1); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = 65535U; } if ((unsigned int )old_vid != 65535U && (int )vid != (int )old_vid) { tmp = variable_test_bit((long )old_vid, (unsigned long const volatile *)(& adapter->active_vlans)); if (tmp == 0) { igb_vfta_set(hw, (u32 )old_vid, 0); } else { } } else { } return; } } static void igb_release_hw_control(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 ctrl_ext ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; ctrl_ext = igb_rd32(hw, 24U); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(ctrl_ext & 4026531839U, (void volatile *)hw_addr + 24U); } else { } return; } } static void igb_get_hw_control(struct igb_adapter *adapter ) { struct 
e1000_hw *hw ; u32 ctrl_ext ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; ctrl_ext = igb_rd32(hw, 24U); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(ctrl_ext | 268435456U, (void volatile *)hw_addr + 24U); } else { } return; } } static void igb_configure(struct igb_adapter *adapter ) { struct net_device *netdev ; int i ; struct igb_ring *ring ; int tmp ; { netdev = adapter->netdev; igb_get_hw_control(adapter); igb_set_rx_mode(netdev); igb_restore_vlan(adapter); igb_setup_tctl(adapter); igb_setup_mrqc(adapter); igb_setup_rctl(adapter); igb_configure_tx(adapter); igb_configure_rx(adapter); igb_rx_fifo_flush_82575(& adapter->hw); i = 0; goto ldv_57085; ldv_57084: ring = adapter->rx_ring[i]; tmp = igb_desc_unused(ring); igb_alloc_rx_buffers(ring, (int )((u16 )tmp)); i = i + 1; ldv_57085: ; if (adapter->num_rx_queues > i) { goto ldv_57084; } else { } return; } } void igb_power_up_link(struct igb_adapter *adapter ) { { igb_reset_phy(& adapter->hw); if ((unsigned int )adapter->hw.phy.media_type == 1U) { igb_power_up_phy_copper(& adapter->hw); } else { igb_power_up_serdes_link_82575(& adapter->hw); } igb_setup_link(& adapter->hw); return; } } static void igb_power_down_link(struct igb_adapter *adapter ) { { if ((unsigned int )adapter->hw.phy.media_type == 1U) { igb_power_down_phy_copper_82575(& adapter->hw); } else { igb_shutdown_serdes_link_82575(& adapter->hw); } return; } } static void igb_check_swap_media(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 ctrl_ext ; u32 connsw ; bool swap_now ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; { hw = & adapter->hw; swap_now = 0; ctrl_ext = igb_rd32(hw, 24U); connsw = igb_rd32(hw, 52U); if ((unsigned int )hw->phy.media_type == 1U && (connsw & 1U) == 0U) { swap_now = 1; } else if ((connsw 
& 512U) == 0U) { if (adapter->copper_tries <= 3) { adapter->copper_tries = adapter->copper_tries + 1; connsw = connsw | 2U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(connsw, (void volatile *)hw_addr + 52U); } else { } return; } else { adapter->copper_tries = 0; if ((connsw & 1024U) != 0U && (connsw & 2048U) == 0U) { swap_now = 1; connsw = connsw & 4294967293U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(connsw, (void volatile *)hw_addr___0 + 52U); } else { } } else { } } } else { } if (! swap_now) { return; } else { } switch ((unsigned int )hw->phy.media_type) { case 1U: netdev_info((struct net_device const *)adapter->netdev, "MAS: changing media to fiber/serdes\n"); ctrl_ext = ctrl_ext | 12582912U; adapter->flags = adapter->flags | 1024U; adapter->copper_tries = 0; goto ldv_57107; case 3U: ; case 2U: netdev_info((struct net_device const *)adapter->netdev, "MAS: changing media to copper\n"); ctrl_ext = ctrl_ext & 4282384383U; adapter->flags = adapter->flags | 1024U; goto ldv_57107; default: netdev_err((struct net_device const *)adapter->netdev, "AMS: Invalid media type found, returning\n"); goto ldv_57107; } ldv_57107: __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr___1 + 24U); } else { } return; } } int igb_up(struct igb_adapter *adapter ) { struct e1000_hw *hw ; int i ; u32 reg_data ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { hw = & adapter->hw; igb_configure(adapter); clear_bit(2L, (unsigned long volatile *)(& adapter->state)); i = 0; goto ldv_57120; ldv_57119: napi_enable(& 
(adapter->q_vector[i])->napi); i = i + 1; ldv_57120: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_57119; } else { } if ((adapter->flags & 8192U) != 0U) { igb_configure_msix(adapter); } else { igb_assign_vector(adapter->q_vector[0], 0); } igb_rd32(hw, 192U); igb_irq_enable(adapter); if (adapter->vfs_allocated_count != 0U) { tmp = igb_rd32(hw, 24U); reg_data = tmp; reg_data = reg_data | 16384U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg_data, (void volatile *)hw_addr + 24U); } else { } } else { } netif_tx_start_all_queues(adapter->netdev); hw->mac.get_link_status = 1; schedule_work(& adapter->watchdog_task); if ((adapter->flags & 16384U) != 0U && ! hw->dev_spec._82575.eee_disable) { adapter->eee_advert = 6U; } else { } return (0); } } void igb_down(struct igb_adapter *adapter ) { struct net_device *netdev ; struct e1000_hw *hw ; u32 tctl ; u32 rctl ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; int tmp___1 ; { netdev = adapter->netdev; hw = & adapter->hw; set_bit(2L, (unsigned long volatile *)(& adapter->state)); rctl = igb_rd32(hw, 256U); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(rctl & 4294967293U, (void volatile *)hw_addr + 256U); } else { } netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); tctl = igb_rd32(hw, 1024U); tctl = tctl & 4294967293U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(tctl, (void volatile *)hw_addr___0 + 1024U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 11000UL); igb_irq_disable(adapter); adapter->flags = adapter->flags & 4294966783U; i = 0; 
goto ldv_57141; ldv_57140: ; if ((unsigned long )adapter->q_vector[i] != (unsigned long )((struct igb_q_vector *)0)) { napi_synchronize((struct napi_struct const *)(& (adapter->q_vector[i])->napi)); napi_disable(& (adapter->q_vector[i])->napi); } else { } i = i + 1; ldv_57141: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_57140; } else { } ldv_del_timer_sync_28(& adapter->watchdog_timer); ldv_del_timer_sync_29(& adapter->phy_info_timer); spin_lock(& adapter->stats64_lock); igb_update_stats(adapter, & adapter->stats64); spin_unlock(& adapter->stats64_lock); adapter->link_speed = 0U; adapter->link_duplex = 0U; tmp___1 = pci_channel_offline(adapter->pdev); if (tmp___1 == 0) { igb_reset(adapter); } else { } igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); igb_setup_dca(adapter); return; } } void igb_reinit_locked(struct igb_adapter *adapter ) { int __ret_warn_on ; int tmp ; long tmp___0 ; int tmp___1 ; { tmp = preempt_count(); __ret_warn_on = ((unsigned long )tmp & 2096896UL) != 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_main.c", 1827); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto ldv_57149; ldv_57148: usleep_range(1000UL, 2000UL); ldv_57149: tmp___1 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___1 != 0) { goto ldv_57148; } else { } igb_down(adapter); igb_up(adapter); clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return; } } static void igb_enable_mas(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 connsw ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { hw = & adapter->hw; tmp = igb_rd32(hw, 52U); connsw = tmp; if ((unsigned int )hw->phy.media_type == 1U && (connsw & 512U) == 0U) { connsw = 
connsw | 4U; connsw = connsw | 1U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(connsw, (void volatile *)hw_addr + 52U); } else { } igb_rd32(hw, 8U); } else { } return; } } void igb_reset(struct igb_adapter *adapter ) { struct pci_dev *pdev ; struct e1000_hw *hw ; struct e1000_mac_info *mac ; struct e1000_fc_info *fc ; u32 pba ; u32 tx_space ; u32 min_tx_space ; u32 min_rx_space ; u32 hwm ; u16 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u32 _min1 ; u32 _min2 ; int i ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___3 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___4 ; s32 tmp___5 ; int tmp___6 ; bool tmp___7 ; int tmp___8 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___9 ; { pdev = adapter->pdev; hw = & adapter->hw; mac = & hw->mac; fc = & hw->fc; pba = 0U; switch ((unsigned int )mac->type) { case 4U: ; case 5U: ; case 3U: pba = igb_rd32(hw, 9220U); tmp = igb_rxpbs_adjust_82580(pba); pba = (u32 )tmp; goto ldv_57174; case 2U: pba = igb_rd32(hw, 9220U); pba = pba & 127U; goto ldv_57174; case 1U: ; case 6U: ; case 7U: ; default: pba = 34U; goto ldv_57174; } ldv_57174: ; if (adapter->max_frame_size > 1518U && (unsigned int )mac->type <= 1U) { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(pba, (void volatile *)hw_addr + 4096U); } else { } pba = igb_rd32(hw, 4096U); tx_space = pba >> 16; pba = pba & 65535U; min_tx_space = (u32 )((unsigned long )adapter->max_frame_size + 12UL) * 2U; min_tx_space = (min_tx_space + 1023U) & 4294966272U; min_tx_space = min_tx_space >> 10; min_rx_space = adapter->max_frame_size; min_rx_space = (min_rx_space + 1023U) & 4294966272U; min_rx_space = min_rx_space >> 10; if (tx_space 
< min_tx_space && min_tx_space - tx_space < pba) { pba = (tx_space - min_tx_space) + pba; if (pba < min_rx_space) { pba = min_rx_space; } else { } } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(pba, (void volatile *)hw_addr___0 + 4096U); } else { } } else { } _min1 = (pba * 9216U) / 10U; _min2 = (pba << 10) - adapter->max_frame_size * 2U; hwm = _min1 < _min2 ? _min1 : _min2; fc->high_water = hwm & 4294967280U; fc->low_water = fc->high_water - 16U; fc->pause_time = 65535U; fc->send_xon = 1; fc->current_mode = fc->requested_mode; if (adapter->vfs_allocated_count != 0U) { i = 0; goto ldv_57191; ldv_57190: (adapter->vf_data + (unsigned long )i)->flags = (adapter->vf_data + (unsigned long )i)->flags & 8U; i = i + 1; ldv_57191: ; if ((unsigned int )i < adapter->vfs_allocated_count) { goto ldv_57190; } else { } igb_ping_all_vfs(adapter); __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(0U, (void volatile *)hw_addr___1 + 3212U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(0U, (void volatile *)hw_addr___2 + 3216U); } else { } } else { } (*(hw->mac.ops.reset_hw))(hw); __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(0U, (void volatile *)hw_addr___3 + 22528U); } else { } if ((adapter->flags & 1024U) != 0U) { (*(adapter->ei.get_invariants))(hw); adapter->flags = adapter->flags & 4294966271U; } else { } if ((unsigned int )mac->type == 1U && (adapter->flags & 4096U) != 0U) { 
igb_enable_mas(adapter); } else { } tmp___5 = (*(hw->mac.ops.init_hw))(hw); if (tmp___5 != 0) { dev_err((struct device const *)(& pdev->dev), "Hardware Error\n"); } else { } if (! hw->mac.autoneg) { igb_force_mac_fc(hw); } else { } igb_init_dmac(adapter, pba); tmp___6 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___6 == 0) { if ((unsigned int )mac->type == 4U && (unsigned int )hw->bus.func == 0U) { if ((int )adapter->ets) { (*(mac->ops.init_thermal_sensor_thresh))(hw); } else { } } else { } } else { } if ((unsigned int )hw->phy.media_type == 1U) { switch ((unsigned int )mac->type) { case 4U: ; case 6U: ; case 7U: igb_set_eee_i350(hw, 1, 1); goto ldv_57205; case 5U: igb_set_eee_i354(hw, 1, 1); goto ldv_57205; default: ; goto ldv_57205; } ldv_57205: ; } else { } tmp___7 = netif_running((struct net_device const *)adapter->netdev); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { igb_power_down_link(adapter); } else { } igb_update_mng_vlan(adapter); __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___9 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___9 == 0L) { writel(33024U, (void volatile *)hw_addr___4 + 56U); } else { } igb_ptp_reset(adapter); igb_get_phy_info(hw); return; } } static netdev_features_t igb_fix_features(struct net_device *netdev , netdev_features_t features ) { { if ((features & 256ULL) != 0ULL) { features = features | 128ULL; } else { features = features & 0xffffffffffffff7fULL; } return (features); } } static int igb_set_features(struct net_device *netdev , netdev_features_t features ) { netdev_features_t changed ; struct igb_adapter *adapter ; void *tmp ; bool tmp___0 ; { changed = netdev->features ^ features; tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; if ((changed & 256ULL) != 0ULL) { igb_vlan_mode(netdev, features); } else { } if ((changed & 274877906944ULL) == 0ULL) { 
return (0); } else { } netdev->features = features; tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { igb_reinit_locked(adapter); } else { igb_reset(adapter); } return (0); } } static struct net_device_ops const igb_netdev_ops = {0, 0, & igb_open, & igb_close, & igb_xmit_frame, 0, 0, & igb_set_rx_mode, & igb_set_mac, & eth_validate_addr, & igb_ioctl, 0, & igb_change_mtu, 0, & igb_tx_timeout, & igb_get_stats64, 0, & igb_vlan_rx_add_vid, & igb_vlan_rx_kill_vid, & igb_netpoll, 0, 0, 0, & igb_ndo_set_vf_mac, & igb_ndo_set_vf_vlan, & igb_ndo_set_vf_bw, & igb_ndo_set_vf_spoofchk, & igb_ndo_get_vf_config, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & igb_fix_features, & igb_set_features, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & passthru_features_check, 0, 0}; void igb_set_fw_version(struct igb_adapter *adapter ) { struct e1000_hw *hw ; struct e1000_fw_version fw ; bool tmp ; int tmp___0 ; { hw = & adapter->hw; igb_get_fw_version(hw, & fw); switch ((unsigned int )hw->mac.type) { case 6U: ; case 7U: tmp = igb_get_flash_presence_i210(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { snprintf((char *)(& adapter->fw_version), 32UL, "%2d.%2d-%d", (int )fw.invm_major, (int )fw.invm_minor, (int )fw.invm_img_type); goto ldv_57229; } else { } default: ; if ((int )fw.or_valid) { snprintf((char *)(& adapter->fw_version), 32UL, "%d.%d, 0x%08x, %d.%d.%d", (int )fw.eep_major, (int )fw.eep_minor, fw.etrack_id, (int )fw.or_major, (int )fw.or_build, (int )fw.or_patch); } else if (fw.etrack_id != 0U) { snprintf((char *)(& adapter->fw_version), 32UL, "%d.%d, 0x%08x", (int )fw.eep_major, (int )fw.eep_minor, fw.etrack_id); } else { snprintf((char *)(& adapter->fw_version), 32UL, "%d.%d.%d", (int )fw.eep_major, (int )fw.eep_minor, (int )fw.eep_build); } goto ldv_57229; } ldv_57229: ; return; } } static void igb_init_mas(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u16 eeprom_data ; { hw = & adapter->hw; 
(*(hw->nvm.ops.read))(hw, 3, 1, & eeprom_data); switch ((int )hw->bus.func) { case 0: ; if ((int )eeprom_data & 1) { adapter->flags = adapter->flags | 4096U; netdev_info((struct net_device const *)adapter->netdev, "MAS: Enabling Media Autosense for port %d\n", (int )hw->bus.func); } else { } goto ldv_57237; case 1: ; if (((int )eeprom_data & 2) != 0) { adapter->flags = adapter->flags | 4096U; netdev_info((struct net_device const *)adapter->netdev, "MAS: Enabling Media Autosense for port %d\n", (int )hw->bus.func); } else { } goto ldv_57237; case 2: ; if (((int )eeprom_data & 4) != 0) { adapter->flags = adapter->flags | 4096U; netdev_info((struct net_device const *)adapter->netdev, "MAS: Enabling Media Autosense for port %d\n", (int )hw->bus.func); } else { } goto ldv_57237; case 3: ; if (((int )eeprom_data & 8) != 0) { adapter->flags = adapter->flags | 4096U; netdev_info((struct net_device const *)adapter->netdev, "MAS: Enabling Media Autosense for port %d\n", (int )hw->bus.func); } else { } goto ldv_57237; default: netdev_err((struct net_device const *)adapter->netdev, "MAS: Invalid port configuration, returning\n"); goto ldv_57237; } ldv_57237: ; return; } } static s32 igb_init_i2c(struct igb_adapter *adapter ) { s32 status ; { status = 0; if ((unsigned int )adapter->hw.mac.type != 4U) { return (0); } else { } adapter->i2c_adap.owner = & __this_module; adapter->i2c_algo = igb_i2c_algo; adapter->i2c_algo.data = (void *)adapter; adapter->i2c_adap.algo_data = (void *)(& adapter->i2c_algo); adapter->i2c_adap.dev.parent = & (adapter->pdev)->dev; strlcpy((char *)(& adapter->i2c_adap.name), "igb BB", 48UL); status = i2c_bit_add_bus(& adapter->i2c_adap); return (status); } } static int igb_probe(struct pci_dev *pdev , struct pci_device_id const *ent ) { struct net_device *netdev ; struct igb_adapter *adapter ; struct e1000_hw *hw ; u16 eeprom_data ; s32 ret_val ; int global_quad_port_a ; struct e1000_info const *ei ; int err ; int pci_using_dac ; u8 part_str[11U] ; int 
__ret_warn_on ; char const *tmp ; long tmp___0 ; int tmp___1 ; void *tmp___2 ; u32 tmp___3 ; void *tmp___4 ; char const *tmp___5 ; s32 tmp___6 ; bool tmp___7 ; s32 tmp___8 ; bool tmp___9 ; s32 tmp___10 ; s32 tmp___11 ; bool tmp___12 ; int tmp___13 ; u8 *hw_addr ; u8 *__var ; long tmp___14 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___15 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_1 ; u32 tmp___16 ; bool tmp___17 ; int tmp___18 ; int tmp___19 ; u16 ets_word ; int tmp___20 ; bool tmp___21 ; u32 tmp___22 ; s32 tmp___23 ; int tmp___24 ; { eeprom_data = 0U; ei = igb_info_tbl[ent->driver_data]; if ((unsigned int )*((unsigned char *)pdev + 2531UL) != 0U) { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { tmp = pci_name((struct pci_dev const *)pdev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_main.c", 2242, "\v%s (%hx:%hx) should not be a VF!\n", tmp, (int )pdev->vendor, (int )pdev->device); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (-22); } else { } err = pci_enable_device_mem(pdev); if (err != 0) { return (err); } else { } pci_using_dac = 0; err = dma_set_mask_and_coherent(& pdev->dev, 0xffffffffffffffffULL); if (err == 0) { pci_using_dac = 1; } else { err = dma_set_mask_and_coherent(& pdev->dev, 4294967295ULL); if (err != 0) { dev_err((struct device const *)(& pdev->dev), "No usable DMA configuration, aborting\n"); goto err_dma; } else { } } tmp___1 = pci_select_bars(pdev, 512UL); err = pci_request_selected_regions(pdev, tmp___1, (char const *)(& igb_driver_name)); if (err != 0) { goto err_pci_reg; } else { } pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); pci_save_state(pdev); err = -12; netdev = alloc_etherdev_mqs(16384, 
8U, 8U); if ((unsigned long )netdev == (unsigned long )((struct net_device *)0)) { goto err_alloc_etherdev; } else { } netdev->dev.parent = & pdev->dev; pci_set_drvdata(pdev, (void *)netdev); tmp___2 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___2; adapter->netdev = netdev; adapter->pdev = pdev; hw = & adapter->hw; hw->back = (void *)adapter; tmp___3 = netif_msg_init(debug, 7); adapter->msg_enable = (int )tmp___3; err = -5; tmp___4 = pci_iomap(pdev, 0, 0UL); hw->hw_addr = (u8 *)tmp___4; if ((unsigned long )hw->hw_addr == (unsigned long )((u8 *)0U)) { goto err_ioremap; } else { } netdev->netdev_ops = & igb_netdev_ops; igb_set_ethtool_ops(netdev); netdev->watchdog_timeo = 1250; tmp___5 = pci_name((struct pci_dev const *)pdev); strncpy((char *)(& netdev->name), tmp___5, 15UL); netdev->mem_start = (unsigned long )pdev->resource[0].start; netdev->mem_end = (unsigned long )pdev->resource[0].end; hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; memcpy((void *)(& hw->mac.ops), (void const *)ei->mac_ops, 96UL); memcpy((void *)(& hw->phy.ops), (void const *)ei->phy_ops, 120UL); memcpy((void *)(& hw->nvm.ops), (void const *)ei->nvm_ops, 56UL); err = (*(ei->get_invariants))(hw); if (err != 0) { goto err_sw_init; } else { } err = igb_sw_init(adapter); if (err != 0) { goto err_sw_init; } else { } igb_get_bus_info_pcie(hw); hw->phy.autoneg_wait_to_complete = 0; if ((unsigned int )hw->phy.media_type == 1U) { hw->phy.mdix = 0U; hw->phy.disable_polarity_correction = 0; hw->phy.ms_type = 0; } else { } tmp___6 = igb_check_reset_block(hw); if (tmp___6 != 0) { _dev_info((struct device const *)(& pdev->dev), "PHY reset is blocked due to SOL/IDER session.\n"); } else { } netdev->features = netdev->features | 25770918291ULL; netdev->hw_features = netdev->hw_features | netdev->features; netdev->hw_features 
= netdev->hw_features | 274877906944ULL; netdev->features = netdev->features | 512ULL; netdev->vlan_features = netdev->vlan_features | 1114131ULL; netdev->priv_flags = netdev->priv_flags | 524288U; if (pci_using_dac != 0) { netdev->features = netdev->features | 32ULL; netdev->vlan_features = netdev->vlan_features | 32ULL; } else { } if ((unsigned int )hw->mac.type > 1U) { netdev->hw_features = netdev->hw_features | 1073741824ULL; netdev->features = netdev->features | 1073741824ULL; } else { } netdev->priv_flags = netdev->priv_flags | 131072U; tmp___7 = igb_enable_mng_pass_thru(hw); adapter->en_mng_pt = (u32 )tmp___7; (*(hw->mac.ops.reset_hw))(hw); switch ((unsigned int )hw->mac.type) { case 6U: ; case 7U: tmp___9 = igb_get_flash_presence_i210(hw); if ((int )tmp___9) { tmp___8 = (*(hw->nvm.ops.validate))(hw); if (tmp___8 < 0) { dev_err((struct device const *)(& pdev->dev), "The NVM Checksum Is Not Valid\n"); err = -5; goto err_eeprom; } else { } } else { } goto ldv_57270; default: tmp___10 = (*(hw->nvm.ops.validate))(hw); if (tmp___10 < 0) { dev_err((struct device const *)(& pdev->dev), "The NVM Checksum Is Not Valid\n"); err = -5; goto err_eeprom; } else { } goto ldv_57270; } ldv_57270: tmp___11 = (*(hw->mac.ops.read_mac_addr))(hw); if (tmp___11 != 0) { dev_err((struct device const *)(& pdev->dev), "NVM Read Error\n"); } else { } memcpy((void *)netdev->dev_addr, (void const *)(& hw->mac.addr), (size_t )netdev->addr_len); tmp___12 = is_valid_ether_addr((u8 const *)netdev->dev_addr); if (tmp___12) { tmp___13 = 0; } else { tmp___13 = 1; } if (tmp___13) { dev_err((struct device const *)(& pdev->dev), "Invalid MAC Address\n"); err = -5; goto err_eeprom; } else { } igb_set_fw_version(adapter); if ((unsigned int )hw->mac.type == 6U) { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___14 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___14 == 0L) { writel(162U, (void volatile *)hw_addr + 9220U); } else { } 
__var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___15 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___15 == 0L) { writel(67108884U, (void volatile *)hw_addr___0 + 13316U); } else { } } else { } reg_timer_13(& adapter->watchdog_timer, & igb_watchdog, (unsigned long )adapter); reg_timer_13(& adapter->phy_info_timer, & igb_update_phy_info, (unsigned long )adapter); __init_work(& adapter->reset_task, 0); __constr_expr_0.counter = 137438953408L; adapter->reset_task.data = __constr_expr_0; lockdep_init_map(& adapter->reset_task.lockdep_map, "(&adapter->reset_task)", & __key, 0); INIT_LIST_HEAD(& adapter->reset_task.entry); adapter->reset_task.func = & igb_reset_task; __init_work(& adapter->watchdog_task, 0); __constr_expr_1.counter = 137438953408L; adapter->watchdog_task.data = __constr_expr_1; lockdep_init_map(& adapter->watchdog_task.lockdep_map, "(&adapter->watchdog_task)", & __key___0, 0); INIT_LIST_HEAD(& adapter->watchdog_task.entry); adapter->watchdog_task.func = & igb_watchdog_task; adapter->fc_autoneg = 1; hw->mac.autoneg = 1; hw->phy.autoneg_advertised = 47U; hw->fc.requested_mode = 255; hw->fc.current_mode = 255; igb_validate_mdi_setting(hw); if ((unsigned int )hw->bus.func == 0U) { adapter->flags = adapter->flags | 256U; } else { } if ((unsigned int )hw->mac.type > 2U) { (*(hw->nvm.ops.read))(hw, (unsigned int )hw->bus.func != 0U ? 
(int )((unsigned int )((u16 )((int )hw->bus.func + 1)) * 64U + 36U) : 36, 1, & eeprom_data); } else if ((unsigned int )hw->bus.func == 1U) { (*(hw->nvm.ops.read))(hw, 20, 1, & eeprom_data); } else { } if (((int )eeprom_data & 1024) != 0) { adapter->flags = adapter->flags | 256U; } else { } switch ((int )pdev->device) { case 4310: adapter->flags = adapter->flags & 4294967039U; goto ldv_57283; case 4265: ; case 4326: ; case 4327: tmp___16 = igb_rd32(hw, 8U); if ((tmp___16 & 4U) != 0U) { adapter->flags = adapter->flags & 4294967039U; } else { } goto ldv_57283; case 4328: ; case 5414: ; if (global_quad_port_a != 0) { adapter->flags = adapter->flags & 4294967039U; } else { adapter->flags = adapter->flags | 4U; } global_quad_port_a = global_quad_port_a + 1; if (global_quad_port_a == 4) { global_quad_port_a = 0; } else { } goto ldv_57283; default: tmp___17 = device_can_wakeup(& (adapter->pdev)->dev); if (tmp___17) { tmp___18 = 0; } else { tmp___18 = 1; } if (tmp___18) { adapter->flags = adapter->flags & 4294967039U; } else { } } ldv_57283: ; if ((adapter->flags & 256U) != 0U) { adapter->wol = adapter->wol | 2U; } else { } if ((unsigned int )hw->mac.type == 4U && (unsigned int )pdev->subsystem_vendor == 4156U) { adapter->flags = adapter->flags | 256U; adapter->wol = 0U; } else { } device_set_wakeup_enable(& (adapter->pdev)->dev, (adapter->flags & 256U) != 0U); igb_reset(adapter); err = igb_init_i2c(adapter); if (err != 0) { dev_err((struct device const *)(& pdev->dev), "failed to init i2c interface\n"); goto err_eeprom; } else { } igb_get_hw_control(adapter); strcpy((char *)(& netdev->name), "eth%d"); err = ldv_register_netdev_30(netdev); if (err != 0) { goto err_register; } else { } netif_carrier_off(netdev); tmp___19 = dca_add_requester(& pdev->dev); if (tmp___19 == 0) { adapter->flags = adapter->flags | 2U; _dev_info((struct device const *)(& pdev->dev), "DCA enabled\n"); igb_setup_dca(adapter); } else { } if ((unsigned int )hw->mac.type == 4U && (unsigned int 
)hw->bus.func == 0U) { (*(hw->nvm.ops.read))(hw, 62, 1, & ets_word); if ((unsigned int )ets_word != 0U && (unsigned int )ets_word != 65535U) { adapter->ets = 1; } else { adapter->ets = 0; } tmp___20 = igb_sysfs_init(adapter); if (tmp___20 != 0) { dev_err((struct device const *)(& pdev->dev), "failed to allocate sysfs resources\n"); } else { } } else { adapter->ets = 0; } adapter->ei = *ei; if ((int )hw->dev_spec._82575.mas_capable) { igb_init_mas(adapter); } else { } igb_ptp_init(adapter); _dev_info((struct device const *)(& pdev->dev), "Intel(R) Gigabit Ethernet Network Connection\n"); if ((unsigned int )hw->mac.type != 5U) { _dev_info((struct device const *)(& pdev->dev), "%s: (PCIe:%s:%s) %pM\n", (char *)(& netdev->name), (unsigned int )hw->bus.speed != 6U ? ((unsigned int )hw->bus.speed == 7U ? (char *)"5.0Gb/s" : (char *)"unknown") : (char *)"2.5Gb/s", (unsigned int )hw->bus.width != 4U ? ((unsigned int )hw->bus.width != 2U ? ((unsigned int )hw->bus.width == 1U ? (char *)"Width x1" : (char *)"unknown") : (char *)"Width x2") : (char *)"Width x4", netdev->dev_addr); } else { } if ((unsigned int )hw->mac.type > 5U) { ret_val = igb_read_part_string(hw, (u8 *)(& part_str), 11U); } else { tmp___21 = igb_get_flash_presence_i210(hw); if ((int )tmp___21) { ret_val = igb_read_part_string(hw, (u8 *)(& part_str), 11U); } else { ret_val = -19; } } if (ret_val != 0) { strcpy((char *)(& part_str), "Unknown"); } else { } _dev_info((struct device const *)(& pdev->dev), "%s: PBA No: %s\n", (char *)(& netdev->name), (u8 *)(& part_str)); _dev_info((struct device const *)(& pdev->dev), "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", (adapter->flags & 8192U) == 0U ? ((int )adapter->flags & 1 ? (char *)"MSI" : (char *)"legacy") : (char *)"MSI-X", adapter->num_rx_queues, adapter->num_tx_queues); if ((unsigned int )hw->phy.media_type == 1U) { switch ((unsigned int )hw->mac.type) { case 4U: ; case 6U: ; case 7U: err = igb_set_eee_i350(hw, 1, 1); if (err == 0 && ! 
hw->dev_spec._82575.eee_disable) { adapter->eee_advert = 6U; adapter->flags = adapter->flags | 16384U; } else { } goto ldv_57295; case 5U: tmp___22 = igb_rd32(hw, 24U); if ((tmp___22 & 8388608U) != 0U) { err = igb_set_eee_i354(hw, 1, 1); if (err == 0 && ! hw->dev_spec._82575.eee_disable) { adapter->eee_advert = 6U; adapter->flags = adapter->flags | 16384U; } else { } } else { } goto ldv_57295; default: ; goto ldv_57295; } ldv_57295: ; } else { } pm_runtime_put_noidle(& pdev->dev); return (0); err_register: igb_release_hw_control(adapter); memset((void *)(& adapter->i2c_adap), 0, 1936UL); err_eeprom: tmp___23 = igb_check_reset_block(hw); if (tmp___23 == 0) { igb_reset_phy(hw); } else { } if ((unsigned long )hw->flash_address != (unsigned long )((u8 *)0U)) { iounmap((void volatile *)hw->flash_address); } else { } err_sw_init: igb_clear_interrupt_scheme(adapter); pci_iounmap(pdev, (void *)hw->hw_addr); err_ioremap: ldv_free_netdev_31(netdev); err_alloc_etherdev: tmp___24 = pci_select_bars(pdev, 512UL); pci_release_selected_regions(pdev, tmp___24); err_pci_reg: ; err_dma: pci_disable_device(pdev); return (err); } } static int igb_disable_sriov(struct pci_dev *pdev ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; struct e1000_hw *hw ; int tmp___1 ; u8 *hw_addr ; u8 *__var ; long tmp___2 ; { tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; hw = & adapter->hw; if ((unsigned long )adapter->vf_data != (unsigned long )((struct vf_data_storage *)0)) { tmp___1 = pci_vfs_assigned(pdev); if (tmp___1 != 0) { dev_warn((struct device const *)(& pdev->dev), "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); return (-1); } else { pci_disable_sriov(pdev); msleep(500U); } kfree((void const *)adapter->vf_data); adapter->vf_data = (struct vf_data_storage *)0; 
adapter->vfs_allocated_count = 0U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(1U, (void volatile *)hw_addr + 23484U); } else { } igb_rd32(hw, 8U); msleep(100U); _dev_info((struct device const *)(& pdev->dev), "IOV Disabled\n"); adapter->flags = adapter->flags | 16U; } else { } return (0); } } static int igb_enable_sriov(struct pci_dev *pdev , int num_vfs ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; int old_vfs ; int tmp___1 ; int err ; int i ; void *tmp___2 ; { tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; tmp___1 = pci_num_vf(pdev); old_vfs = tmp___1; err = 0; if ((adapter->flags & 8192U) == 0U || num_vfs > 7) { err = -1; goto out; } else { } if (num_vfs == 0) { goto out; } else { } if (old_vfs != 0) { _dev_info((struct device const *)(& pdev->dev), "%d pre-allocated VFs found - override max_vfs setting of %d\n", old_vfs, max_vfs); adapter->vfs_allocated_count = (unsigned int )old_vfs; } else { adapter->vfs_allocated_count = (unsigned int )num_vfs; } tmp___2 = kcalloc((size_t )adapter->vfs_allocated_count, 96UL, 208U); adapter->vf_data = (struct vf_data_storage *)tmp___2; if ((unsigned long )adapter->vf_data == (unsigned long )((struct vf_data_storage *)0)) { adapter->vfs_allocated_count = 0U; dev_err((struct device const *)(& pdev->dev), "Unable to allocate memory for VF Data Storage\n"); err = -12; goto out; } else { } if (old_vfs == 0) { err = pci_enable_sriov(pdev, (int )adapter->vfs_allocated_count); if (err != 0) { goto err_out; } else { } } else { } _dev_info((struct device const *)(& pdev->dev), "%d VFs allocated\n", adapter->vfs_allocated_count); i = 0; goto ldv_57319; ldv_57318: igb_vf_configure(adapter, i); i = i + 1; ldv_57319: ; if ((unsigned int )i < 
adapter->vfs_allocated_count) { goto ldv_57318; } else { } adapter->flags = adapter->flags & 4294967279U; goto out; err_out: kfree((void const *)adapter->vf_data); adapter->vf_data = (struct vf_data_storage *)0; adapter->vfs_allocated_count = 0U; out: ; return (err); } } static void igb_remove_i2c(struct igb_adapter *adapter ) { { i2c_del_adapter(& adapter->i2c_adap); return; } } static void igb_remove(struct pci_dev *pdev ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; int tmp___2 ; { tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; hw = & adapter->hw; pm_runtime_get_noresume(& pdev->dev); igb_sysfs_exit(adapter); igb_remove_i2c(adapter); igb_ptp_stop(adapter); set_bit(2L, (unsigned long volatile *)(& adapter->state)); ldv_del_timer_sync_32(& adapter->watchdog_timer); ldv_del_timer_sync_33(& adapter->phy_info_timer); ldv_cancel_work_sync_34(& adapter->reset_task); ldv_cancel_work_sync_35(& adapter->watchdog_task); if ((adapter->flags & 2U) != 0U) { _dev_info((struct device const *)(& pdev->dev), "DCA disabled\n"); dca_remove_requester(& pdev->dev); adapter->flags = adapter->flags & 4294967293U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(1U, (void volatile *)hw_addr + 23412U); } else { } } else { } igb_release_hw_control(adapter); ldv_unregister_netdev_36(netdev); igb_clear_interrupt_scheme(adapter); igb_disable_sriov(pdev); pci_iounmap(pdev, (void *)hw->hw_addr); if ((unsigned long )hw->flash_address != (unsigned long )((u8 *)0U)) { iounmap((void volatile *)hw->flash_address); } else { } tmp___2 = pci_select_bars(pdev, 512UL); pci_release_selected_regions(pdev, tmp___2); kfree((void const 
*)adapter->shadow_vfta); ldv_free_netdev_37(netdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return; } } static void igb_probe_vfs(struct igb_adapter *adapter ) { struct pci_dev *pdev ; struct e1000_hw *hw ; { pdev = adapter->pdev; hw = & adapter->hw; if ((unsigned int )hw->mac.type == 6U || (unsigned int )hw->mac.type == 7U) { return; } else { } pci_sriov_set_totalvfs(pdev, 7); igb_pci_enable_sriov(pdev, (int )max_vfs); return; } } static void igb_init_queue_configuration(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 max_rss_queues ; u32 __min1 ; u32 __min2 ; unsigned int tmp ; { hw = & adapter->hw; switch ((unsigned int )hw->mac.type) { case 7U: max_rss_queues = 2U; goto ldv_57344; case 1U: ; case 6U: max_rss_queues = 4U; goto ldv_57344; case 4U: ; if (adapter->vfs_allocated_count != 0U) { max_rss_queues = 1U; goto ldv_57344; } else { } case 2U: ; if (adapter->vfs_allocated_count != 0U) { max_rss_queues = 2U; goto ldv_57344; } else { } case 3U: ; case 5U: ; default: max_rss_queues = 8U; goto ldv_57344; } ldv_57344: __min1 = max_rss_queues; tmp = cpumask_weight(cpu_online_mask); __min2 = tmp; adapter->rss_queues = __min1 < __min2 ? 
__min1 : __min2; switch ((unsigned int )hw->mac.type) { case 1U: ; case 7U: ; goto ldv_57357; case 2U: ; if (adapter->rss_queues > 1U && adapter->vfs_allocated_count > 6U) { adapter->flags = adapter->flags | 8U; } else { } case 3U: ; case 4U: ; case 5U: ; case 6U: ; default: ; if (adapter->rss_queues > max_rss_queues / 2U) { adapter->flags = adapter->flags | 8U; } else { } goto ldv_57357; } ldv_57357: ; return; } } static int igb_sw_init(struct igb_adapter *adapter ) { struct e1000_hw *hw ; struct net_device *netdev ; struct pci_dev *pdev ; struct lock_class_key __key ; unsigned int tmp ; void *tmp___0 ; int tmp___1 ; { hw = & adapter->hw; netdev = adapter->netdev; pdev = adapter->pdev; pci_read_config_word((struct pci_dev const *)pdev, 4, & hw->bus.pci_cmd_word); adapter->tx_ring_count = 256U; adapter->rx_ring_count = 256U; adapter->rx_itr_setting = 3U; adapter->tx_itr_setting = 3U; adapter->tx_work_limit = 128U; adapter->max_frame_size = netdev->mtu + 22U; adapter->min_frame_size = 64U; spinlock_check(& adapter->stats64_lock); __raw_spin_lock_init(& adapter->stats64_lock.__annonCompField17.rlock, "&(&adapter->stats64_lock)->rlock", & __key); switch ((unsigned int )hw->mac.type) { case 2U: ; case 4U: ; if (max_vfs > 7U) { dev_warn((struct device const *)(& pdev->dev), "Maximum of 7 VFs per PF, using max\n"); tmp = 7U; adapter->vfs_allocated_count = tmp; max_vfs = tmp; } else { adapter->vfs_allocated_count = max_vfs; } if (adapter->vfs_allocated_count != 0U) { dev_warn((struct device const *)(& pdev->dev), "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); } else { } goto ldv_57373; default: ; goto ldv_57373; } ldv_57373: igb_init_queue_configuration(adapter); tmp___0 = kcalloc(128UL, 4UL, 32U); adapter->shadow_vfta = (u32 *)tmp___0; tmp___1 = igb_init_interrupt_scheme(adapter, 1); if (tmp___1 != 0) { dev_err((struct device const *)(& pdev->dev), "Unable to allocate memory for queues\n"); return (-12); } else { 
} igb_probe_vfs(adapter); igb_irq_disable(adapter); if ((unsigned int )hw->mac.type > 3U) { adapter->flags = adapter->flags & 4294967279U; } else { } set_bit(2L, (unsigned long volatile *)(& adapter->state)); return (0); } } static int __igb_open(struct net_device *netdev , bool resuming ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; struct pci_dev *pdev ; int err ; int i ; int __ret_warn_on ; long tmp___0 ; int tmp___1 ; u32 reg_data ; u32 tmp___2 ; u8 *hw_addr ; u8 *__var ; long tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; pdev = adapter->pdev; tmp___1 = constant_test_bit(0L, (unsigned long const volatile *)(& adapter->state)); if (tmp___1 != 0) { __ret_warn_on = (int )resuming; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_main.c", 3017); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (-16); } else { } if (! 
resuming) { pm_runtime_get_sync(& pdev->dev); } else { } netif_carrier_off(netdev); err = igb_setup_all_tx_resources(adapter); if (err != 0) { goto err_setup_tx; } else { } err = igb_setup_all_rx_resources(adapter); if (err != 0) { goto err_setup_rx; } else { } igb_power_up_link(adapter); igb_configure(adapter); err = igb_request_irq(adapter); if (err != 0) { goto err_req_irq; } else { } err = netif_set_real_num_tx_queues(adapter->netdev, (unsigned int )adapter->num_tx_queues); if (err != 0) { goto err_set_queues; } else { } err = netif_set_real_num_rx_queues(adapter->netdev, (unsigned int )adapter->num_rx_queues); if (err != 0) { goto err_set_queues; } else { } clear_bit(2L, (unsigned long volatile *)(& adapter->state)); i = 0; goto ldv_57391; ldv_57390: napi_enable(& (adapter->q_vector[i])->napi); i = i + 1; ldv_57391: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_57390; } else { } igb_rd32(hw, 192U); igb_irq_enable(adapter); if (adapter->vfs_allocated_count != 0U) { tmp___2 = igb_rd32(hw, 24U); reg_data = tmp___2; reg_data = reg_data | 16384U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(reg_data, (void volatile *)hw_addr + 24U); } else { } } else { } netif_tx_start_all_queues(netdev); if (! resuming) { pm_runtime_put(& pdev->dev); } else { } hw->mac.get_link_status = 1; schedule_work(& adapter->watchdog_task); return (0); err_set_queues: igb_free_irq(adapter); err_req_irq: igb_release_hw_control(adapter); igb_power_down_link(adapter); igb_free_all_rx_resources(adapter); err_setup_rx: igb_free_all_tx_resources(adapter); err_setup_tx: igb_reset(adapter); if (! 
resuming) { pm_runtime_put(& pdev->dev); } else { } return (err); } } static int igb_open(struct net_device *netdev ) { int tmp ; { tmp = __igb_open(netdev, 0); return (tmp); } } static int __igb_close(struct net_device *netdev , bool suspending ) { struct igb_adapter *adapter ; void *tmp ; struct pci_dev *pdev ; int __ret_warn_on ; int tmp___0 ; long tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; pdev = adapter->pdev; tmp___0 = constant_test_bit(1L, (unsigned long const volatile *)(& adapter->state)); __ret_warn_on = tmp___0 != 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_main.c", 3127); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if (! suspending) { pm_runtime_get_sync(& pdev->dev); } else { } igb_down(adapter); igb_free_irq(adapter); igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); if (! 
/* NOTE(review): this file is CIL/LDV-generated C (code flattened onto long
 * lines, for-loops lowered to goto/label form, constants folded).  The
 * comments below annotate intent only; no tokens were changed. */
/* Tail of __igb_close() (function header on a previous line): when not
 * suspending, drop the runtime-PM reference taken on the open path. */
suspending) { pm_runtime_put_sync(& pdev->dev); } else { } return (0); } }
/* igb_close - net_device_ops .ndo_stop callback.
 * Thin wrapper performing a normal (non-suspend) close via __igb_close(). */
static int igb_close(struct net_device *netdev ) { int tmp ; { tmp = __igb_close(netdev, 0); return (tmp); } }
/* igb_setup_tx_resources - allocate the software buffer-info array and the
 * DMA descriptor ring for one Tx queue.
 * Returns 0 on success, -12 (-ENOMEM) on allocation failure; on failure
 * everything allocated here is freed before returning. */
int igb_setup_tx_resources(struct igb_ring *tx_ring ) { struct device *dev ; int size ; void *tmp ;
 { dev = tx_ring->dev;
 /* 48U is a folded element size -- presumably sizeof(struct igb_tx_buffer);
  * TODO confirm against the driver headers. */
 size = (int )((unsigned int )tx_ring->count * 48U);
 tmp = vzalloc((unsigned long )size); tx_ring->__annonCompField117.tx_buffer_info = (struct igb_tx_buffer *)tmp; if ((unsigned long )tx_ring->__annonCompField117.tx_buffer_info == (unsigned long )((struct igb_tx_buffer *)0)) { goto err; } else { }
 /* 16 bytes per hardware descriptor; ring size is rounded up to a 4 KiB
  * boundary (4294963200U == 0xfffff000). */
 tx_ring->size = (unsigned int )tx_ring->count * 16U; tx_ring->size = (tx_ring->size + 4095U) & 4294963200U;
 /* 208U is a folded GFP mask (presumably GFP_KERNEL) -- TODO confirm. */
 tx_ring->desc = dma_alloc_attrs(dev, (size_t )tx_ring->size, & tx_ring->dma, 208U, (struct dma_attrs *)0); if ((unsigned long )tx_ring->desc == (unsigned long )((void *)0)) { goto err; } else { }
 tx_ring->next_to_use = 0U; tx_ring->next_to_clean = 0U; return (0);
 /* unwind: free the buffer-info array allocated above, NULL the pointer to
  * guard against double-free, and report -ENOMEM. */
 err: vfree((void const *)tx_ring->__annonCompField117.tx_buffer_info); tx_ring->__annonCompField117.tx_buffer_info = (struct igb_tx_buffer *)0; dev_err((struct device const *)dev, "Unable to allocate memory for the Tx descriptor ring\n"); return (-12); } }
/* igb_setup_all_tx_resources - allocate resources for every Tx queue.
 * The ldv_574xx labels encode a lowered for-loop over
 * adapter->tx_ring[0..num_tx_queues); on failure the already-initialized
 * rings [0, i) are torn down in reverse order and the error is returned. */
static int igb_setup_all_tx_resources(struct igb_adapter *adapter ) { struct pci_dev *pdev ; int i ; int err ;
 { pdev = adapter->pdev; err = 0; i = 0; goto ldv_57428;
 ldv_57427: err = igb_setup_tx_resources(adapter->tx_ring[i]);
 if (err != 0) { dev_err((struct device const *)(& pdev->dev), "Allocation for Tx Queue %u failed\n", i);
 /* reverse-order teardown of rings that were set up successfully */
 i = i - 1; goto ldv_57424; ldv_57423: igb_free_tx_resources(adapter->tx_ring[i]); i = i - 1; ldv_57424: ; if (i >= 0) { goto ldv_57423; } else { } goto ldv_57426; } else { }
 i = i + 1; ldv_57428: ; if (adapter->num_tx_queues > i) { goto ldv_57427; } else { }
 ldv_57426: ; return (err); } }
/* igb_setup_tctl - head of the Tx-control register setup routine; only the
 * local declarations are visible in this span, the body continues on the
 * following source line. */
void igb_setup_tctl(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 tctl ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ;
/* NOTE(review): CIL/LDV-generated code from the Linux igb driver. The repeated
 * "__var = 0; hw_addr = *(volatile...)&hw->hw_addr; ldv__builtin_expect(...)" idiom
 * models "only writel() when hw->hw_addr is non-NULL" for the verifier; the
 * goto ldv_NNNNN labels encode the original loops. Do not restructure. */
/* Tail of a function whose start precedes this chunk: read-modify-write of the
 * register at offset 1024 (0x400 — presumably TCTL; verify against the e1000
 * register map) after igb_config_collision_dist(). */
u8 *__var___0 ; long tmp___0 ; { hw = & adapter->hw; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(0U, (void volatile *)hw_addr + 14376U); } else { } tctl = igb_rd32(hw, 1024U); tctl = tctl & 4294963215U; tctl = tctl | 16777464U; igb_config_collision_dist(hw); tctl = tctl | 2U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(tctl, (void volatile *)hw_addr___0 + 1024U); } else { } return; } }
/* igb_configure_tx_ring: program one Tx ring's hardware registers — disable the
 * ring, busy-wait ~10 loop iterations of __const_udelay, then write ring length
 * (count * 16, i.e. 16-byte descriptors), DMA base low/high, head/tail, and
 * finally re-enable via the TXDCTL-like register (offset selected by reg_idx <= 3,
 * two register banks). ring->tail caches the MMIO tail address for later writes. */
void igb_configure_tx_ring(struct igb_adapter *adapter , struct igb_ring *ring ) { struct e1000_hw *hw ; u32 txdctl ; u64 tdba ; int reg_idx ; u8 *hw_addr ; u8 *__var ; long tmp ; unsigned long __ms ; unsigned long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___3 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___4 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___5 ; { hw = & adapter->hw; txdctl = 0U; tdba = ring->dma; reg_idx = (int )ring->reg_idx; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(0U, (void volatile *)hw_addr + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 14376 : reg_idx * 64 + 57384)); } else { } igb_rd32(hw, 8U); __ms = 10UL; goto ldv_57453; ldv_57452: __const_udelay(4295000UL); ldv_57453: tmp___0 = __ms; __ms = __ms - 1UL; if (tmp___0 != 0UL) { goto ldv_57452; } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel((unsigned int )ring->count * 16U, (void volatile *)hw_addr___0 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 14344 : reg_idx * 64 + 57352)); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel((unsigned int )tdba, (void volatile *)hw_addr___1 + (unsigned long )(reg_idx <= 3 ? (reg_idx + 56) * 256 : (reg_idx + 896) * 64)); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel((unsigned int )(tdba >> 32), (void volatile *)hw_addr___2 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 14340 : reg_idx * 64 + 57348)); } else { } ring->tail = (void *)hw->hw_addr + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 14360 : reg_idx * 64 + 57368); __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(0U, (void volatile *)hw_addr___3 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 14352 : reg_idx * 64 + 57360)); } else { } writel(0U, (void volatile *)ring->tail); txdctl = ((unsigned int )hw->mac.type == 5U ? 20U : 8U) | txdctl; txdctl = txdctl | 256U; txdctl = ((unsigned int )hw->mac.type == 2U && (adapter->flags & 8192U) != 0U ? 65536U : 1048576U) | txdctl; txdctl = txdctl | 33554432U; __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(txdctl, (void volatile *)hw_addr___4 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 14376 : reg_idx * 64 + 57384)); } else { } return; } }
/* igb_configure_tx: configure every Tx ring (loop 0..num_tx_queues-1, expressed
 * with ldv_ labels). */
static void igb_configure_tx(struct igb_adapter *adapter ) { int i ; { i = 0; goto ldv_57475; ldv_57474: igb_configure_tx_ring(adapter, adapter->tx_ring[i]); i = i + 1; ldv_57475: ; if (adapter->num_tx_queues > i) { goto ldv_57474; } else { } return; } }
/* igb_setup_rx_resources: allocate one Rx ring's buffer-info array (vzalloc,
 * count * 24 bytes) and its DMA descriptor area (count * 16 bytes, rounded up to
 * a 4 KiB multiple). On failure frees the buffer-info array and returns -12
 * (-ENOMEM); returns 0 on success with the ring indices reset. */
int igb_setup_rx_resources(struct igb_ring *rx_ring ) { struct device *dev ; int size ; void *tmp ; { dev = rx_ring->dev; size = (int )((unsigned int )rx_ring->count * 24U); tmp = vzalloc((unsigned long )size); rx_ring->__annonCompField117.rx_buffer_info = (struct igb_rx_buffer *)tmp; if ((unsigned long )rx_ring->__annonCompField117.rx_buffer_info == (unsigned long )((struct igb_rx_buffer *)0)) { goto err; } else { } rx_ring->size = (unsigned int )rx_ring->count * 16U; rx_ring->size = (rx_ring->size + 4095U) & 4294963200U; rx_ring->desc = dma_alloc_attrs(dev, (size_t )rx_ring->size, & rx_ring->dma, 208U, (struct dma_attrs *)0); if ((unsigned long )rx_ring->desc == (unsigned long )((void *)0)) { goto err; } else { } rx_ring->next_to_alloc = 0U; rx_ring->next_to_clean = 0U; rx_ring->next_to_use = 0U; return (0); err: vfree((void const *)rx_ring->__annonCompField117.rx_buffer_info); rx_ring->__annonCompField117.rx_buffer_info = (struct igb_rx_buffer *)0; dev_err((struct device const *)dev, "Unable to allocate memory for the Rx descriptor ring\n"); return (-12); } }
/* igb_setup_all_rx_resources: allocate resources for every Rx queue; on the
 * first failure, unwind by freeing the queues already set up (inner backwards
 * loop) and return the error. */
static int igb_setup_all_rx_resources(struct igb_adapter *adapter ) { struct pci_dev *pdev ; int i ; int err ; { pdev = adapter->pdev; err = 0; i = 0; goto ldv_57494; ldv_57493: err = igb_setup_rx_resources(adapter->rx_ring[i]); if (err != 0) { dev_err((struct device const *)(& pdev->dev), "Allocation for Rx Queue %u failed\n", i); i = i - 1; goto ldv_57490; ldv_57489: igb_free_rx_resources(adapter->rx_ring[i]); i = i - 1; ldv_57490: ; if (i >= 0) { goto ldv_57489; } else { } goto ldv_57492; } else { } i = i + 1; ldv_57494: ; if (adapter->num_rx_queues > i) { goto ldv_57493; } else { } ldv_57492: ; return (err); } }
/* igb_setup_mrqc (starts here; body continues on the next source line): program
 * the 10-word RSS key, the 128-entry RSS indirection table, Rx checksum enable
 * bits, and the multiple-receive-queue control register. */
static void igb_setup_mrqc(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 mrqc ; u32 rxcsum ; u32 j ; u32 num_rx_queues ; u32 rss_key[10U] ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u32 vtctl ; u32 tmp___1 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___3 ; { hw = & adapter->hw; netdev_rss_key_fill((void *)(& rss_key), 40UL); j = 0U; goto ldv_57508; ldv_57507: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(rss_key[j], (void volatile *)hw_addr + (unsigned long )((j + 5920U) * 4U)); } else { } j = j + 1U; ldv_57508: ; if (j <= 9U) { goto ldv_57507; } else { } num_rx_queues = adapter->rss_queues; switch ((unsigned int )hw->mac.type) { case 2U: ; if (adapter->vfs_allocated_count != 0U) { num_rx_queues = 2U; } else { } goto ldv_57511; default: ; goto ldv_57511; } ldv_57511: ; if (adapter->rss_indir_tbl_init != num_rx_queues) { j = 0U; goto ldv_57514; ldv_57513: adapter->rss_indir_tbl[j] = (u8 )((j * num_rx_queues) / 128U); j = j + 1U; ldv_57514: ; if (j <= 127U) { goto ldv_57513; } else { } adapter->rss_indir_tbl_init = num_rx_queues; } else { } igb_write_rss_indir_tbl(adapter); rxcsum = igb_rd32(hw, 20480U); rxcsum = rxcsum | 8192U; if ((unsigned int )adapter->hw.mac.type > 1U) { rxcsum = rxcsum | 2048U; } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(rxcsum, (void volatile *)hw_addr___0 + 20480U); } else { } mrqc = 3604480U; if ((adapter->flags & 64U) != 0U) { mrqc = mrqc | 4194304U; } else { } if ((adapter->flags & 128U) != 0U) { mrqc = mrqc | 8388608U; } else { } if (adapter->vfs_allocated_count != 0U) { if ((unsigned int )hw->mac.type > 1U) { tmp___1 =
/* Continuation of igb_setup_mrqc: when VFs are allocated, rewrite the VT control
 * register (offset 22556) with the VF count shifted into bits 7+, then choose the
 * MRQC queueing mode (VMDq+RSS=5, VMDq=3, plain RSS=2) and write MRQC (22552). */
igb_rd32(hw, 22556U); vtctl = tmp___1; vtctl = vtctl & 3758095487U; vtctl = (adapter->vfs_allocated_count << 7) | vtctl; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(vtctl, (void volatile *)hw_addr___1 + 22556U); } else { } } else { } if (adapter->rss_queues > 1U) { mrqc = mrqc | 5U; } else { mrqc = mrqc | 3U; } } else if ((unsigned int )hw->mac.type != 7U) { mrqc = mrqc | 2U; } else { } igb_vmm_control(adapter); __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(mrqc, (void volatile *)hw_addr___2 + 22552U); } else { } return; } }
/* igb_setup_rctl: build the Rx control value (offset 256 = 0x100, the RCTL
 * register per the function name) — mask reserved bits, fold in the multicast
 * filter type and enable bits, clear the Rx-disable/head-writeback region, then
 * zero a per-ring control register (10280), optionally set a 64 KiB VF mask
 * (9224) when SR-IOV is active, adjust for loopback when NETIF_F bit 38 is set
 * in netdev->features, and finally write RCTL back. */
void igb_setup_rctl(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 rctl ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; { hw = & adapter->hw; rctl = igb_rd32(hw, 256U); rctl = rctl & 4294955007U; rctl = rctl & 4294967103U; rctl = ((hw->mac.mc_filter_type << 12) | rctl) | 32770U; rctl = rctl | 67108864U; rctl = rctl & 4294770683U; rctl = rctl | 32U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(0U, (void volatile *)hw_addr + 10280U); } else { } if (adapter->vfs_allocated_count != 0U) { __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(65535U, (void volatile *)hw_addr___0 + 9224U); } else { } } else { } if (((adapter->netdev)->features & 274877906944ULL) != 0ULL) { rctl = rctl | 8421380U; rctl = rctl & 4289986559U; } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(rctl, (void volatile *)hw_addr___1 + 256U); } else { } return; } }
/* igb_set_vf_rlpml: set the per-VF long-packet max length in the VMOLR-style
 * register at (vfn + 5812) * 4; adds 4 bytes when that VF has VLANs enabled
 * (presumably VLAN tag headroom — verify against driver source). Always
 * returns 0. */
__inline static int igb_set_vf_rlpml(struct igb_adapter *adapter , int size , int vfn ) { struct e1000_hw *hw ; u32 vmolr ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; if ((unsigned int )vfn < adapter->vfs_allocated_count && (unsigned int )(adapter->vf_data + (unsigned long )vfn)->vlans_enabled != 0U) { size = size + 4; } else { } vmolr = igb_rd32(hw, (u32 )((vfn + 5812) * 4)); vmolr = vmolr & 4294950912U; vmolr = ((u32 )size | vmolr) | 65536U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(vmolr, (void volatile *)hw_addr + (unsigned long )((vfn + 5812) * 4)); } else { } return (0); } }
/* igb_rlpml_set: program the global Rx long-packet maximum length (offset
 * 20484). With SR-IOV active the PF's own limit goes through igb_set_vf_rlpml
 * and the global register is opened up to 16128 bytes. */
static void igb_rlpml_set(struct igb_adapter *adapter ) { u32 max_frame_size ; struct e1000_hw *hw ; u16 pf_id ; u8 *hw_addr ; u8 *__var ; long tmp ; { max_frame_size = adapter->max_frame_size; hw = & adapter->hw; pf_id = (u16 )adapter->vfs_allocated_count; if ((unsigned int )pf_id != 0U) { igb_set_vf_rlpml(adapter, (int )max_frame_size, (int )pf_id); max_frame_size = 16128U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(max_frame_size, (void volatile *)hw_addr + 20484U); } else { } return; } }
/* igb_set_vmolr (starts here; body continues on the next source line): configure
 * the per-VF offload register — no-op for the oldest MAC type (<= 1). */
__inline static void igb_set_vmolr(struct igb_adapter *adapter , int vfn , bool aupe ) { struct e1000_hw *hw ; u32 vmolr ; u32 dvmolr ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { hw = & adapter->hw; if ((unsigned int )hw->mac.type <= 1U) { return; } else { } vmolr = igb_rd32(hw, (u32 )((vfn + 5812) * 4)); vmolr = vmolr | 1073741824U; if ((unsigned int )hw->mac.type ==
/* Continuation of igb_set_vmolr: for mac.type == 4 also set bit 30 in the
 * per-ring DVMOLR-style register (vfn * 64 + 49208); honor the aupe flag
 * (bit 24), strip/keep other mode bits, and write the VMOLR back. */
4U) { dvmolr = igb_rd32(hw, (u32 )(vfn * 64 + 49208)); dvmolr = dvmolr | 1073741824U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(dvmolr, (void volatile *)hw_addr + (unsigned long )(vfn * 64 + 49208)); } else { } } else { } if ((int )aupe) { vmolr = vmolr | 16777216U; } else { vmolr = vmolr & 4278190079U; } vmolr = vmolr & 4160618495U; if (adapter->rss_queues > 1U && (unsigned int )vfn == adapter->vfs_allocated_count) { vmolr = vmolr | 131072U; } else { } if ((unsigned int )vfn <= adapter->vfs_allocated_count) { vmolr = vmolr | 134217728U; } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(vmolr, (void volatile *)hw_addr___0 + (unsigned long )((vfn + 5812) * 4)); } else { } return; } }
/* igb_configure_rx_ring: mirror of igb_configure_tx_ring for the Rx side —
 * disable the ring, program DMA base low/high, ring length (count * 16),
 * head/tail, the split-receive control (SRRCTL) value, the per-VF offloads via
 * igb_set_vmolr(reg_idx & 7), then enable via the RXDCTL-style register.
 * Register offsets again select between two banks on reg_idx <= 3. */
void igb_configure_rx_ring(struct igb_adapter *adapter , struct igb_ring *ring ) { struct e1000_hw *hw ; u64 rdba ; int reg_idx ; u32 srrctl ; u32 rxdctl ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___2 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___3 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___4 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___5 ; { hw = & adapter->hw; rdba = ring->dma; reg_idx = (int )ring->reg_idx; srrctl = 0U; rxdctl = 0U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(0U, (void volatile *)hw_addr + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 10280 : reg_idx * 64 + 49192)); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )rdba, (void volatile *)hw_addr___0 + (unsigned long )(reg_idx <= 3 ? (reg_idx + 40) * 256 : (reg_idx + 768) * 64)); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel((unsigned int )(rdba >> 32), (void volatile *)hw_addr___1 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 10244 : reg_idx * 64 + 49156)); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel((unsigned int )ring->count * 16U, (void volatile *)hw_addr___2 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 10248 : reg_idx * 64 + 49160)); } else { } ring->tail = (void *)hw->hw_addr + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 10264 : reg_idx * 64 + 49176); __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(0U, (void volatile *)hw_addr___3 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 10256 : reg_idx * 64 + 49168)); } else { } writel(0U, (void volatile *)ring->tail); srrctl = 1024U; srrctl = srrctl | 2U; srrctl = srrctl | 33554432U; if ((unsigned int )hw->mac.type > 2U) { srrctl = srrctl | 1073741824U; } else { } if (adapter->vfs_allocated_count != 0U || adapter->num_rx_queues > 1) { srrctl = srrctl | 2147483648U; } else { } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(srrctl, (void volatile *)hw_addr___4 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 10252 : reg_idx * 64 + 49164)); } else { } igb_set_vmolr(adapter, reg_idx & 7, 1); rxdctl = ((unsigned int )hw->mac.type == 5U ? 12U : 8U) | rxdctl; rxdctl = rxdctl | 2048U; rxdctl = ((unsigned int )hw->mac.type == 2U && (adapter->flags & 8192U) != 0U ? 65536U : 262144U) | rxdctl; rxdctl = rxdctl | 33554432U; __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(rxdctl, (void volatile *)hw_addr___5 + (unsigned long )(reg_idx <= 3 ? reg_idx * 256 + 10280 : reg_idx * 64 + 49192)); } else { } return; } }
/* igb_configure_rx: set the unicast table, install the MAC address in RAR slot 0
 * (queue-select = VF count), then configure every Rx ring. */
static void igb_configure_rx(struct igb_adapter *adapter ) { int i ; { igb_set_uta(adapter); igb_rar_set_qsel(adapter, (u8 *)(& adapter->hw.mac.addr), 0U, (int )((u8 )adapter->vfs_allocated_count)); i = 0; goto ldv_57608; ldv_57607: igb_configure_rx_ring(adapter, adapter->rx_ring[i]); i = i + 1; ldv_57608: ; if (adapter->num_rx_queues > i) { goto ldv_57607; } else { } return; } }
/* igb_free_tx_resources: drain the ring, free the buffer-info array, and release
 * the DMA descriptor area if it was allocated; safe when desc is already NULL. */
void igb_free_tx_resources(struct igb_ring *tx_ring ) { { igb_clean_tx_ring(tx_ring); vfree((void const *)tx_ring->__annonCompField117.tx_buffer_info); tx_ring->__annonCompField117.tx_buffer_info = (struct igb_tx_buffer *)0; if ((unsigned long )tx_ring->desc == (unsigned long )((void *)0)) { return; } else { } dma_free_attrs(tx_ring->dev, (size_t )tx_ring->size, tx_ring->desc, tx_ring->dma, (struct dma_attrs *)0); tx_ring->desc = (void *)0; return; } }
/* igb_free_all_tx_resources: free every non-NULL Tx ring's resources. */
static void igb_free_all_tx_resources(struct igb_adapter *adapter ) { int i ; { i = 0; goto ldv_57618; ldv_57617: ; if ((unsigned long )adapter->tx_ring[i] != (unsigned long )((struct igb_ring *)0)) { igb_free_tx_resources(adapter->tx_ring[i]); } else { } i = i + 1; ldv_57618: ; if (adapter->num_tx_queues > i) { goto ldv_57617; } else { } return; } }
/* igb_unmap_and_free_tx_resource: release one Tx buffer — free the skb (and
 * DMA-unmap as a single mapping when it owns one) or unmap a page fragment,
 * then clear the bookkeeping fields. */
void igb_unmap_and_free_tx_resource(struct igb_ring *ring , struct igb_tx_buffer *tx_buffer ) { { if ((unsigned long )tx_buffer->skb != (unsigned long )((struct sk_buff *)0)) { dev_kfree_skb_any(tx_buffer->skb); if (tx_buffer->len != 0U) { dma_unmap_single_attrs(ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1, (struct dma_attrs *)0); } else { } } else if (tx_buffer->len != 0U) { dma_unmap_page(ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1); } else { } tx_buffer->next_to_watch = (union e1000_adv_tx_desc *)0; tx_buffer->skb = (struct sk_buff *)0; tx_buffer->len = 0U; return; } }
/* igb_clean_tx_ring (starts here; body continues on the next source line). */
static void igb_clean_tx_ring(struct igb_ring *tx_ring ) { struct igb_tx_buffer *buffer_info ; unsigned long size ; u16 i ; struct
/* Continuation of igb_clean_tx_ring: free every buffer, reset the netdev queue
 * byte counters, zero the buffer-info array (count * 48 bytes) and descriptor
 * memory, and reset the ring indices. No-op when buffer-info is NULL. */
netdev_queue *tmp ; { if ((unsigned long )tx_ring->__annonCompField117.tx_buffer_info == (unsigned long )((struct igb_tx_buffer *)0)) { return; } else { } i = 0U; goto ldv_57631; ldv_57630: buffer_info = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )i; igb_unmap_and_free_tx_resource(tx_ring, buffer_info); i = (u16 )((int )i + 1); ldv_57631: ; if ((int )tx_ring->count > (int )i) { goto ldv_57630; } else { } tmp = txring_txq((struct igb_ring const *)tx_ring); netdev_tx_reset_queue(tmp); size = (unsigned long )tx_ring->count * 48UL; memset((void *)tx_ring->__annonCompField117.tx_buffer_info, 0, size); memset(tx_ring->desc, 0, (size_t )tx_ring->size); tx_ring->next_to_use = 0U; tx_ring->next_to_clean = 0U; return; } }
/* igb_clean_all_tx_rings: clean every non-NULL Tx ring. */
static void igb_clean_all_tx_rings(struct igb_adapter *adapter ) { int i ; { i = 0; goto ldv_57638; ldv_57637: ; if ((unsigned long )adapter->tx_ring[i] != (unsigned long )((struct igb_ring *)0)) { igb_clean_tx_ring(adapter->tx_ring[i]); } else { } i = i + 1; ldv_57638: ; if (adapter->num_tx_queues > i) { goto ldv_57637; } else { } return; } }
/* igb_free_rx_resources: Rx-side counterpart of igb_free_tx_resources. */
void igb_free_rx_resources(struct igb_ring *rx_ring ) { { igb_clean_rx_ring(rx_ring); vfree((void const *)rx_ring->__annonCompField117.rx_buffer_info); rx_ring->__annonCompField117.rx_buffer_info = (struct igb_rx_buffer *)0; if ((unsigned long )rx_ring->desc == (unsigned long )((void *)0)) { return; } else { } dma_free_attrs(rx_ring->dev, (size_t )rx_ring->size, rx_ring->desc, rx_ring->dma, (struct dma_attrs *)0); rx_ring->desc = (void *)0; return; } }
/* igb_free_all_rx_resources: free every non-NULL Rx ring's resources. */
static void igb_free_all_rx_resources(struct igb_adapter *adapter ) { int i ; { i = 0; goto ldv_57648; ldv_57647: ; if ((unsigned long )adapter->rx_ring[i] != (unsigned long )((struct igb_ring *)0)) { igb_free_rx_resources(adapter->rx_ring[i]); } else { } i = i + 1; ldv_57648: ; if (adapter->num_rx_queues > i) { goto ldv_57647; } else { } return; } }
/* igb_clean_rx_ring: drop any half-assembled skb, DMA-unmap and free each
 * buffer page (4 KiB, DMA_FROM_DEVICE=2), then zero the buffer-info array
 * (count * 24 bytes) and descriptor memory and reset ring indices. */
static void igb_clean_rx_ring(struct igb_ring *rx_ring ) { unsigned long size ; u16 i ; struct igb_rx_buffer *buffer_info ; { if ((unsigned long )rx_ring->__annonCompField120.__annonCompField119.skb != (unsigned long )((struct sk_buff *)0)) { consume_skb(rx_ring->__annonCompField120.__annonCompField119.skb); } else { } rx_ring->__annonCompField120.__annonCompField119.skb = (struct sk_buff *)0; if ((unsigned long )rx_ring->__annonCompField117.rx_buffer_info == (unsigned long )((struct igb_rx_buffer *)0)) { return; } else { } i = 0U; goto ldv_57658; ldv_57657: buffer_info = rx_ring->__annonCompField117.rx_buffer_info + (unsigned long )i; if ((unsigned long )buffer_info->page == (unsigned long )((struct page *)0)) { goto ldv_57656; } else { } dma_unmap_page(rx_ring->dev, buffer_info->dma, 4096UL, 2); __free_pages(buffer_info->page, 0U); buffer_info->page = (struct page *)0; ldv_57656: i = (u16 )((int )i + 1); ldv_57658: ; if ((int )rx_ring->count > (int )i) { goto ldv_57657; } else { } size = (unsigned long )rx_ring->count * 24UL; memset((void *)rx_ring->__annonCompField117.rx_buffer_info, 0, size); memset(rx_ring->desc, 0, (size_t )rx_ring->size); rx_ring->next_to_alloc = 0U; rx_ring->next_to_clean = 0U; rx_ring->next_to_use = 0U; return; } }
/* igb_clean_all_rx_rings: clean every non-NULL Rx ring. */
static void igb_clean_all_rx_rings(struct igb_adapter *adapter ) { int i ; { i = 0; goto ldv_57665; ldv_57664: ; if ((unsigned long )adapter->rx_ring[i] != (unsigned long )((struct igb_ring *)0)) { igb_clean_rx_ring(adapter->rx_ring[i]); } else { } i = i + 1; ldv_57665: ; if (adapter->num_rx_queues > i) { goto ldv_57664; } else { } return; } }
/* igb_set_mac (ndo_set_mac_address): validate the new address (-99 = -EADDRNOTAVAIL
 * on an invalid one), copy it into netdev->dev_addr and hw->mac.addr, and
 * re-program RAR slot 0. */
static int igb_set_mac(struct net_device *netdev , void *p ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; struct sockaddr *addr ; bool tmp___0 ; int tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; addr = (struct sockaddr *)p; tmp___0 = is_valid_ether_addr((u8 const *)(& addr->sa_data)); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (-99); } else { } memcpy((void *)netdev->dev_addr, (void const *)(& addr->sa_data), (size_t )netdev->addr_len); memcpy((void *)(& hw->mac.addr), (void const *)(& addr->sa_data), (size_t )netdev->addr_len); igb_rar_set_qsel(adapter, (u8 *)(& hw->mac.addr), 0U, (int )((u8 )adapter->vfs_allocated_count)); return (0); } }
/* igb_write_mc_addr_list: copy the netdev multicast list (6 bytes per address)
 * into a temporary array and hand it to igb_update_mc_addr_list. Returns the
 * multicast count, 0 when the list is empty, or -12 (-ENOMEM). The list walk is
 * an unrolled list_for_each_entry over netdev->mc.list. */
static int igb_write_mc_addr_list(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; struct netdev_hw_addr *ha ; u8 *mta_list ; int i ; void *tmp___0 ; struct list_head const *__mptr ; int tmp___1 ; struct list_head const *__mptr___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; if (netdev->mc.count == 0) { igb_update_mc_addr_list(hw, (u8 *)0U, 0U); igb_restore_vf_multicasts(adapter); return (0); } else { } tmp___0 = kzalloc((size_t )(netdev->mc.count * 6), 32U); mta_list = (u8 *)tmp___0; if ((unsigned long )mta_list == (unsigned long )((u8 *)0U)) { return (-12); } else { } i = 0; __mptr = (struct list_head const *)netdev->mc.list.next; ha = (struct netdev_hw_addr *)__mptr; goto ldv_57687; ldv_57686: tmp___1 = i; i = i + 1; memcpy((void *)mta_list + (unsigned long )(tmp___1 * 6), (void const *)(& ha->addr), 6UL); __mptr___0 = (struct list_head const *)ha->list.next; ha = (struct netdev_hw_addr *)__mptr___0; ldv_57687: ; if ((unsigned long )(& ha->list) != (unsigned long )(& netdev->mc.list)) { goto ldv_57686; } else { } igb_update_mc_addr_list(hw, mta_list, (u32 )i); kfree((void const *)mta_list); return (netdev->mc.count); } }
/* igb_write_uc_addr_list (starts here; body continues on the next source line):
 * program secondary unicast addresses into spare RAR slots. */
static int igb_write_uc_addr_list(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; unsigned int vfn ; unsigned int rar_entries ; int count ; struct netdev_hw_addr *ha ; struct list_head const *__mptr ; unsigned int tmp___0 ; struct list_head const *__mptr___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___2 ; { tmp = netdev_priv((struct net_device
/* Continuation of igb_write_uc_addr_list: returns -12 (-ENOMEM) when more
 * unicast addresses exist than free RAR slots; otherwise fills slots from the
 * top down via igb_rar_set_qsel, then zeroes all remaining RAR high/low pairs
 * (two register banks, split at slot 15) and flushes with a status read
 * (igb_rd32(hw, 8U)). Returns the number of addresses written. */
const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; vfn = adapter->vfs_allocated_count; rar_entries = ((unsigned int )hw->mac.rar_entry_count - vfn) - 1U; count = 0; if ((unsigned int )netdev->uc.count > rar_entries) { return (-12); } else { } if (netdev->uc.count != 0 && rar_entries != 0U) { __mptr = (struct list_head const *)netdev->uc.list.next; ha = (struct netdev_hw_addr *)__mptr; goto ldv_57704; ldv_57703: ; if (rar_entries == 0U) { goto ldv_57702; } else { } tmp___0 = rar_entries; rar_entries = rar_entries - 1U; igb_rar_set_qsel(adapter, (u8 *)(& ha->addr), tmp___0, (int )((u8 )vfn)); count = count + 1; __mptr___0 = (struct list_head const *)ha->list.next; ha = (struct netdev_hw_addr *)__mptr___0; ldv_57704: ; if ((unsigned long )(& ha->list) != (unsigned long )(& netdev->uc.list)) { goto ldv_57703; } else { } ldv_57702: ; } else { } goto ldv_57712; ldv_57711: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(0U, (void volatile *)hw_addr + (unsigned long )(rar_entries <= 15U ? rar_entries * 8U + 21508U : (rar_entries + 536870896U) * 8U + 21732U)); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(0U, (void volatile *)hw_addr___0 + (unsigned long )(rar_entries <= 15U ? (rar_entries + 2688U) * 8U : (rar_entries + 2700U) * 8U)); } else { } rar_entries = rar_entries - 1U; ldv_57712: ; if (rar_entries != 0U) { goto ldv_57711; } else { } igb_rd32(hw, 8U); return (count); } }
/* igb_set_rx_mode (ndo_set_rx_mode): translate netdev promiscuous/allmulti
 * flags (bits 256/512) and the unicast/multicast lists into RCTL bits
 * (offset 256) and, for mac.type 2..4, into the PF's VMOLR-style register at
 * (vfn + 5812) * 4, falling back to promiscuous bits when list programming
 * fails. */
static void igb_set_rx_mode(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; unsigned int vfn ; u32 rctl ; u32 vmolr ; int count ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u32 tmp___1 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; vfn = adapter->vfs_allocated_count; vmolr = 0U; rctl = igb_rd32(hw, 256U); rctl = rctl & 4294705127U; if ((netdev->flags & 256U) != 0U) { if (adapter->vfs_allocated_count != 0U) { rctl = rctl | 262144U; } else { } rctl = rctl | 24U; vmolr = vmolr | 335544320U; } else { if ((netdev->flags & 512U) != 0U) { rctl = rctl | 16U; vmolr = vmolr | 268435456U; } else { count = igb_write_mc_addr_list(netdev); if (count < 0) { rctl = rctl | 16U; vmolr = vmolr | 268435456U; } else if (count != 0) { vmolr = vmolr | 33554432U; } else { } } count = igb_write_uc_addr_list(netdev); if (count < 0) { rctl = rctl | 8U; vmolr = vmolr | 67108864U; } else { } rctl = rctl | 262144U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(rctl, (void volatile *)hw_addr + 256U); } else { } if ((unsigned int )hw->mac.type <= 1U || (unsigned int )hw->mac.type > 4U) { return; } else { } tmp___1 = igb_rd32(hw, (vfn + 5812U) * 4U); vmolr = (tmp___1 & 3925868543U) | vmolr; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(vmolr, (void volatile *)hw_addr___0 + (unsigned long )((vfn + 5812U) * 4U)); } else { } igb_restore_vf_multicasts(adapter); return; } }
/* igb_check_wvbr: on mac.type 2 or 4, read the wrong-VM-behavior register
 * (offset 13652) and accumulate any set bits into adapter->wvbr. */
static void igb_check_wvbr(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 wvbr ; { hw = & adapter->hw; wvbr = 0U; switch ((unsigned int )hw->mac.type) { case 2U: ; case 4U: wvbr = igb_rd32(hw, 13652U); if (wvbr == 0U) { return; } else { } goto ldv_57736; default: ; goto ldv_57736; } ldv_57736: adapter->wvbr = adapter->wvbr | wvbr; return; } }
/* igb_spoof_check: warn once per VF whose spoof bit (bit j or j+8) is latched
 * in adapter->wvbr, then clear those bits. */
static void igb_spoof_check(struct igb_adapter *adapter ) { int j ; { if (adapter->wvbr == 0U) { return; } else { } j = 0; goto ldv_57743; ldv_57742: ; if ((adapter->wvbr & (u32 )(1 << j)) != 0U || (adapter->wvbr & (u32 )(1 << (j + 8))) != 0U) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "Spoof event(s) detected on VF %d\n", j); adapter->wvbr = adapter->wvbr & (u32 )(~ ((1 << j) | (1 << (j + 8)))); } else { } j = j + 1; ldv_57743: ; if ((unsigned int )j < adapter->vfs_allocated_count) { goto ldv_57742; } else { } return; } }
/* igb_update_phy_info: timer callback — refresh PHY info (data is the adapter
 * pointer cast to unsigned long, the pre-timer_setup kernel timer convention). */
static void igb_update_phy_info(unsigned long data ) { struct igb_adapter *adapter ; { adapter = (struct igb_adapter *)data; igb_get_phy_info(& adapter->hw); return; } }
/* igb_has_link (starts here; body continues on the next source line): determine
 * link state from media type; copper (case 1U) with no pending get_link_status
 * short-circuits to true, and note that case 1U otherwise falls through to
 * case 3U's check_for_link call. */
bool igb_has_link(struct igb_adapter *adapter ) { struct e1000_hw *hw ; bool link_active ; bool tmp ; int tmp___0 ; { hw = & adapter->hw; link_active = 0; switch ((unsigned int )hw->phy.media_type) { case 1U: ; if (! hw->mac.get_link_status) { return (1); } else { } case 3U: (*(hw->mac.ops.check_for_link))(hw); link_active = (bool )(!
((int )hw->mac.get_link_status != 0)); goto ldv_57756; default: ; case 0U: ; goto ldv_57756; } ldv_57756: ; if (((unsigned int )hw->mac.type == 6U || (unsigned int )hw->mac.type == 7U) && hw->phy.id == 21040128U) { tmp = netif_carrier_ok((struct net_device const *)adapter->netdev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { adapter->flags = adapter->flags & 4294966783U; } else if ((adapter->flags & 512U) == 0U) { adapter->flags = adapter->flags | 512U; adapter->link_check_timeout = jiffies; } else { } } else { } return (link_active); } } static bool igb_thermal_sensor_event(struct e1000_hw *hw , u32 event ) { bool ret ; u32 ctrl_ext ; u32 thstat ; { ret = 0; if ((unsigned int )hw->mac.type == 4U) { thstat = igb_rd32(hw, 33040U); ctrl_ext = igb_rd32(hw, 24U); if ((unsigned int )hw->phy.media_type == 1U && (ctrl_ext & 8388608U) == 0U) { ret = (thstat & event) != 0U; } else { } } else { } return (ret); } } static void igb_check_lvmmc(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 lvmmc ; int tmp ; long tmp___0 ; { hw = & adapter->hw; lvmmc = igb_rd32(hw, 13640U); if (lvmmc != 0U) { tmp = net_ratelimit(); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { netdev_warn((struct net_device const *)adapter->netdev, "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", lvmmc); } else { } } else { } return; } } static void igb_watchdog(unsigned long data ) { struct igb_adapter *adapter ; { adapter = (struct igb_adapter *)data; schedule_work(& adapter->watchdog_task); return; } } static void igb_watchdog_task(struct work_struct *work ) { struct igb_adapter *adapter ; struct work_struct const *__mptr ; struct e1000_hw *hw ; struct e1000_phy_info *phy ; struct net_device *netdev ; u32 link ; int i ; u32 connsw ; bool tmp ; u32 ctrl ; bool tmp___0 ; unsigned long tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; bool tmp___5 ; unsigned long tmp___6 ; int tmp___7 ; bool tmp___8 ; int tmp___9 ; bool tmp___10 ; struct igb_ring 
*tx_ring ; int tmp___11 ; bool tmp___12 ; int tmp___13 ; u32 eics ; u8 *hw_addr ; u8 *__var ; long tmp___14 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___15 ; unsigned long tmp___16 ; unsigned long tmp___17 ; int tmp___18 ; { __mptr = (struct work_struct const *)work; adapter = (struct igb_adapter *)__mptr + 0xfffffffffffffb08UL; hw = & adapter->hw; phy = & hw->phy; netdev = adapter->netdev; tmp = igb_has_link(adapter); link = (u32 )tmp; if ((adapter->flags & 512U) != 0U) { if ((long )((adapter->link_check_timeout - (unsigned long )jiffies) + 250UL) < 0L) { adapter->flags = adapter->flags & 4294966783U; } else { link = 0U; } } else { } if ((adapter->flags & 4096U) != 0U) { if ((unsigned int )hw->phy.media_type == 1U) { connsw = igb_rd32(hw, 52U); if ((connsw & 1U) == 0U) { link = 0U; } else { } } else { } } else { } if (link != 0U) { if ((int )hw->dev_spec._82575.media_changed) { hw->dev_spec._82575.media_changed = 0; adapter->flags = adapter->flags | 1024U; igb_reset(adapter); } else { } pm_runtime_resume(netdev->dev.parent); tmp___3 = netif_carrier_ok((struct net_device const *)netdev); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { (*(hw->mac.ops.get_speed_and_duplex))(hw, & adapter->link_speed, & adapter->link_duplex); ctrl = igb_rd32(hw, 0U); netdev_info((struct net_device const *)netdev, "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", (char *)(& netdev->name), (int )adapter->link_speed, (unsigned int )adapter->link_duplex == 2U ? (char *)"Full" : (char *)"Half", (ctrl & 268435456U) == 0U || (ctrl & 134217728U) == 0U ? ((ctrl & 134217728U) == 0U ? ((ctrl & 268435456U) != 0U ? (char *)"TX" : (char *)"None") : (char *)"RX") : (char *)"RX/TX"); if ((adapter->flags & 16384U) != 0U && (unsigned int )adapter->link_duplex == 1U) { _dev_info((struct device const *)(& (adapter->pdev)->dev), "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex.\n"); adapter->hw.dev_spec._82575.eee_disable = 1; adapter->flags = adapter->flags & 4294950911U; } else { } igb_check_downshift(hw); if ((int )phy->speed_downgraded) { netdev_warn((struct net_device const *)netdev, "Link Speed was downgraded by SmartSpeed\n"); } else { } tmp___0 = igb_thermal_sensor_event(hw, 2U); if ((int )tmp___0) { netdev_info((struct net_device const *)netdev, "The network adapter link speed was downshifted because it overheated\n"); } else { } adapter->tx_timeout_factor = 1U; switch ((int )adapter->link_speed) { case 10: adapter->tx_timeout_factor = 14U; goto ldv_57795; case 100: ; goto ldv_57795; } ldv_57795: netif_carrier_on(netdev); igb_ping_all_vfs(adapter); igb_check_vf_rate_limit(adapter); tmp___2 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___2 == 0) { tmp___1 = round_jiffies((unsigned long )jiffies + 500UL); ldv_mod_timer_38(& adapter->phy_info_timer, tmp___1); } else { } } else { } } else { tmp___10 = netif_carrier_ok((struct net_device const *)netdev); if ((int )tmp___10) { adapter->link_speed = 0U; adapter->link_duplex = 0U; tmp___5 = igb_thermal_sensor_event(hw, 1U); if ((int )tmp___5) { netdev_err((struct net_device const *)netdev, "The network adapter was stopped because it overheated\n"); } else { } netdev_info((struct net_device const *)netdev, "igb: %s NIC Link is Down\n", (char *)(& netdev->name)); netif_carrier_off(netdev); igb_ping_all_vfs(adapter); tmp___7 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___7 == 0) { tmp___6 = round_jiffies((unsigned long )jiffies + 500UL); ldv_mod_timer_39(& adapter->phy_info_timer, tmp___6); } else { } if ((adapter->flags & 4096U) != 0U) { igb_check_swap_media(adapter); if ((adapter->flags & 1024U) != 0U) { schedule_work(& adapter->reset_task); return; } else { } } else { } pm_schedule_suspend(netdev->dev.parent, 5000U); } else { tmp___8 = netif_carrier_ok((struct 
net_device const *)netdev); if (tmp___8) { tmp___9 = 0; } else { tmp___9 = 1; } if (tmp___9 && (adapter->flags & 4096U) != 0U) { igb_check_swap_media(adapter); if ((adapter->flags & 1024U) != 0U) { schedule_work(& adapter->reset_task); return; } else { } } else { } } } spin_lock(& adapter->stats64_lock); igb_update_stats(adapter, & adapter->stats64); spin_unlock(& adapter->stats64_lock); i = 0; goto ldv_57799; ldv_57798: tx_ring = adapter->tx_ring[i]; tmp___12 = netif_carrier_ok((struct net_device const *)netdev); if (tmp___12) { tmp___13 = 0; } else { tmp___13 = 1; } if (tmp___13) { tmp___11 = igb_desc_unused(tx_ring); if (tmp___11 + 1 < (int )tx_ring->count) { adapter->tx_timeout_count = adapter->tx_timeout_count + 1U; schedule_work(& adapter->reset_task); return; } else { } } else { } set_bit(3L, (unsigned long volatile *)(& tx_ring->flags)); i = i + 1; ldv_57799: ; if (adapter->num_tx_queues > i) { goto ldv_57798; } else { } if ((adapter->flags & 8192U) != 0U) { eics = 0U; i = 0; goto ldv_57803; ldv_57802: eics = (adapter->q_vector[i])->eims_value | eics; i = i + 1; ldv_57803: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_57802; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___14 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___14 == 0L) { writel(eics, (void volatile *)hw_addr + 5408U); } else { } } else { __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___15 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___15 == 0L) { writel(16U, (void volatile *)hw_addr___0 + 200U); } else { } } igb_spoof_check(adapter); igb_ptp_rx_hang(adapter); if ((unsigned int )adapter->hw.mac.type == 4U || (unsigned int )adapter->hw.mac.type == 5U) { igb_check_lvmmc(adapter); } else { } tmp___18 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___18 == 0) { if ((adapter->flags & 512U) != 0U) 
{ tmp___16 = round_jiffies((unsigned long )jiffies + 250UL); ldv_mod_timer_40(& adapter->watchdog_timer, tmp___16); } else { tmp___17 = round_jiffies((unsigned long )jiffies + 500UL); ldv_mod_timer_41(& adapter->watchdog_timer, tmp___17); } } else { } return; } } static void igb_update_ring_itr(struct igb_q_vector *q_vector ) { int new_val ; int avg_wire_size ; struct igb_adapter *adapter ; unsigned int packets ; u32 __max1 ; u32 __max2 ; int _min1 ; int _min2 ; { new_val = (int )q_vector->itr_val; avg_wire_size = 0; adapter = q_vector->adapter; if ((unsigned int )adapter->link_speed != 1000U) { new_val = 980; goto set_itr_val; } else { } packets = q_vector->rx.total_packets; if (packets != 0U) { avg_wire_size = (int )(q_vector->rx.total_bytes / packets); } else { } packets = q_vector->tx.total_packets; if (packets != 0U) { __max1 = (u32 )avg_wire_size; __max2 = q_vector->tx.total_bytes / packets; avg_wire_size = (int )(__max1 > __max2 ? __max1 : __max2); } else { } if (avg_wire_size == 0) { goto clear_counts; } else { } avg_wire_size = avg_wire_size + 24; _min1 = avg_wire_size; _min2 = 3000; avg_wire_size = _min1 < _min2 ? 
_min1 : _min2; if (avg_wire_size > 300 && avg_wire_size <= 1199) { new_val = avg_wire_size / 3; } else { new_val = avg_wire_size / 2; } if (new_val <= 195 && (((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0) && adapter->rx_itr_setting == 3U) || ((unsigned long )q_vector->rx.ring == (unsigned long )((struct igb_ring *)0) && adapter->tx_itr_setting == 3U))) { new_val = 196; } else { } set_itr_val: ; if ((int )q_vector->itr_val != new_val) { q_vector->itr_val = (u16 )new_val; q_vector->set_itr = 1U; } else { } clear_counts: q_vector->rx.total_bytes = 0U; q_vector->rx.total_packets = 0U; q_vector->tx.total_bytes = 0U; q_vector->tx.total_packets = 0U; return; } }
/* igb_update_itr - per-ring-container interrupt-throttle state machine
 * (CIL-flattened).  Classifies the traffic seen on one ring container
 * since the last interrupt into one of three buckets stored in
 * ring_container->itr (0/1/2; lowest_latency / low_latency / bulk_latency
 * in the upstream driver -- confirm against igb.h) using the packet and
 * byte counters, then zeroes the counters for the next sampling window.
 * The numeric thresholds (8000, 512, 10000, 1200, 2000, 25000, 1499, ...)
 * are the upstream igb heuristics reproduced verbatim. */
static void igb_update_itr(struct igb_q_vector *q_vector , struct igb_ring_container *ring_container ) { unsigned int packets ; unsigned int bytes ; u8 itrval ; { packets = ring_container->total_packets; bytes = ring_container->total_bytes; itrval = ring_container->itr;
/* No traffic since the last service: keep the current bucket untouched. */
if (packets == 0U) { return; } else { } switch ((int )itrval) { case 0: ;
/* bucket 0: large average frames or noticeable byte load promote us. */
if (bytes / packets > 8000U) { itrval = 2U; } else if (packets <= 4U && bytes > 512U) { itrval = 1U; } else { } goto ldv_57839; case 1: ;
/* bucket 1: may be promoted to bulk (2) or demoted back to 0. */
if (bytes > 10000U) { if (bytes / packets > 8000U) { itrval = 2U; } else if (packets <= 9U || bytes / packets > 1200U) { itrval = 2U; } else if (packets > 35U) { itrval = 0U; } else { } } else if (bytes / packets > 2000U) { itrval = 2U; } else if (packets <= 2U && bytes <= 511U) { itrval = 0U; } else { } goto ldv_57839; case 2: ;
/* bucket 2 (bulk): fall back to bucket 1 when traffic thins out. */
if (bytes > 25000U) { if (packets > 35U) { itrval = 1U; } else { } } else if (bytes <= 1499U) { itrval = 1U; } else { } goto ldv_57839; }
/* Reset the sampling window and record the new bucket. */
ldv_57839: ring_container->total_bytes = 0U; ring_container->total_packets = 0U; ring_container->itr = itrval; return; } }
/* igb_set_itr (head only; the body continues on the next source line):
 * combines the rx/tx bucket decisions into a new itr_val for the vector. */
static void igb_set_itr(struct igb_q_vector *q_vector ) { struct igb_adapter *adapter ; u32 new_itr ; u8 current_itr ; u8 _max1 ; u8 _max2 ; u32 _max1___0 ; u32 _max2___0 ; { adapter = q_vector->adapter; new_itr = (u32 
)q_vector->itr_val; current_itr = 0U; if ((unsigned int )adapter->link_speed != 1000U) { current_itr = 0U; new_itr = 980U; goto set_itr_now; } else { } igb_update_itr(q_vector, & q_vector->tx); igb_update_itr(q_vector, & q_vector->rx); _max1 = q_vector->rx.itr; _max2 = q_vector->tx.itr; current_itr = (u8 )((int )_max1 > (int )_max2 ? _max1 : _max2); if ((unsigned int )current_itr == 0U && (((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0) && adapter->rx_itr_setting == 3U) || ((unsigned long )q_vector->rx.ring == (unsigned long )((struct igb_ring *)0) && adapter->tx_itr_setting == 3U))) { current_itr = 1U; } else { } switch ((int )current_itr) { case 0: new_itr = 56U; goto ldv_57853; case 1: new_itr = 196U; goto ldv_57853; case 2: new_itr = 980U; goto ldv_57853; default: ; goto ldv_57853; } ldv_57853: ; set_itr_now: ; if ((u32 )q_vector->itr_val != new_itr) { if ((u32 )q_vector->itr_val < new_itr) { _max1___0 = ((u32 )q_vector->itr_val * new_itr) / ((u32 )((int )q_vector->itr_val >> 2) + new_itr); _max2___0 = new_itr; new_itr = _max1___0 > _max2___0 ? _max1___0 : _max2___0; } else { new_itr = new_itr; } q_vector->itr_val = (u16 )new_itr; q_vector->set_itr = 1U; } else { } return; } } static void igb_tx_ctxtdesc(struct igb_ring *tx_ring , u32 vlan_macip_lens , u32 type_tucmd , u32 mss_l4len_idx ) { struct e1000_adv_tx_context_desc *context_desc ; u16 i ; int tmp ; { i = tx_ring->next_to_use; context_desc = (struct e1000_adv_tx_context_desc *)tx_ring->desc + (unsigned long )i; i = (u16 )((int )i + 1); tx_ring->next_to_use = (int )tx_ring->count > (int )i ? 
i : 0U; type_tucmd = type_tucmd | 538968064U; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& tx_ring->flags)); if (tmp != 0) { mss_l4len_idx = (u32 )((int )tx_ring->reg_idx << 4) | mss_l4len_idx; } else { } context_desc->vlan_macip_lens = vlan_macip_lens; context_desc->seqnum_seed = 0U; context_desc->type_tucmd_mlhl = type_tucmd; context_desc->mss_l4len_idx = mss_l4len_idx; return; } } static int igb_tso(struct igb_ring *tx_ring , struct igb_tx_buffer *first , u8 *hdr_len ) { struct sk_buff *skb ; u32 vlan_macip_lens ; u32 type_tucmd ; u32 mss_l4len_idx ; u32 l4len ; int err ; bool tmp ; int tmp___0 ; struct iphdr *iph ; struct iphdr *tmp___1 ; struct tcphdr *tmp___2 ; __sum16 tmp___3 ; struct ipv6hdr *tmp___4 ; struct tcphdr *tmp___5 ; struct ipv6hdr *tmp___6 ; struct ipv6hdr *tmp___7 ; __sum16 tmp___8 ; bool tmp___9 ; int tmp___10 ; unsigned char *tmp___11 ; unsigned char *tmp___12 ; int tmp___13 ; { skb = first->skb; if ((unsigned int )*((unsigned char *)skb + 145UL) != 6U) { return (0); } else { } tmp = skb_is_gso((struct sk_buff const *)skb); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } err = skb_cow_head(skb, 0U); if (err < 0) { return (err); } else { } type_tucmd = 2048U; if ((unsigned int )first->protocol == 8U) { tmp___1 = ip_hdr((struct sk_buff const *)skb); iph = tmp___1; iph->tot_len = 0U; iph->check = 0U; tmp___2 = tcp_hdr((struct sk_buff const *)skb); tmp___3 = csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 6, 0U); tmp___2->check = ~ ((int )tmp___3); type_tucmd = type_tucmd | 1024U; first->tx_flags = first->tx_flags | 50U; } else { tmp___9 = skb_is_gso_v6((struct sk_buff const *)skb); if ((int )tmp___9) { tmp___4 = ipv6_hdr((struct sk_buff const *)skb); tmp___4->payload_len = 0U; tmp___5 = tcp_hdr((struct sk_buff const *)skb); tmp___6 = ipv6_hdr((struct sk_buff const *)skb); tmp___7 = ipv6_hdr((struct sk_buff const *)skb); tmp___8 = csum_ipv6_magic((struct in6_addr const *)(& tmp___7->saddr), 
(struct in6_addr const *)(& tmp___6->daddr), 0U, 6, 0U); tmp___5->check = ~ ((int )tmp___8); first->tx_flags = first->tx_flags | 34U; } else { } } l4len = tcp_hdrlen((struct sk_buff const *)skb); tmp___10 = skb_transport_offset((struct sk_buff const *)skb); *hdr_len = (int )((u8 )tmp___10) + (int )((u8 )l4len); tmp___11 = skb_end_pointer((struct sk_buff const *)skb); first->gso_segs = ((struct skb_shared_info *)tmp___11)->gso_segs; first->bytecount = first->bytecount + (unsigned int )(((int )first->gso_segs + -1) * (int )*hdr_len); mss_l4len_idx = l4len << 8; tmp___12 = skb_end_pointer((struct sk_buff const *)skb); mss_l4len_idx = (u32 )((int )((struct skb_shared_info *)tmp___12)->gso_size << 16) | mss_l4len_idx; vlan_macip_lens = skb_network_header_len((struct sk_buff const *)skb); tmp___13 = skb_network_offset((struct sk_buff const *)skb); vlan_macip_lens = (u32 )(tmp___13 << 9) | vlan_macip_lens; vlan_macip_lens = (first->tx_flags & 4294901760U) | vlan_macip_lens; igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return (1); } } static void igb_tx_csum(struct igb_ring *tx_ring , struct igb_tx_buffer *first ) { struct sk_buff *skb ; u32 vlan_macip_lens ; u32 mss_l4len_idx ; u32 type_tucmd ; u8 l4_hdr ; u32 tmp ; struct iphdr *tmp___0 ; u32 tmp___1 ; struct ipv6hdr *tmp___2 ; int tmp___3 ; long tmp___4 ; unsigned int tmp___5 ; int tmp___6 ; long tmp___7 ; int tmp___8 ; { skb = first->skb; vlan_macip_lens = 0U; mss_l4len_idx = 0U; type_tucmd = 0U; if ((unsigned int )*((unsigned char *)skb + 145UL) != 6U) { if ((first->tx_flags & 1U) == 0U) { return; } else { } } else { l4_hdr = 0U; switch ((int )first->protocol) { case 8: tmp = skb_network_header_len((struct sk_buff const *)skb); vlan_macip_lens = tmp | vlan_macip_lens; type_tucmd = type_tucmd | 1024U; tmp___0 = ip_hdr((struct sk_buff const *)skb); l4_hdr = tmp___0->protocol; goto ldv_57890; case 56710: tmp___1 = skb_network_header_len((struct sk_buff const *)skb); vlan_macip_lens = tmp___1 | 
vlan_macip_lens; tmp___2 = ipv6_hdr((struct sk_buff const *)skb); l4_hdr = tmp___2->nexthdr; goto ldv_57890; default: tmp___3 = net_ratelimit(); tmp___4 = ldv__builtin_expect(tmp___3 != 0, 0L); if (tmp___4 != 0L) { dev_warn((struct device const *)tx_ring->dev, "partial checksum but proto=%x!\n", (int )first->protocol); } else { } goto ldv_57890; } ldv_57890: ; switch ((int )l4_hdr) { case 6: type_tucmd = type_tucmd | 2048U; tmp___5 = tcp_hdrlen((struct sk_buff const *)skb); mss_l4len_idx = tmp___5 << 8; goto ldv_57894; case 132: type_tucmd = type_tucmd | 4096U; mss_l4len_idx = 3072U; goto ldv_57894; case 17: mss_l4len_idx = 2048U; goto ldv_57894; default: tmp___6 = net_ratelimit(); tmp___7 = ldv__builtin_expect(tmp___6 != 0, 0L); if (tmp___7 != 0L) { dev_warn((struct device const *)tx_ring->dev, "partial checksum but l4 proto=%x!\n", (int )l4_hdr); } else { } goto ldv_57894; } ldv_57894: first->tx_flags = first->tx_flags | 32U; } tmp___8 = skb_network_offset((struct sk_buff const *)skb); vlan_macip_lens = (u32 )(tmp___8 << 9) | vlan_macip_lens; vlan_macip_lens = (first->tx_flags & 4294901760U) | vlan_macip_lens; igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return; } } static u32 igb_tx_cmd_type(struct sk_buff *skb , u32 tx_flags ) { u32 cmd_type ; { cmd_type = 573571072U; cmd_type = (tx_flags & 1U) * 1073741824U | cmd_type; cmd_type = (tx_flags & 2U) * 1073741824U | cmd_type; cmd_type = (tx_flags & 4U) * 131072U | cmd_type; cmd_type = (unsigned int )skb->no_fcs * 33554432U ^ cmd_type; return (cmd_type); } } static void igb_tx_olinfo_status(struct igb_ring *tx_ring , union e1000_adv_tx_desc *tx_desc , u32 tx_flags , unsigned int paylen ) { u32 olinfo_status ; int tmp ; { olinfo_status = paylen << 14; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& tx_ring->flags)); if (tmp != 0) { olinfo_status = (u32 )((int )tx_ring->reg_idx << 4) | olinfo_status; } else { } olinfo_status = (tx_flags & 32U) * 16U | olinfo_status; 
olinfo_status = (tx_flags & 16U) * 16U | olinfo_status; tx_desc->read.olinfo_status = olinfo_status; return; } }
/* __igb_maybe_stop_tx - slow path of the Tx-ring flow control.
 * Stops the subqueue first, forces that store to be globally visible with
 * a full memory barrier (mfence), then re-reads the free-descriptor
 * count.  If there is still not room for `size` descriptors it returns
 * -16 (-EBUSY) with the queue left stopped (the cleanup path re-wakes
 * it); otherwise the race resolved in our favour, so the queue is
 * re-enabled and the restart_queue2 counter is bumped.
 * NOTE(review): upstream igb brackets the counter update with
 * u64_stats_update_begin()/u64_stats_update_end(); this CIL-generated
 * source calls u64_stats_init() on both sides -- presumably the LDV
 * model of the seqcount, confirm against the original igb_main.c. */
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring , u16 const size ) { struct net_device *netdev ; int tmp ; { netdev = tx_ring->netdev; netif_stop_subqueue(netdev, (int )tx_ring->queue_index); __asm__ volatile ("mfence": : : "memory"); tmp = igb_desc_unused(tx_ring); if (tmp < (int )size) { return (-16); } else { } netif_wake_subqueue(netdev, (int )tx_ring->queue_index); u64_stats_init(& tx_ring->__annonCompField120.__annonCompField118.tx_syncp2); tx_ring->__annonCompField120.__annonCompField118.tx_stats.restart_queue2 = tx_ring->__annonCompField120.__annonCompField118.tx_stats.restart_queue2 + 1ULL; u64_stats_init(& tx_ring->__annonCompField120.__annonCompField118.tx_syncp2); return (0); } }
/* igb_maybe_stop_tx - fast path: returns 0 immediately when at least
 * `size` descriptors are free, otherwise defers to the stop/recheck
 * slow path above. */
__inline static int igb_maybe_stop_tx(struct igb_ring *tx_ring , u16 const size ) { int tmp ; int tmp___0 ; { tmp = igb_desc_unused(tx_ring); if (tmp >= (int )size) { return (0); } else { } tmp___0 = __igb_maybe_stop_tx(tx_ring, (int )size); return (tmp___0); } }
/* igb_tx_map (head only; body continues on the following source lines):
 * DMA-maps the skb head and fragments and writes the data descriptors. */
static void igb_tx_map(struct igb_ring *tx_ring , struct igb_tx_buffer *first , u8 const hdr_len ) { struct sk_buff *skb ; struct igb_tx_buffer *tx_buffer ; union e1000_adv_tx_desc *tx_desc ; struct skb_frag_struct *frag ; dma_addr_t dma ; unsigned int data_len ; unsigned int size ; u32 tx_flags ; u32 cmd_type ; u32 tmp ; u16 i ; unsigned char *tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; struct netdev_queue *tmp___4 ; struct netdev_queue *tmp___5 ; bool tmp___6 ; { skb = first->skb; tx_flags = first->tx_flags; tmp = igb_tx_cmd_type(skb, tx_flags); cmd_type = tmp; i = tx_ring->next_to_use; tx_desc = (union e1000_adv_tx_desc *)tx_ring->desc + (unsigned long )i; igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - (unsigned int )hdr_len); size = skb_headlen((struct sk_buff const *)skb); data_len = skb->data_len; dma = dma_map_single_attrs(tx_ring->dev, (void *)skb->data, (size_t )size, 
1, (struct dma_attrs *)0); tx_buffer = first; tmp___0 = skb_end_pointer((struct sk_buff const *)skb); frag = (struct skb_frag_struct *)(& ((struct skb_shared_info *)tmp___0)->frags); ldv_57939: tmp___1 = dma_mapping_error(tx_ring->dev, dma); if (tmp___1 != 0) { goto dma_error; } else { } tx_buffer->len = size; tx_buffer->dma = dma; tx_desc->read.buffer_addr = dma; goto ldv_57936; ldv_57935: tx_desc->read.cmd_type_len = cmd_type ^ 32768U; i = (u16 )((int )i + 1); tx_desc = tx_desc + 1; if ((int )tx_ring->count == (int )i) { tx_desc = (union e1000_adv_tx_desc *)tx_ring->desc; i = 0U; } else { } tx_desc->read.olinfo_status = 0U; dma = dma + 32768ULL; size = size - 32768U; tx_desc->read.buffer_addr = dma; ldv_57936: tmp___2 = ldv__builtin_expect(size > 32768U, 0L); if (tmp___2 != 0L) { goto ldv_57935; } else { } tmp___3 = ldv__builtin_expect(data_len == 0U, 1L); if (tmp___3 != 0L) { goto ldv_57938; } else { } tx_desc->read.cmd_type_len = cmd_type ^ size; i = (u16 )((int )i + 1); tx_desc = tx_desc + 1; if ((int )tx_ring->count == (int )i) { tx_desc = (union e1000_adv_tx_desc *)tx_ring->desc; i = 0U; } else { } tx_desc->read.olinfo_status = 0U; size = skb_frag_size((skb_frag_t const *)frag); data_len = data_len - size; dma = skb_frag_dma_map(tx_ring->dev, (skb_frag_t const *)frag, 0UL, (size_t )size, 1); tx_buffer = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )i; frag = frag + 1; goto ldv_57939; ldv_57938: cmd_type = (size | cmd_type) | 150994944U; tx_desc->read.cmd_type_len = cmd_type; tmp___4 = txring_txq((struct igb_ring const *)tx_ring); netdev_tx_sent_queue(tmp___4, first->bytecount); first->time_stamp = jiffies; __asm__ volatile ("sfence": : : "memory"); first->next_to_watch = tx_desc; i = (u16 )((int )i + 1); if ((int )tx_ring->count == (int )i) { i = 0U; } else { } tx_ring->next_to_use = i; igb_maybe_stop_tx(tx_ring, 21); tmp___5 = txring_txq((struct igb_ring const *)tx_ring); tmp___6 = netif_xmit_stopped((struct netdev_queue const *)tmp___5); if 
((int )tmp___6 || (unsigned int )*((unsigned char *)skb + 142UL) == 0U) { writel((unsigned int )i, (void volatile *)tx_ring->tail); __asm__ volatile ("": : : "memory"); } else { } return; dma_error: dev_err((struct device const *)tx_ring->dev, "TX DMA map failed\n"); ldv_57941: tx_buffer = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )i; igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); if ((unsigned long )tx_buffer == (unsigned long )first) { goto ldv_57940; } else { } if ((unsigned int )i == 0U) { i = tx_ring->count; } else { } i = (u16 )((int )i - 1); goto ldv_57941; ldv_57940: tx_ring->next_to_use = i; return; } } netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb , struct igb_ring *tx_ring ) { struct igb_tx_buffer *first ; int tso ; u32 tx_flags ; unsigned short f ; u16 count ; unsigned int tmp ; __be16 protocol ; __be16 tmp___0 ; u8 hdr_len ; unsigned char *tmp___1 ; unsigned char *tmp___2 ; int tmp___3 ; struct igb_adapter *adapter ; void *tmp___4 ; unsigned char *tmp___5 ; unsigned char *tmp___6 ; int tmp___7 ; unsigned char *tmp___8 ; long tmp___9 ; { tx_flags = 0U; tmp = skb_headlen((struct sk_buff const *)skb); count = (u16 )((tmp + 32767U) / 32768U); tmp___0 = vlan_get_protocol(skb); protocol = tmp___0; hdr_len = 0U; f = 0U; goto ldv_57954; ldv_57953: tmp___1 = skb_end_pointer((struct sk_buff const *)skb); count = (int )((u16 )((((struct skb_shared_info *)tmp___1)->frags[(int )f].size + 32767U) / 32768U)) + (int )count; f = (unsigned short )((int )f + 1); ldv_57954: tmp___2 = skb_end_pointer((struct sk_buff const *)skb); if ((int )((unsigned short )((struct skb_shared_info *)tmp___2)->nr_frags) > (int )f) { goto ldv_57953; } else { } tmp___3 = igb_maybe_stop_tx(tx_ring, (int )((unsigned int )((u16 const )count) + 3U)); if (tmp___3 != 0) { return (16); } else { } first = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )tx_ring->next_to_use; first->skb = skb; first->bytecount = skb->len; first->gso_segs = 1U; tmp___8 = 
skb_end_pointer((struct sk_buff const *)skb); tmp___9 = ldv__builtin_expect((long )((struct skb_shared_info *)tmp___8)->tx_flags & 1L, 0L); if (tmp___9 != 0L) { tmp___4 = netdev_priv((struct net_device const *)tx_ring->netdev); adapter = (struct igb_adapter *)tmp___4; tmp___7 = test_and_set_bit_lock(3L, (unsigned long volatile *)(& adapter->state)); if (tmp___7 == 0) { tmp___5 = skb_end_pointer((struct sk_buff const *)skb); tmp___6 = skb_end_pointer((struct sk_buff const *)skb); ((struct skb_shared_info *)tmp___5)->tx_flags = (__u8 )((unsigned int )((struct skb_shared_info *)tmp___6)->tx_flags | 4U); tx_flags = tx_flags | 4U; adapter->ptp_tx_skb = skb_get(skb); adapter->ptp_tx_start = jiffies; if ((unsigned int )adapter->hw.mac.type == 2U) { schedule_work(& adapter->ptp_tx_work); } else { } } else { } } else { } skb_tx_timestamp(skb); if (((int )skb->vlan_tci & 4096) != 0) { tx_flags = tx_flags | 1U; tx_flags = (u32 )(((int )skb->vlan_tci & -4097) << 16) | tx_flags; } else { } first->tx_flags = tx_flags; first->protocol = protocol; tso = igb_tso(tx_ring, first, & hdr_len); if (tso < 0) { goto out_drop; } else if (tso == 0) { igb_tx_csum(tx_ring, first); } else { } igb_tx_map(tx_ring, first, (int )hdr_len); return (0); out_drop: igb_unmap_and_free_tx_resource(tx_ring, first); return (0); } } __inline static struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter , struct sk_buff *skb ) { unsigned int r_idx ; { r_idx = (unsigned int )skb->queue_mapping; if ((unsigned int )adapter->num_tx_queues <= r_idx) { r_idx = r_idx % (unsigned int )adapter->num_tx_queues; } else { } return (adapter->tx_ring[r_idx]); } } static netdev_tx_t igb_xmit_frame(struct sk_buff *skb , struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; int tmp___0 ; int tmp___1 ; struct igb_ring *tmp___2 ; netdev_tx_t tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; tmp___0 = constant_test_bit(2L, (unsigned long const 
volatile *)(& adapter->state)); if (tmp___0 != 0) { dev_kfree_skb_any(skb); return (0); } else { }
/* (tail of igb_xmit_frame) drop the skb when state bit 2 is set (the
 * adapter is going down -- confirm bit name against igb.h) or when the
 * frame is empty; otherwise pad short frames up to 17 bytes and hand
 * the skb to the Tx ring selected by igb_tx_queue_mapping(). */
if (skb->len == 0U) { dev_kfree_skb_any(skb); return (0); } else { } tmp___1 = skb_put_padto(skb, 17U); if (tmp___1 != 0) { return (0); } else { } tmp___2 = igb_tx_queue_mapping(adapter, skb); tmp___3 = igb_xmit_frame_ring(skb, tmp___2); return (tmp___3); } }
/* igb_tx_timeout - ndo_tx_timeout handler.
 * Counts the timeout, asks for a global device reset on newer MACs
 * (mac.type > 2), schedules the reset worker, and -- if the MMIO mapping
 * is still valid -- writes the vector mask (eims_enable_mask with
 * eims_other cleared) to the register at offset 5408 (0x1520; EICS in
 * the upstream register map -- confirm) to kick the interrupt handlers. */
static void igb_tx_timeout(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; adapter->tx_timeout_count = adapter->tx_timeout_count + 1U; if ((unsigned int )hw->mac.type > 2U) { hw->dev_spec._82575.global_device_reset = 1; } else { } schedule_work(& adapter->reset_task); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(adapter->eims_enable_mask & ~ adapter->eims_other, (void volatile *)hw_addr + 5408U); } else { } return; } }
/* igb_reset_task - deferred reset worker.
 * Recovers the adapter pointer via an open-coded container_of (the
 * constant 0xfffffffffffffb58 is the negative byte offset of reset_task
 * inside struct igb_adapter), dumps diagnostic state, logs, and performs
 * a full reinit under the rtnl-style lock. */
static void igb_reset_task(struct work_struct *work ) { struct igb_adapter *adapter ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; adapter = (struct igb_adapter *)__mptr + 0xfffffffffffffb58UL; igb_dump(adapter); netdev_err((struct net_device const *)adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); return; } }
/* igb_get_stats64 - ndo_get_stats64 handler.
 * Refreshes the cached counters under stats64_lock and copies the
 * 184-byte struct rtnl_link_stats64 snapshot into the caller's buffer. */
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev , struct rtnl_link_stats64 *stats ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; spin_lock(& adapter->stats64_lock); igb_update_stats(adapter, & adapter->stats64); memcpy((void *)stats, (void const *)(& adapter->stats64), 184UL); spin_unlock(& adapter->stats64_lock); return (stats); } }
/* igb_change_mtu (head only; body continues on the next source line). */
static int igb_change_mtu(struct net_device *netdev , int 
new_mtu ) { struct igb_adapter *adapter ; void *tmp ; struct pci_dev *pdev ; int max_frame ; int tmp___0 ; bool tmp___1 ; bool tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; pdev = adapter->pdev;
/* (body of igb_change_mtu) max_frame = MTU + 22 bytes of overhead
 * (Ethernet header + VLAN tag + FCS in the upstream driver -- confirm).
 * Rejects MTU < 68 or frames above the 16128/9238 hardware limits,
 * clamps the minimum frame to 1518, then spins (usleep_range) on state
 * bit 1 (the resetting bit -- confirm name against igb.h) until this
 * thread owns the resize, and finally bounces the interface
 * (igb_down/igb_up) or just resets it if it was not running. */
max_frame = new_mtu + 22; if (new_mtu <= 67 || max_frame > 16128) { dev_err((struct device const *)(& pdev->dev), "Invalid MTU setting\n"); return (-22); } else { } if (max_frame > 9238) { dev_err((struct device const *)(& pdev->dev), "MTU > 9216 not supported.\n"); return (-22); } else { } if (max_frame <= 1517) { max_frame = 1518; } else { } goto ldv_57995; ldv_57994: usleep_range(1000UL, 2000UL); ldv_57995: tmp___0 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___0 != 0) { goto ldv_57994; } else { } adapter->max_frame_size = (u32 )max_frame; tmp___1 = netif_running((struct net_device const *)netdev); if ((int )tmp___1) { igb_down(adapter); } else { } _dev_info((struct device const *)(& pdev->dev), "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = (unsigned int )new_mtu; tmp___2 = netif_running((struct net_device const *)netdev); if ((int )tmp___2) { igb_up(adapter); } else { igb_reset(adapter); } clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return (0); } }
/* igb_update_stats (head only; body continues on the following source
 * lines): accumulates hardware statistics registers into the adapter's
 * software counters and the rtnl stats structure. */
void igb_update_stats(struct igb_adapter *adapter , struct rtnl_link_stats64 *net_stats ) { struct e1000_hw *hw ; struct pci_dev *pdev ; u32 reg ; u32 mpc ; int i ; u64 bytes ; u64 packets ; unsigned int start ; u64 _bytes ; u64 _packets ; int tmp ; struct igb_ring *ring ; u32 rqdpc ; u32 tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; bool tmp___2 ; struct igb_ring *ring___0 ; bool tmp___3 ; u32 tmp___4 ; u32 tmp___5 ; u32 tmp___6 ; u32 tmp___7 ; u32 tmp___8 ; u32 tmp___9 ; u32 tmp___10 ; u32 tmp___11 ; u32 tmp___12 ; u32 tmp___13 ; u32 tmp___14 ; u32 tmp___15 ; u32 tmp___16 ; u32 tmp___17 ; u32 tmp___18 ; u32 tmp___19 ; u32 tmp___20 ; u32 tmp___21 ; u32 tmp___22 ; u32 tmp___23 ; u32 tmp___24 ; 
u32 tmp___25 ; u32 tmp___26 ; u32 tmp___27 ; u32 tmp___28 ; u32 tmp___29 ; u32 tmp___30 ; u32 tmp___31 ; u32 tmp___32 ; u32 tmp___33 ; u32 tmp___34 ; u32 tmp___35 ; u32 tmp___36 ; u32 tmp___37 ; u32 tmp___38 ; u32 tmp___39 ; u32 tmp___40 ; u32 tmp___41 ; u32 tmp___42 ; u32 tmp___43 ; u32 tmp___44 ; u32 tmp___45 ; u32 tmp___46 ; u32 tmp___47 ; u32 tmp___48 ; u32 tmp___49 ; u32 tmp___50 ; u32 tmp___51 ; u32 tmp___52 ; u32 tmp___53 ; u32 tmp___54 ; u32 tmp___55 ; u32 tmp___56 ; u32 tmp___57 ; u32 tmp___58 ; u32 tmp___59 ; u32 tmp___60 ; u32 tmp___61 ; u32 tmp___62 ; u32 tmp___63 ; u32 tmp___64 ; u32 tmp___65 ; u32 tmp___66 ; u32 tmp___67 ; u32 tmp___68 ; { hw = & adapter->hw; pdev = adapter->pdev; if ((unsigned int )adapter->link_speed == 0U) { return; } else { } tmp = pci_channel_offline(pdev); if (tmp != 0) { return; } else { } bytes = 0ULL; packets = 0ULL; rcu_read_lock(); i = 0; goto ldv_58019; ldv_58018: ring = adapter->rx_ring[i]; tmp___0 = igb_rd32(hw, (u32 )(i * 64 + 49200)); rqdpc = tmp___0; if ((unsigned int )hw->mac.type > 5U) { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(0U, (void volatile *)hw_addr + (unsigned long )(i * 64 + 49200)); } else { } } else { } if (rqdpc != 0U) { ring->__annonCompField120.__annonCompField119.rx_stats.drops = ring->__annonCompField120.__annonCompField119.rx_stats.drops + (u64 )rqdpc; net_stats->rx_fifo_errors = net_stats->rx_fifo_errors + (__u64 )rqdpc; } else { } ldv_58016: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField119.rx_syncp)); _bytes = ring->__annonCompField120.__annonCompField119.rx_stats.bytes; _packets = ring->__annonCompField120.__annonCompField119.rx_stats.packets; tmp___2 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField119.rx_syncp), start); if ((int 
)tmp___2) { goto ldv_58016; } else { } bytes = bytes + _bytes; packets = packets + _packets; i = i + 1; ldv_58019: ; if (adapter->num_rx_queues > i) { goto ldv_58018; } else { } net_stats->rx_bytes = bytes; net_stats->rx_packets = packets; bytes = 0ULL; packets = 0ULL; i = 0; goto ldv_58025; ldv_58024: ring___0 = adapter->tx_ring[i]; ldv_58022: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring___0->__annonCompField120.__annonCompField118.tx_syncp)); _bytes = ring___0->__annonCompField120.__annonCompField118.tx_stats.bytes; _packets = ring___0->__annonCompField120.__annonCompField118.tx_stats.packets; tmp___3 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring___0->__annonCompField120.__annonCompField118.tx_syncp), start); if ((int )tmp___3) { goto ldv_58022; } else { } bytes = bytes + _bytes; packets = packets + _packets; i = i + 1; ldv_58025: ; if (adapter->num_tx_queues > i) { goto ldv_58024; } else { } net_stats->tx_bytes = bytes; net_stats->tx_packets = packets; rcu_read_unlock(); tmp___4 = igb_rd32(hw, 16384U); adapter->stats.crcerrs = adapter->stats.crcerrs + (u64 )tmp___4; tmp___5 = igb_rd32(hw, 16500U); adapter->stats.gprc = adapter->stats.gprc + (u64 )tmp___5; tmp___6 = igb_rd32(hw, 16520U); adapter->stats.gorc = adapter->stats.gorc + (u64 )tmp___6; igb_rd32(hw, 16524U); tmp___7 = igb_rd32(hw, 16504U); adapter->stats.bprc = adapter->stats.bprc + (u64 )tmp___7; tmp___8 = igb_rd32(hw, 16508U); adapter->stats.mprc = adapter->stats.mprc + (u64 )tmp___8; tmp___9 = igb_rd32(hw, 16556U); adapter->stats.roc = adapter->stats.roc + (u64 )tmp___9; tmp___10 = igb_rd32(hw, 16476U); adapter->stats.prc64 = adapter->stats.prc64 + (u64 )tmp___10; tmp___11 = igb_rd32(hw, 16480U); adapter->stats.prc127 = adapter->stats.prc127 + (u64 )tmp___11; tmp___12 = igb_rd32(hw, 16484U); adapter->stats.prc255 = adapter->stats.prc255 + (u64 )tmp___12; tmp___13 = igb_rd32(hw, 16488U); adapter->stats.prc511 = adapter->stats.prc511 + (u64 )tmp___13; 
tmp___14 = igb_rd32(hw, 16492U); adapter->stats.prc1023 = adapter->stats.prc1023 + (u64 )tmp___14; tmp___15 = igb_rd32(hw, 16496U); adapter->stats.prc1522 = adapter->stats.prc1522 + (u64 )tmp___15; tmp___16 = igb_rd32(hw, 16392U); adapter->stats.symerrs = adapter->stats.symerrs + (u64 )tmp___16; tmp___17 = igb_rd32(hw, 16440U); adapter->stats.sec = adapter->stats.sec + (u64 )tmp___17; mpc = igb_rd32(hw, 16400U); adapter->stats.mpc = adapter->stats.mpc + (u64 )mpc; net_stats->rx_fifo_errors = net_stats->rx_fifo_errors + (__u64 )mpc; tmp___18 = igb_rd32(hw, 16404U); adapter->stats.scc = adapter->stats.scc + (u64 )tmp___18; tmp___19 = igb_rd32(hw, 16408U); adapter->stats.ecol = adapter->stats.ecol + (u64 )tmp___19; tmp___20 = igb_rd32(hw, 16412U); adapter->stats.mcc = adapter->stats.mcc + (u64 )tmp___20; tmp___21 = igb_rd32(hw, 16416U); adapter->stats.latecol = adapter->stats.latecol + (u64 )tmp___21; tmp___22 = igb_rd32(hw, 16432U); adapter->stats.dc = adapter->stats.dc + (u64 )tmp___22; tmp___23 = igb_rd32(hw, 16448U); adapter->stats.rlec = adapter->stats.rlec + (u64 )tmp___23; tmp___24 = igb_rd32(hw, 16456U); adapter->stats.xonrxc = adapter->stats.xonrxc + (u64 )tmp___24; tmp___25 = igb_rd32(hw, 16460U); adapter->stats.xontxc = adapter->stats.xontxc + (u64 )tmp___25; tmp___26 = igb_rd32(hw, 16464U); adapter->stats.xoffrxc = adapter->stats.xoffrxc + (u64 )tmp___26; tmp___27 = igb_rd32(hw, 16468U); adapter->stats.xofftxc = adapter->stats.xofftxc + (u64 )tmp___27; tmp___28 = igb_rd32(hw, 16472U); adapter->stats.fcruc = adapter->stats.fcruc + (u64 )tmp___28; tmp___29 = igb_rd32(hw, 16512U); adapter->stats.gptc = adapter->stats.gptc + (u64 )tmp___29; tmp___30 = igb_rd32(hw, 16528U); adapter->stats.gotc = adapter->stats.gotc + (u64 )tmp___30; igb_rd32(hw, 16532U); tmp___31 = igb_rd32(hw, 16544U); adapter->stats.rnbc = adapter->stats.rnbc + (u64 )tmp___31; tmp___32 = igb_rd32(hw, 16548U); adapter->stats.ruc = adapter->stats.ruc + (u64 )tmp___32; tmp___33 = igb_rd32(hw, 
16552U); adapter->stats.rfc = adapter->stats.rfc + (u64 )tmp___33; tmp___34 = igb_rd32(hw, 16560U); adapter->stats.rjc = adapter->stats.rjc + (u64 )tmp___34; tmp___35 = igb_rd32(hw, 16580U); adapter->stats.tor = adapter->stats.tor + (u64 )tmp___35; tmp___36 = igb_rd32(hw, 16588U); adapter->stats.tot = adapter->stats.tot + (u64 )tmp___36; tmp___37 = igb_rd32(hw, 16592U); adapter->stats.tpr = adapter->stats.tpr + (u64 )tmp___37; tmp___38 = igb_rd32(hw, 16600U); adapter->stats.ptc64 = adapter->stats.ptc64 + (u64 )tmp___38; tmp___39 = igb_rd32(hw, 16604U); adapter->stats.ptc127 = adapter->stats.ptc127 + (u64 )tmp___39; tmp___40 = igb_rd32(hw, 16608U); adapter->stats.ptc255 = adapter->stats.ptc255 + (u64 )tmp___40; tmp___41 = igb_rd32(hw, 16612U); adapter->stats.ptc511 = adapter->stats.ptc511 + (u64 )tmp___41; tmp___42 = igb_rd32(hw, 16616U); adapter->stats.ptc1023 = adapter->stats.ptc1023 + (u64 )tmp___42; tmp___43 = igb_rd32(hw, 16620U); adapter->stats.ptc1522 = adapter->stats.ptc1522 + (u64 )tmp___43; tmp___44 = igb_rd32(hw, 16624U); adapter->stats.mptc = adapter->stats.mptc + (u64 )tmp___44; tmp___45 = igb_rd32(hw, 16628U); adapter->stats.bptc = adapter->stats.bptc + (u64 )tmp___45; tmp___46 = igb_rd32(hw, 16596U); adapter->stats.tpt = adapter->stats.tpt + (u64 )tmp___46; tmp___47 = igb_rd32(hw, 16424U); adapter->stats.colc = adapter->stats.colc + (u64 )tmp___47; tmp___48 = igb_rd32(hw, 16388U); adapter->stats.algnerrc = adapter->stats.algnerrc + (u64 )tmp___48; reg = igb_rd32(hw, 24U); if ((reg & 12582912U) == 0U) { tmp___49 = igb_rd32(hw, 16396U); adapter->stats.rxerrc = adapter->stats.rxerrc + (u64 )tmp___49; if ((unsigned int )hw->mac.type != 6U && (unsigned int )hw->mac.type != 7U) { tmp___50 = igb_rd32(hw, 16436U); adapter->stats.tncrs = adapter->stats.tncrs + (u64 )tmp___50; } else { } } else { } tmp___51 = igb_rd32(hw, 16632U); adapter->stats.tsctc = adapter->stats.tsctc + (u64 )tmp___51; tmp___52 = igb_rd32(hw, 16636U); adapter->stats.tsctfc = 
adapter->stats.tsctfc + (u64 )tmp___52; tmp___53 = igb_rd32(hw, 16640U); adapter->stats.iac = adapter->stats.iac + (u64 )tmp___53; tmp___54 = igb_rd32(hw, 16676U); adapter->stats.icrxoc = adapter->stats.icrxoc + (u64 )tmp___54; tmp___55 = igb_rd32(hw, 16644U); adapter->stats.icrxptc = adapter->stats.icrxptc + (u64 )tmp___55; tmp___56 = igb_rd32(hw, 16648U); adapter->stats.icrxatc = adapter->stats.icrxatc + (u64 )tmp___56; tmp___57 = igb_rd32(hw, 16652U); adapter->stats.ictxptc = adapter->stats.ictxptc + (u64 )tmp___57; tmp___58 = igb_rd32(hw, 16656U); adapter->stats.ictxatc = adapter->stats.ictxatc + (u64 )tmp___58; tmp___59 = igb_rd32(hw, 16664U); adapter->stats.ictxqec = adapter->stats.ictxqec + (u64 )tmp___59; tmp___60 = igb_rd32(hw, 16668U); adapter->stats.ictxqmtc = adapter->stats.ictxqmtc + (u64 )tmp___60; tmp___61 = igb_rd32(hw, 16672U); adapter->stats.icrxdmtc = adapter->stats.icrxdmtc + (u64 )tmp___61; net_stats->multicast = adapter->stats.mprc; net_stats->collisions = adapter->stats.colc; net_stats->rx_errors = ((((adapter->stats.rxerrc + adapter->stats.crcerrs) + adapter->stats.algnerrc) + adapter->stats.ruc) + adapter->stats.roc) + adapter->stats.cexterr; net_stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; net_stats->rx_crc_errors = adapter->stats.crcerrs; net_stats->rx_frame_errors = adapter->stats.algnerrc; net_stats->rx_missed_errors = adapter->stats.mpc; net_stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; net_stats->tx_aborted_errors = adapter->stats.ecol; net_stats->tx_window_errors = adapter->stats.latecol; net_stats->tx_carrier_errors = adapter->stats.tncrs; tmp___62 = igb_rd32(hw, 16572U); adapter->stats.mgptc = adapter->stats.mgptc + (u64 )tmp___62; tmp___63 = igb_rd32(hw, 16564U); adapter->stats.mgprc = adapter->stats.mgprc + (u64 )tmp___63; tmp___64 = igb_rd32(hw, 16568U); adapter->stats.mgpdc = adapter->stats.mgpdc + (u64 )tmp___64; reg = igb_rd32(hw, 22560U); if ((reg & 268435456U) != 0U) { tmp___65 = 
/* NOTE(review): CIL-generated (v1.5.1) flattened source for LDV verification.
 * Code below is byte-identical to the original; only comment lines are added.
 * First fragment finishes igb_update_stats (definition starts earlier in the
 * file): accumulates OS2BMC counters (o2bgptc/o2bspc/b2ospc/b2ogprc) from the
 * registers at offsets 36836/16732/36832/16728, guarded by bit 28 of the
 * register read just before this line.
 * igb_tsync_interrupt(): reads an interrupt-cause word at offset 46700
 * (presumably the time-sync cause register TSICR — confirm against the 82576
 * register map) and handles it bit by bit: bit 0 -> PTP pps event or
 * "unexpected SYS WRAP" error; bit 1 -> schedule ptp_tx_work; bits 3/4 ->
 * advance periodic output target 0/1 under tmreg_lock and re-arm via the
 * register at 46656; bits 5/6 -> deliver an external-timestamp event built
 * from the sec/nsec registers. Handled bits accumulate in `ack`, written back
 * to 46700 at the end. All MMIO writes go through the instrumented
 * null-checked hw->hw_addr pattern (ldv__builtin_expect + writel). */
igb_rd32(hw, 36836U); adapter->stats.o2bgptc = adapter->stats.o2bgptc + (u64 )tmp___65; tmp___66 = igb_rd32(hw, 16732U); adapter->stats.o2bspc = adapter->stats.o2bspc + (u64 )tmp___66; tmp___67 = igb_rd32(hw, 36832U); adapter->stats.b2ospc = adapter->stats.b2ospc + (u64 )tmp___67; tmp___68 = igb_rd32(hw, 16728U); adapter->stats.b2ogprc = adapter->stats.b2ogprc + (u64 )tmp___68; } else { } return; } } static void igb_tsync_interrupt(struct igb_adapter *adapter ) { struct e1000_hw *hw ; struct ptp_clock_event event ; struct timespec ts ; u32 ack ; u32 tsauxc ; u32 sec ; u32 nsec ; u32 tsicr ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___3 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___4 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___5 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___6 ; { hw = & adapter->hw; ack = 0U; tmp = igb_rd32(hw, 46700U); tsicr = tmp; if ((int )tsicr & 1) { event.type = 2; if (adapter->ptp_caps.pps != 0) { ptp_clock_event(adapter->ptp_clock, & event); } else { dev_err((struct device const *)(& (adapter->pdev)->dev), "unexpected SYS WRAP"); } ack = ack | 1U; } else { } if ((tsicr & 2U) != 0U) { schedule_work(& adapter->ptp_tx_work); ack = ack | 2U; } else { } if ((tsicr & 8U) != 0U) { spin_lock(& adapter->tmreg_lock); ts = timespec_add(adapter->perout[0].start, adapter->perout[0].period); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )ts.tv_nsec, (void volatile *)hw_addr + 46660U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel((unsigned int )ts.tv_sec, (void volatile *)hw_addr___0 + 46664U); } 
/* Periodic output 1 (cause bit 4) mirrors the target-0 path above, using the
 * registers at 46668/46672 and bit 1 of the register at 46656. Cause bits
 * 5 and 6 turn the latched sec/nsec register pairs (46684/46688 and
 * 46692/46696) into PTP external-timestamp events (type 1, index 0/1). */
else { } tsauxc = igb_rd32(hw, 46656U); tsauxc = tsauxc | 1U; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(tsauxc, (void volatile *)hw_addr___1 + 46656U); } else { } adapter->perout[0].start = ts; spin_unlock(& adapter->tmreg_lock); ack = ack | 8U; } else { } if ((tsicr & 16U) != 0U) { spin_lock(& adapter->tmreg_lock); ts = timespec_add(adapter->perout[1].start, adapter->perout[1].period); __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel((unsigned int )ts.tv_nsec, (void volatile *)hw_addr___2 + 46668U); } else { } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel((unsigned int )ts.tv_sec, (void volatile *)hw_addr___3 + 46672U); } else { } tsauxc = igb_rd32(hw, 46656U); tsauxc = tsauxc | 2U; __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(tsauxc, (void volatile *)hw_addr___4 + 46656U); } else { } adapter->perout[1].start = ts; spin_unlock(& adapter->tmreg_lock); ack = ack | 16U; } else { } if ((tsicr & 32U) != 0U) { nsec = igb_rd32(hw, 46684U); sec = igb_rd32(hw, 46688U); event.type = 1; event.index = 0; event.__annonCompField116.timestamp = (unsigned long long )sec * 1000000000ULL + (unsigned long long )nsec; ptp_clock_event(adapter->ptp_clock, & event); ack = ack | 32U; } else { } if ((tsicr & 64U) != 0U) { nsec = igb_rd32(hw, 46692U); sec = igb_rd32(hw, 46696U); event.type = 1; event.index = 1; event.__annonCompField116.timestamp = (unsigned long long )sec * 1000000000ULL + (unsigned 
/* Finish of igb_tsync_interrupt: write the accumulated `ack` mask back to the
 * cause register at 46700.
 * igb_msix_other(): "other" MSI-X vector handler. Reads the cause register at
 * offset 192 and dispatches: bit 30 -> schedule reset_task; bit 28 -> count a
 * doosync and call igb_check_wvbr; bit 8 -> VF mailbox via igb_msg_task;
 * bit 2 -> mark link status stale and (unless the state bit 2 "down" flag is
 * set — TODO confirm flag meaning) kick the watchdog timer; bit 19 ->
 * igb_tsync_interrupt. Re-arms by writing eims_other to offset 5412, then
 * returns 1 (IRQ_HANDLED).
 * igb_write_itr(): pushes q_vector->itr_val to the vector's ITR register when
 * set_itr is pending; value 0 is clamped to 4, and the encoding differs for
 * mac.type == 1 (duplicated halves) vs others (top bit set). */
long long )nsec; ptp_clock_event(adapter->ptp_clock, & event); ack = ack | 64U; } else { } __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(ack, (void volatile *)hw_addr___5 + 46700U); } else { } return; } } static irqreturn_t igb_msix_other(int irq , void *data ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; u32 icr ; u32 tmp ; int tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; { adapter = (struct igb_adapter *)data; hw = & adapter->hw; tmp = igb_rd32(hw, 192U); icr = tmp; if ((icr & 1073741824U) != 0U) { schedule_work(& adapter->reset_task); } else { } if ((icr & 268435456U) != 0U) { adapter->stats.doosync = adapter->stats.doosync + 1ULL; igb_check_wvbr(adapter); } else { } if ((icr & 256U) != 0U) { igb_msg_task(adapter); } else { } if ((icr & 4U) != 0U) { hw->mac.get_link_status = 1; tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 == 0) { ldv_mod_timer_42(& adapter->watchdog_timer, (unsigned long )jiffies + 1UL); } else { } } else { } if ((icr & 524288U) != 0U) { igb_tsync_interrupt(adapter); } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(adapter->eims_other, (void volatile *)hw_addr + 5412U); } else { } return (1); } } static void igb_write_itr(struct igb_q_vector *q_vector ) { struct igb_adapter *adapter ; u32 itr_val ; { adapter = q_vector->adapter; itr_val = (u32 )q_vector->itr_val & 32764U; if ((unsigned int )q_vector->set_itr == 0U) { return; } else { } if (itr_val == 0U) { itr_val = 4U; } else { } if ((unsigned int )adapter->hw.mac.type == 1U) { itr_val = (itr_val << 16) | itr_val; } else { itr_val = itr_val | 2147483648U; } writel(itr_val, (void volatile *)q_vector->itr_register); q_vector->set_itr = 0U; 
/* NOTE(review): CIL-generated code, kept byte-identical; comments only.
 * Tail of igb_write_itr, then:
 * igb_msix_ring(): per-ring MSI-X handler — flush pending ITR, schedule NAPI,
 * return 1 (IRQ_HANDLED).
 * igb_update_tx_dca() / igb_update_rx_dca(): program the per-ring DCA control
 * register with the CPU's DCA tag from dca3_get_tag(). For mac.type != 1 the
 * tag is shifted into bits 31:24; fixed enable bits (8736 for TX, 544 for RX)
 * are ORed in. The register offset depends on reg_idx: rings 0-3 use one
 * register bank, higher rings another (TX: reg_idx*256+14356 vs reg_idx*64+
 * 57364; RX: reg_idx*256+10260 vs reg_idx*64+49172). Writes use the
 * instrumented null-checked hw_addr pattern. */
return; } } static irqreturn_t igb_msix_ring(int irq , void *data ) { struct igb_q_vector *q_vector ; { q_vector = (struct igb_q_vector *)data; igb_write_itr(q_vector); napi_schedule(& q_vector->napi); return (1); } } static void igb_update_tx_dca(struct igb_adapter *adapter , struct igb_ring *tx_ring , int cpu ) { struct e1000_hw *hw ; u32 txctrl ; u8 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { hw = & adapter->hw; tmp = dca3_get_tag(tx_ring->dev, cpu); txctrl = (u32 )tmp; if ((unsigned int )hw->mac.type != 1U) { txctrl = txctrl << 24; } else { } txctrl = txctrl | 8736U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(txctrl, (void volatile *)hw_addr + (unsigned long )((unsigned int )tx_ring->reg_idx <= 3U ? (int )tx_ring->reg_idx * 256 + 14356 : (int )tx_ring->reg_idx * 64 + 57364)); } else { } return; } } static void igb_update_rx_dca(struct igb_adapter *adapter , struct igb_ring *rx_ring , int cpu ) { struct e1000_hw *hw ; u32 rxctrl ; u8 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { hw = & adapter->hw; tmp = dca3_get_tag(& (adapter->pdev)->dev, cpu); rxctrl = (u32 )tmp; if ((unsigned int )hw->mac.type != 1U) { rxctrl = rxctrl << 24; } else { } rxctrl = rxctrl | 544U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(rxctrl, (void volatile *)hw_addr + (unsigned long )((unsigned int )rx_ring->reg_idx <= 3U ? 
/* igb_update_dca(): the nested switch(4UL)/__asm__ blocks are CIL's expansion
 * of the x86 this_cpu_read(cpu_number) percpu accessor inside a
 * get_cpu()/put_cpu() (preempt-disabled) section — only the `case 4UL`
 * (movl) arms are reachable since the controlling expression is the constant
 * 4UL. If the cached q_vector->cpu already matches, it skips reprogramming;
 * otherwise it updates TX and RX DCA registers for whichever rings the vector
 * owns and caches the new CPU. */
(int )rx_ring->reg_idx * 256 + 10260 : (int )rx_ring->reg_idx * 64 + 49172)); } else { } return; } } static void igb_update_dca(struct igb_q_vector *q_vector ) { struct igb_adapter *adapter ; int cpu ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; { adapter = q_vector->adapter; __preempt_count_add(1); __asm__ volatile ("": : : "memory"); __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_58109; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_58109; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_58109; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_58109; default: __bad_percpu_size(); } ldv_58109: pscr_ret__ = pfo_ret__; goto ldv_58115; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_58119; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_58119; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_58119; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_58119; default: __bad_percpu_size(); } ldv_58119: pscr_ret__ = pfo_ret_____0; goto ldv_58115; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_58128; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_58128; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_58128; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_58128; default: __bad_percpu_size(); } ldv_58128: pscr_ret__ = pfo_ret_____1; goto ldv_58115; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" 
/* NOTE(review): CIL-generated code, kept byte-identical; comments only.
 * Continuation of igb_update_dca's percpu-read expansion; after ldv_58115 the
 * resolved CPU id is compared against the cached q_vector->cpu, TX/RX DCA
 * registers are reprogrammed for rings the vector owns, and the compiler
 * barrier + __preempt_count_sub(1) close the preempt-disabled section.
 * igb_setup_dca(): if flag bit 1 (DCA enabled) is set, writes mode 2 to the
 * register at 23412 and forces igb_update_dca on every q_vector by resetting
 * each cached cpu to -1.
 * __igb_notify_dca(): per-device DCA notifier callback. event 1 = provider
 * add: dca_add_requester, set flag bit 1, log "DCA enabled", igb_setup_dca;
 * event 2 = provider remove: dca_remove_requester, clear flag bit 1, log
 * "DCA disabled", write mode 1 back to register 23412. NOTE(review): the
 * generated switch has no break between `case 1UL` paths when
 * dca_add_requester fails — control falls into `case 2UL`; this mirrors the
 * original kernel code's fallthrough structure, do not "fix" here. */
(pfo_ret_____2): "m" (cpu_number)); goto ldv_58137; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_58137; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_58137; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_58137; default: __bad_percpu_size(); } ldv_58137: pscr_ret__ = pfo_ret_____2; goto ldv_58115; default: __bad_size_call_parameter(); goto ldv_58115; } ldv_58115: cpu = pscr_ret__; if (q_vector->cpu == cpu) { goto out_no_update; } else { } if ((unsigned long )q_vector->tx.ring != (unsigned long )((struct igb_ring *)0)) { igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); } else { } if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0)) { igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); } else { } q_vector->cpu = cpu; out_no_update: __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } static void igb_setup_dca(struct igb_adapter *adapter ) { struct e1000_hw *hw ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; if ((adapter->flags & 2U) == 0U) { return; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(2U, (void volatile *)hw_addr + 23412U); } else { } i = 0; goto ldv_58156; ldv_58155: (adapter->q_vector[i])->cpu = -1; igb_update_dca(adapter->q_vector[i]); i = i + 1; ldv_58156: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_58155; } else { } return; } } static int __igb_notify_dca(struct device *dev , void *data ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; struct pci_dev *pdev ; struct e1000_hw *hw ; unsigned long event ; int tmp___1 ; u8 *hw_addr ; u8 *__var ; long tmp___2 ; { tmp = dev_get_drvdata((struct device const *)dev); netdev = (struct net_device *)tmp; tmp___0 = 
/* igb_notify_dca(): DCA notifier entry — fans the event out to every bound
 * device via driver_for_each_device(__igb_notify_dca); a nonzero result maps
 * to 32770 (NOTIFY_BAD in kernel terms — TODO confirm constant). */
netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; pdev = adapter->pdev; hw = & adapter->hw; event = *((unsigned long *)data); switch (event) { case 1UL: ; if ((adapter->flags & 2U) != 0U) { goto ldv_58168; } else { } tmp___1 = dca_add_requester(dev); if (tmp___1 == 0) { adapter->flags = adapter->flags | 2U; _dev_info((struct device const *)(& pdev->dev), "DCA enabled\n"); igb_setup_dca(adapter); goto ldv_58168; } else { } case 2UL: ; if ((adapter->flags & 2U) != 0U) { dca_remove_requester(dev); _dev_info((struct device const *)(& pdev->dev), "DCA disabled\n"); adapter->flags = adapter->flags & 4294967293U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(1U, (void volatile *)hw_addr + 23412U); } else { } } else { } goto ldv_58168; } ldv_58168: ; return (0); } } static int igb_notify_dca(struct notifier_block *nb , unsigned long event , void *p ) { int ret_val ; { ret_val = driver_for_each_device(& igb_driver.driver, (struct device *)0, (void *)(& event), & __igb_notify_dca); return (ret_val != 0 ? 
/* NOTE(review): CIL-generated code, kept byte-identical; comments only.
 * Closing of igb_notify_dca, then the SR-IOV VF helpers:
 * igb_vf_configure(): assign an all-zero MAC to the VF and enable spoof
 * checking; always returns 0.
 * igb_ping_all_vfs(): send a CTS-qualified ping (0x100, ORed with bit 29 when
 * the VF's flag bit 0 is set) to each allocated VF mailbox.
 * igb_set_vf_promisc(): rewrite the VF's VMOLR-style register at
 * (vf+5812)*4 from the VF's mailbox request: multicast-promisc request
 * (bit 17 of *msgbuf) sets bit 28 and records flag bit 2; otherwise >30
 * multicast hashes forces bit 28, and 1..30 hashes enables bit 25 and
 * reprograms the MTA via igb_mta_set. Any remaining request bits in
 * 0x00ff0000 are rejected with -22 (-EINVAL). */
32770 : 0); } } static int igb_vf_configure(struct igb_adapter *adapter , int vf ) { unsigned char mac_addr[6U] ; { eth_zero_addr((u8 *)(& mac_addr)); igb_set_vf_mac(adapter, vf, (unsigned char *)(& mac_addr)); (adapter->vf_data + (unsigned long )vf)->spoofchk_enabled = 1; return (0); } } static void igb_ping_all_vfs(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 ping ; int i ; { hw = & adapter->hw; i = 0; goto ldv_58191; ldv_58190: ping = 256U; if ((int )(adapter->vf_data + (unsigned long )i)->flags & 1) { ping = ping | 536870912U; } else { } igb_write_mbx(hw, & ping, 1, (int )((u16 )i)); i = i + 1; ldv_58191: ; if ((unsigned int )i < adapter->vfs_allocated_count) { goto ldv_58190; } else { } return; } } static int igb_set_vf_promisc(struct igb_adapter *adapter , u32 *msgbuf , u32 vf ) { struct e1000_hw *hw ; u32 vmolr ; u32 tmp ; struct vf_data_storage *vf_data ; int j ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { hw = & adapter->hw; tmp = igb_rd32(hw, (vf + 5812U) * 4U); vmolr = tmp; vf_data = adapter->vf_data + (unsigned long )vf; vf_data->flags = vf_data->flags & 4294967289U; vmolr = vmolr & 3925868543U; if ((*msgbuf & 131072U) != 0U) { vmolr = vmolr | 268435456U; vf_data->flags = vf_data->flags | 4U; *msgbuf = *msgbuf & 4294836223U; } else if ((unsigned int )vf_data->num_vf_mc_hashes > 30U) { vmolr = vmolr | 268435456U; } else if ((unsigned int )vf_data->num_vf_mc_hashes != 0U) { vmolr = vmolr | 33554432U; j = 0; goto ldv_58203; ldv_58202: igb_mta_set(hw, (u32 )vf_data->vf_mc_hashes[j]); j = j + 1; ldv_58203: ; if ((int )vf_data->num_vf_mc_hashes > j) { goto ldv_58202; } else { } } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(vmolr, (void volatile *)hw_addr + (unsigned long )((vf + 5812U) * 4U)); } else { } if ((*msgbuf & 16711680U) != 0U) { return (-22); } else { } return (0); } } static int 
/* igb_set_vf_multicasts(): cache up to 30 multicast hash entries from the VF
 * mailbox message (count in bits 23:16 of word 0, hashes packed as u16 from
 * word 1). NOTE(review): num_vf_mc_hashes is recorded BEFORE n is clamped to
 * 30, so the stored count may exceed the copied entries — this mirrors the
 * upstream driver's behavior; the >30 case is treated as "go promiscuous"
 * elsewhere. igb_set_rx_mode() then refreshes the shared MTA.
 * igb_restore_vf_multicasts(): replays each VF's cached multicast state into
 * its per-VF register at (i+5812)*4 (bit 28 for promisc/overflow, bit 25 +
 * MTA entries otherwise).
 * igb_clear_vf_vfta(): strips this VF's pool bit (1 << (vf+12)) from all 32
 * VLVF entries at (i+5952)*4, dropping the VFTA bit when an entry has no
 * pools left, and clears the VF's vlans_enabled counter. */
igb_set_vf_multicasts(struct igb_adapter *adapter , u32 *msgbuf , u32 vf ) { int n ; u16 *hash_list ; struct vf_data_storage *vf_data ; int i ; { n = (int )((*msgbuf & 16711680U) >> 16); hash_list = (u16 *)msgbuf + 1U; vf_data = adapter->vf_data + (unsigned long )vf; vf_data->num_vf_mc_hashes = (u16 )n; if (n > 30) { n = 30; } else { } i = 0; goto ldv_58218; ldv_58217: vf_data->vf_mc_hashes[i] = *(hash_list + (unsigned long )i); i = i + 1; ldv_58218: ; if (i < n) { goto ldv_58217; } else { } igb_set_rx_mode(adapter->netdev); return (0); } } static void igb_restore_vf_multicasts(struct igb_adapter *adapter ) { struct e1000_hw *hw ; struct vf_data_storage *vf_data ; int i ; int j ; u32 vmolr ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { hw = & adapter->hw; i = 0; goto ldv_58235; ldv_58234: tmp = igb_rd32(hw, (u32 )((i + 5812) * 4)); vmolr = tmp; vmolr = vmolr & 3992977407U; vf_data = adapter->vf_data + (unsigned long )i; if ((unsigned int )vf_data->num_vf_mc_hashes > 30U || (vf_data->flags & 4U) != 0U) { vmolr = vmolr | 268435456U; } else if ((unsigned int )vf_data->num_vf_mc_hashes != 0U) { vmolr = vmolr | 33554432U; j = 0; goto ldv_58229; ldv_58228: igb_mta_set(hw, (u32 )vf_data->vf_mc_hashes[j]); j = j + 1; ldv_58229: ; if ((int )vf_data->num_vf_mc_hashes > j) { goto ldv_58228; } else { } } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(vmolr, (void volatile *)hw_addr + (unsigned long )((i + 5812) * 4)); } else { } i = i + 1; ldv_58235: ; if ((unsigned int )i < adapter->vfs_allocated_count) { goto ldv_58234; } else { } return; } } static void igb_clear_vf_vfta(struct igb_adapter *adapter , u32 vf ) { struct e1000_hw *hw ; u32 pool_mask ; u32 reg ; u32 vid ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; pool_mask = (u32 )(1 << (int )(vf + 12U)); i = 0; goto ldv_58250; ldv_58249: reg = 
/* igb_vlvf_set(): add/remove VLAN `vid` for pool `vf` in the 32-entry VLVF
 * table at (i+5952)*4. Add path: find the entry for vid (or the first entry
 * with the valid bit 31 clear), set the pool bit, set VFTA + bit 31 on first
 * use, and grow the VF's receive-size field (low 14 bits of the register at
 * (vf+5812)*4) by 4 on the VF's first VLAN — presumably VLAN-tag headroom;
 * confirm against datasheet. Remove path mirrors this. Returns -1 when the
 * MAC type (<=1) or zero-VF configuration does not support VLVF. */
igb_rd32(hw, (u32 )((i + 5952) * 4)); reg = ~ pool_mask & reg; if ((reg & 1044480U) == 0U && (int )reg < 0) { reg = 0U; vid = reg & 4095U; igb_vfta_set(hw, vid, 0); } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(reg, (void volatile *)hw_addr + (unsigned long )((i + 5952) * 4)); } else { } i = i + 1; ldv_58250: ; if (i <= 31) { goto ldv_58249; } else { } (adapter->vf_data + (unsigned long )vf)->vlans_enabled = 0U; return; } } static s32 igb_vlvf_set(struct igb_adapter *adapter , u32 vid , bool add , u32 vf ) { struct e1000_hw *hw ; u32 reg ; u32 i ; u8 *hw_addr ; u8 *__var ; long tmp ; u32 size ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; u32 size___0 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___2 ; { hw = & adapter->hw; if ((unsigned int )hw->mac.type <= 1U) { return (-1); } else { } if (adapter->vfs_allocated_count == 0U) { return (-1); } else { } i = 0U; goto ldv_58263; ldv_58262: reg = igb_rd32(hw, (i + 5952U) * 4U); if ((int )reg < 0 && (reg & 4095U) == vid) { goto ldv_58261; } else { } i = i + 1U; ldv_58263: ; if (i <= 31U) { goto ldv_58262; } else { } ldv_58261: ; if ((int )add) { if (i == 32U) { i = 0U; goto ldv_58266; ldv_58265: reg = igb_rd32(hw, (i + 5952U) * 4U); if ((int )reg >= 0) { goto ldv_58264; } else { } i = i + 1U; ldv_58266: ; if (i <= 31U) { goto ldv_58265; } else { } ldv_58264: ; } else { } if (i <= 31U) { reg = (u32 )(1 << (int )(vf + 12U)) | reg; if ((int )reg >= 0) { igb_vfta_set(hw, vid, 1); reg = reg | 2147483648U; } else { } reg = reg & 4294963200U; reg = reg | vid; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(reg, (void volatile *)hw_addr + (unsigned long )((i + 5952U) * 4U)); } else { } if 
/* NOTE(review): CIL-generated code, kept byte-identical; comments only.
 * Continuation of igb_vlvf_set: the PF's own pool (vf >= vfs_allocated_count)
 * skips the size-field bookkeeping; the remove branch decrements
 * vlans_enabled and shrinks the 14-bit size field by 4 when the last VLAN is
 * gone.
 * igb_set_vmvir(): program the per-VF VLAN-insert register at (vf+3520)*4 —
 * vid | bit 30 (insert default VLAN) when vid != 0, else 0 to disable. */
(adapter->vfs_allocated_count <= vf) { return (0); } else { } if ((unsigned int )(adapter->vf_data + (unsigned long )vf)->vlans_enabled == 0U) { reg = igb_rd32(hw, (vf + 5812U) * 4U); size = reg & 16383U; size = size + 4U; reg = reg & 4294950912U; reg = reg | size; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg, (void volatile *)hw_addr___0 + (unsigned long )((vf + 5812U) * 4U)); } else { } } else { } (adapter->vf_data + (unsigned long )vf)->vlans_enabled = (u16 )((int )(adapter->vf_data + (unsigned long )vf)->vlans_enabled + 1); } else { } } else if (i <= 31U) { reg = (u32 )(~ (1 << (int )(vf + 12U))) & reg; if ((reg & 1044480U) == 0U) { reg = 0U; igb_vfta_set(hw, vid, 0); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(reg, (void volatile *)hw_addr___1 + (unsigned long )((i + 5952U) * 4U)); } else { } if (adapter->vfs_allocated_count <= vf) { return (0); } else { } (adapter->vf_data + (unsigned long )vf)->vlans_enabled = (u16 )((int )(adapter->vf_data + (unsigned long )vf)->vlans_enabled - 1); if ((unsigned int )(adapter->vf_data + (unsigned long )vf)->vlans_enabled == 0U) { reg = igb_rd32(hw, (vf + 5812U) * 4U); size___0 = reg & 16383U; size___0 = size___0 - 4U; reg = reg & 4294950912U; reg = reg | size___0; __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(reg, (void volatile *)hw_addr___2 + (unsigned long )((vf + 5812U) * 4U)); } else { } } else { } } else { } return (0); } } static void igb_set_vmvir(struct igb_adapter *adapter , u32 vid , u32 vf ) { struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 
/* igb_ndo_set_vf_vlan(): ndo callback to pin an administrative VLAN/QoS on a
 * VF. Validates vf < vfs_allocated_count, vlan <= 4095, qos <= 7 (-22 =
 * -EINVAL otherwise). Non-zero vlan/qos: program VLVF + VMVIR
 * (vlan | qos << 13), record pf_vlan/pf_qos, log, and warn twice if the PF is
 * not up (state bit 2 set — TODO confirm flag meaning). Zero vlan and qos:
 * tear the previous pf_vlan out of the VLVF table and clear VMVIR. */
*hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { hw = & adapter->hw; if (vid != 0U) { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(vid | 1073741824U, (void volatile *)hw_addr + (unsigned long )((vf + 3520U) * 4U)); } else { } } else { __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(0U, (void volatile *)hw_addr___0 + (unsigned long )((vf + 3520U) * 4U)); } else { } } return; } } static int igb_ndo_set_vf_vlan(struct net_device *netdev , int vf , u16 vlan , u8 qos ) { int err ; struct igb_adapter *adapter ; void *tmp ; int tmp___0 ; { err = 0; tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; if (((unsigned int )vf >= adapter->vfs_allocated_count || (unsigned int )vlan > 4095U) || (unsigned int )qos > 7U) { return (-22); } else { } if ((unsigned int )vlan != 0U || (unsigned int )qos != 0U) { err = igb_vlvf_set(adapter, (u32 )vlan, (unsigned int )vlan != 0U, (u32 )vf); if (err != 0) { goto out; } else { } igb_set_vmvir(adapter, (u32 )((int )vlan | ((int )qos << 13)), (u32 )vf); igb_set_vmolr(adapter, vf, (unsigned int )vlan == 0U); (adapter->vf_data + (unsigned long )vf)->pf_vlan = vlan; (adapter->vf_data + (unsigned long )vf)->pf_qos = (u16 )qos; _dev_info((struct device const *)(& (adapter->pdev)->dev), "Setting VLAN %d, QOS 0x%x on VF %d\n", (int )vlan, (int )qos, vf); tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 != 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "The VF VLAN has been set, but the PF device is not up.\n"); dev_warn((struct device const *)(& (adapter->pdev)->dev), "Bring the PF device up before attempting to use the VF device.\n"); } else { } } else { igb_vlvf_set(adapter, 
/* igb_find_vlvf_entry(): linear scan of the 32 VLVF entries for a valid
 * (bit 31) entry matching vid; returns the index or -1.
 * igb_set_vf_vlan(): mailbox VLAN add/remove from a VF (add flag in bits
 * 23:16 of word 0, vid in low 12 bits of word 1). In promiscuous PF mode
 * (netdev flag 0x100) the PF pool is added first on add, and on remove the
 * PF pool is dropped again when the host no longer uses the vid
 * (active_vlans bit clear) and no other pools reference the entry.
 * igb_vf_reset() (inline): reset per-VF soft state — keep only flag bit 3
 * (admin MAC), refresh last_nack, restore default VMOLR, clear VFTA pools,
 * and re-apply an admin pf_vlan via igb_ndo_set_vf_vlan if one is set. */
(u32 )(adapter->vf_data + (unsigned long )vf)->pf_vlan, 0, (u32 )vf); igb_set_vmvir(adapter, (u32 )vlan, (u32 )vf); igb_set_vmolr(adapter, vf, 1); (adapter->vf_data + (unsigned long )vf)->pf_vlan = 0U; (adapter->vf_data + (unsigned long )vf)->pf_qos = 0U; } out: ; return (err); } } static int igb_find_vlvf_entry(struct igb_adapter *adapter , int vid ) { struct e1000_hw *hw ; int i ; u32 reg ; { hw = & adapter->hw; i = 0; goto ldv_58311; ldv_58310: reg = igb_rd32(hw, (u32 )((i + 5952) * 4)); if ((int )reg < 0 && (u32 )vid == (reg & 4095U)) { goto ldv_58309; } else { } i = i + 1; ldv_58311: ; if (i <= 31) { goto ldv_58310; } else { } ldv_58309: ; if (i > 31) { i = -1; } else { } return (i); } } static int igb_set_vf_vlan(struct igb_adapter *adapter , u32 *msgbuf , u32 vf ) { struct e1000_hw *hw ; int add ; int vid ; int err ; u32 vlvf ; u32 bits ; int regndx ; int tmp ; int tmp___0 ; { hw = & adapter->hw; add = (int )((*msgbuf & 16711680U) >> 16); vid = (int )*(msgbuf + 1UL) & 4095; err = 0; if (add != 0 && ((adapter->netdev)->flags & 256U) != 0U) { err = igb_vlvf_set(adapter, (u32 )vid, add != 0, adapter->vfs_allocated_count); } else { } if (err != 0) { goto out; } else { } err = igb_vlvf_set(adapter, (u32 )vid, add != 0, vf); if (err != 0) { goto out; } else { } if (add == 0 && ((adapter->netdev)->flags & 256U) != 0U) { tmp = igb_find_vlvf_entry(adapter, vid); regndx = tmp; if (regndx < 0) { goto out; } else { } bits = igb_rd32(hw, (u32 )((regndx + 5952) * 4)); vlvf = bits; bits = (u32 )(1 << (int )(adapter->vfs_allocated_count + 12U)) & bits; if ((vlvf & 4095U) == (u32 )vid) { tmp___0 = variable_test_bit((long )vid, (unsigned long const volatile *)(& adapter->active_vlans)); if (tmp___0 == 0) { if (bits == 0U) { igb_vlvf_set(adapter, (u32 )vid, add != 0, adapter->vfs_allocated_count); } else { } } else { } } else { } } else { } out: ; return (err); } } __inline static void igb_vf_reset(struct igb_adapter *adapter , u32 vf ) { { (adapter->vf_data + (unsigned long 
/* NOTE(review): CIL-generated code, kept byte-identical; comments only.
 * Tail of inline igb_vf_reset (see comment in preceding region), then:
 * igb_vf_reset_event(): zero the VF MAC unless administratively set (flag
 * bit 3), then run the common reset.
 * igb_vf_reset_msg(): full mailbox reset handshake — reset soft state,
 * program the VF MAC into RAR entry (rar_entry_count - vf - 1), set the VF's
 * enable bits in the registers at 3216 and 3212 (presumably VFRE/VFTE rx/tx
 * enables — confirm against the 82576 register map), mark the VF CTS
 * (flag bit 0), and reply with RESET | ACK (0x80000001) plus the MAC bytes,
 * or RESET | NACK (0x40000001) when the MAC is all-zero. */
)vf)->flags = (adapter->vf_data + (unsigned long )vf)->flags & 8U; (adapter->vf_data + (unsigned long )vf)->last_nack = jiffies; igb_set_vmolr(adapter, (int )vf, 1); igb_clear_vf_vfta(adapter, vf); if ((unsigned int )(adapter->vf_data + (unsigned long )vf)->pf_vlan != 0U) { igb_ndo_set_vf_vlan(adapter->netdev, (int )vf, (int )(adapter->vf_data + (unsigned long )vf)->pf_vlan, (int )((u8 )(adapter->vf_data + (unsigned long )vf)->pf_qos)); } else { igb_clear_vf_vfta(adapter, vf); } (adapter->vf_data + (unsigned long )vf)->num_vf_mc_hashes = 0U; igb_set_rx_mode(adapter->netdev); return; } } static void igb_vf_reset_event(struct igb_adapter *adapter , u32 vf ) { unsigned char *vf_mac ; { vf_mac = (unsigned char *)(& (adapter->vf_data + (unsigned long )vf)->vf_mac_addresses); if (((adapter->vf_data + (unsigned long )vf)->flags & 8U) == 0U) { eth_zero_addr(vf_mac); } else { } igb_vf_reset(adapter, vf); return; } } static void igb_vf_reset_msg(struct igb_adapter *adapter , u32 vf ) { struct e1000_hw *hw ; unsigned char *vf_mac ; int rar_entry ; u32 reg ; u32 msgbuf[3U] ; u8 *addr ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; { hw = & adapter->hw; vf_mac = (unsigned char *)(& (adapter->vf_data + (unsigned long )vf)->vf_mac_addresses); rar_entry = (int )(((u32 )hw->mac.rar_entry_count - vf) - 1U); addr = (u8 *)(& msgbuf) + 1U; igb_vf_reset(adapter, vf); igb_rar_set_qsel(adapter, vf_mac, (u32 )rar_entry, (int )((u8 )vf)); reg = igb_rd32(hw, 3216U); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel((u32 )(1 << (int )vf) | reg, (void volatile *)hw_addr + 3216U); } else { } reg = igb_rd32(hw, 3212U); __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { 
/* igb_set_vf_mac_addr(): accept a VF-proposed MAC (message word 1 onward)
 * only if it is a valid unicast address; returns -1 otherwise.
 * igb_rcv_ack_from_vf(): rate-limited NACK (0x40000000) to a VF that acked
 * while not CTS; at most one per 500-jiffy window tracked via last_nack.
 * igb_rcv_msg_from_vf(): read a 16-word mailbox message; on read error clear
 * CTS and (rate-limited) fall through to NACK. Messages already carrying
 * ACK/NACK bits (0xC0000000) are ignored; 0x00000001 triggers the reset
 * handshake. Non-CTS VFs get a rate-limited NACK. */
writel((u32 )(1 << (int )vf) | reg, (void volatile *)hw_addr___0 + 3212U); } else { } (adapter->vf_data + (unsigned long )vf)->flags = (adapter->vf_data + (unsigned long )vf)->flags | 1U; tmp___1 = is_zero_ether_addr((u8 const *)vf_mac); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { msgbuf[0] = 2147483649U; memcpy((void *)addr, (void const *)vf_mac, 6UL); } else { msgbuf[0] = 1073741825U; } igb_write_mbx(hw, (u32 *)(& msgbuf), 3, (int )((u16 )vf)); return; } } static int igb_set_vf_mac_addr(struct igb_adapter *adapter , u32 *msg , int vf ) { unsigned char *addr ; int err ; bool tmp ; { addr = (unsigned char *)msg + 1U; err = -1; tmp = is_valid_ether_addr((u8 const *)addr); if ((int )tmp) { err = igb_set_vf_mac(adapter, vf, addr); } else { } return (err); } } static void igb_rcv_ack_from_vf(struct igb_adapter *adapter , u32 vf ) { struct e1000_hw *hw ; struct vf_data_storage *vf_data ; u32 msg ; { hw = & adapter->hw; vf_data = adapter->vf_data + (unsigned long )vf; msg = 1073741824U; if ((vf_data->flags & 1U) == 0U && (long )((vf_data->last_nack - (unsigned long )jiffies) + 500UL) < 0L) { igb_write_mbx(hw, & msg, 1, (int )((u16 )vf)); vf_data->last_nack = jiffies; } else { } return; } } static void igb_rcv_msg_from_vf(struct igb_adapter *adapter , u32 vf ) { struct pci_dev *pdev ; u32 msgbuf[16U] ; struct e1000_hw *hw ; struct vf_data_storage *vf_data ; s32 retval ; { pdev = adapter->pdev; hw = & adapter->hw; vf_data = adapter->vf_data + (unsigned long )vf; retval = igb_read_mbx(hw, (u32 *)(& msgbuf), 16, (int )((u16 )vf)); if (retval != 0) { dev_err((struct device const *)(& pdev->dev), "Error receiving message from VF\n"); vf_data->flags = vf_data->flags & 4294967294U; if ((long )((vf_data->last_nack - (unsigned long )jiffies) + 500UL) >= 0L) { return; } else { } goto out; } else { } if ((msgbuf[0] & 3221225472U) != 0U) { return; } else { } if (msgbuf[0] == 1U) { igb_vf_reset_msg(adapter, vf); return; } else { } if ((vf_data->flags & 1U) == 
/* Dispatch on the low 16 bits of the message: 2 = set MAC (refused with -22
 * when an admin MAC is pinned, flag bit 3), 6 = promisc, 3 = multicast list,
 * 5 = rlpml (max frame), 4 = VLAN (refused when an admin VLAN is pinned);
 * unknown opcodes are logged and NACKed. The reply echoes word 0 with CTS
 * (bit 29) plus ACK (bit 31) or NACK (bit 30) depending on retval.
 * igb_msg_task(): poll every allocated VF mailbox for reset / message / ack
 * conditions and route to the handlers above. */
0U) { if ((long )((vf_data->last_nack - (unsigned long )jiffies) + 500UL) >= 0L) { return; } else { } retval = -1; goto out; } else { } switch (msgbuf[0] & 65535U) { case 2U: retval = -22; if ((vf_data->flags & 8U) == 0U) { retval = igb_set_vf_mac_addr(adapter, (u32 *)(& msgbuf), (int )vf); } else { dev_warn((struct device const *)(& pdev->dev), "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", vf); } goto ldv_58393; case 6U: retval = igb_set_vf_promisc(adapter, (u32 *)(& msgbuf), vf); goto ldv_58393; case 3U: retval = igb_set_vf_multicasts(adapter, (u32 *)(& msgbuf), vf); goto ldv_58393; case 5U: retval = igb_set_vf_rlpml(adapter, (int )msgbuf[1], (int )vf); goto ldv_58393; case 4U: retval = -1; if ((unsigned int )vf_data->pf_vlan != 0U) { dev_warn((struct device const *)(& pdev->dev), "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", vf); } else { retval = igb_set_vf_vlan(adapter, (u32 *)(& msgbuf), vf); } goto ldv_58393; default: dev_err((struct device const *)(& pdev->dev), "Unhandled Msg %08x\n", msgbuf[0]); retval = -1; goto ldv_58393; } ldv_58393: msgbuf[0] = msgbuf[0] | 536870912U; out: ; if (retval != 0) { msgbuf[0] = msgbuf[0] | 1073741824U; } else { msgbuf[0] = msgbuf[0] | 2147483648U; } igb_write_mbx(hw, (u32 *)(& msgbuf), 1, (int )((u16 )vf)); return; } } static void igb_msg_task(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 vf ; s32 tmp ; s32 tmp___0 ; s32 tmp___1 ; { hw = & adapter->hw; vf = 0U; goto ldv_58405; ldv_58404: tmp = igb_check_for_rst(hw, (int )((u16 )vf)); if (tmp == 0) { igb_vf_reset_event(adapter, vf); } else { } tmp___0 = igb_check_for_msg(hw, (int )((u16 )vf)); if (tmp___0 == 0) { igb_rcv_msg_from_vf(adapter, vf); } else { } tmp___1 = igb_check_for_ack(hw, (int )((u16 )vf)); if (tmp___1 == 0) { igb_rcv_ack_from_vf(adapter, vf); } else { } vf = vf + 1U; ldv_58405: ; if (adapter->vfs_allocated_count > vf) { goto 
/* NOTE(review): CIL-generated code, kept byte-identical; comments only.
 * Tail of igb_msg_task's loop, then:
 * igb_set_uta(): when SR-IOV is active on a capable MAC (type > 1), fill
 * every unicast-table register (uta_reg_count entries starting at offset
 * 40960) with all-ones.
 * igb_intr_msi(): MSI interrupt handler — reads the cause register at 192,
 * flushes pending ITR, handles reset (bit 30), doosync (bit 28), link change
 * (bits 2/3 -> watchdog kick unless state bit 2 is set), timesync (bit 19),
 * and schedules NAPI; always returns 1 (IRQ_HANDLED).
 * igb_intr(): legacy (shared-line) variant — identical except it returns 0
 * (IRQ_NONE) when bit 31 of the cause word is clear, i.e. the interrupt was
 * not ours. */
ldv_58404; } else { } return; } } static void igb_set_uta(struct igb_adapter *adapter ) { struct e1000_hw *hw ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; if ((unsigned int )hw->mac.type <= 1U) { return; } else { } if (adapter->vfs_allocated_count == 0U) { return; } else { } i = 0; goto ldv_58416; ldv_58415: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(4294967295U, (void volatile *)hw_addr + (unsigned long )((i << 2) + 40960)); } else { } i = i + 1; ldv_58416: ; if ((int )hw->mac.uta_reg_count > i) { goto ldv_58415; } else { } return; } } static irqreturn_t igb_intr_msi(int irq , void *data ) { struct igb_adapter *adapter ; struct igb_q_vector *q_vector ; struct e1000_hw *hw ; u32 icr ; u32 tmp ; int tmp___0 ; { adapter = (struct igb_adapter *)data; q_vector = adapter->q_vector[0]; hw = & adapter->hw; tmp = igb_rd32(hw, 192U); icr = tmp; igb_write_itr(q_vector); if ((icr & 1073741824U) != 0U) { schedule_work(& adapter->reset_task); } else { } if ((icr & 268435456U) != 0U) { adapter->stats.doosync = adapter->stats.doosync + 1ULL; } else { } if ((icr & 12U) != 0U) { hw->mac.get_link_status = 1; tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 == 0) { ldv_mod_timer_43(& adapter->watchdog_timer, (unsigned long )jiffies + 1UL); } else { } } else { } if ((icr & 524288U) != 0U) { igb_tsync_interrupt(adapter); } else { } napi_schedule(& q_vector->napi); return (1); } } static irqreturn_t igb_intr(int irq , void *data ) { struct igb_adapter *adapter ; struct igb_q_vector *q_vector ; struct e1000_hw *hw ; u32 icr ; u32 tmp ; int tmp___0 ; { adapter = (struct igb_adapter *)data; q_vector = adapter->q_vector[0]; hw = & adapter->hw; tmp = igb_rd32(hw, 192U); icr = tmp; if ((int )icr >= 0) { return (0); } else { } igb_write_itr(q_vector); if ((icr & 1073741824U) != 0U) { 
/* igb_ring_irq_enable(): NAPI-completion hook — re-tune ITR when dynamic
 * moderation is configured (low bits of rx/tx_itr_setting), then, unless the
 * adapter is down (state bit 2), re-enable this vector's interrupt: in MSI-X
 * mode (flag 0x2000) by writing eims_value to offset 5412, otherwise via
 * igb_irq_enable(). igb_poll() begins at the end of this region but its
 * definition continues past this chunk; its fragment is reproduced verbatim
 * and intentionally left undocumented. */
schedule_work(& adapter->reset_task); } else { } if ((icr & 268435456U) != 0U) { adapter->stats.doosync = adapter->stats.doosync + 1ULL; } else { } if ((icr & 12U) != 0U) { hw->mac.get_link_status = 1; tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 == 0) { ldv_mod_timer_44(& adapter->watchdog_timer, (unsigned long )jiffies + 1UL); } else { } } else { } if ((icr & 524288U) != 0U) { igb_tsync_interrupt(adapter); } else { } napi_schedule(& q_vector->napi); return (1); } } static void igb_ring_irq_enable(struct igb_q_vector *q_vector ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp ; int tmp___0 ; { adapter = q_vector->adapter; hw = & adapter->hw; if (((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0) && (adapter->rx_itr_setting & 3U) != 0U) || ((unsigned long )q_vector->rx.ring == (unsigned long )((struct igb_ring *)0) && (adapter->tx_itr_setting & 3U) != 0U)) { if (adapter->num_q_vectors == 1U && (unsigned long )adapter->vf_data == (unsigned long )((struct vf_data_storage *)0)) { igb_set_itr(q_vector); } else { igb_update_ring_itr(q_vector); } } else { } tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___0 == 0) { if ((adapter->flags & 8192U) != 0U) { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(q_vector->eims_value, (void volatile *)hw_addr + 5412U); } else { } } else { } } else { igb_irq_enable(adapter); } } else { } return; } } static int igb_poll(struct napi_struct *napi , int budget ) { struct igb_q_vector *q_vector ; struct napi_struct const *__mptr ; bool clean_complete ; bool tmp ; { __mptr = (struct napi_struct const *)napi; q_vector = (struct igb_q_vector *)__mptr + 0xffffffffffffffb0UL; clean_complete = 1; if (((q_vector->adapter)->flags & 2U) != 0U) { igb_update_dca(q_vector); } 
else { } if ((unsigned long )q_vector->tx.ring != (unsigned long )((struct igb_ring *)0)) { clean_complete = igb_clean_tx_irq(q_vector); } else { } if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0)) { tmp = igb_clean_rx_irq(q_vector, budget); clean_complete = ((int )clean_complete & (int )tmp) != 0; } else { } if (! clean_complete) { return (budget); } else { } napi_complete(napi); igb_ring_irq_enable(q_vector); return (0); } } static bool igb_clean_tx_irq(struct igb_q_vector *q_vector ) { struct igb_adapter *adapter ; struct igb_ring *tx_ring ; struct igb_tx_buffer *tx_buffer ; union e1000_adv_tx_desc *tx_desc ; unsigned int total_bytes ; unsigned int total_packets ; unsigned int budget ; unsigned int i ; int tmp ; union e1000_adv_tx_desc *eop_desc ; long tmp___0 ; long tmp___1 ; long tmp___2 ; struct netdev_queue *tmp___3 ; struct e1000_hw *hw ; unsigned int tmp___4 ; u32 tmp___5 ; u32 tmp___6 ; int tmp___7 ; bool tmp___8 ; int tmp___9 ; bool tmp___10 ; int tmp___11 ; int tmp___12 ; long tmp___13 ; { adapter = q_vector->adapter; tx_ring = q_vector->tx.ring; total_bytes = 0U; total_packets = 0U; budget = (unsigned int )q_vector->tx.work_limit; i = (unsigned int )tx_ring->next_to_clean; tmp = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp != 0) { return (1); } else { } tx_buffer = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )i; tx_desc = (union e1000_adv_tx_desc *)tx_ring->desc + (unsigned long )i; i = i - (unsigned int )tx_ring->count; ldv_58466: eop_desc = tx_buffer->next_to_watch; if ((unsigned long )eop_desc == (unsigned long )((union e1000_adv_tx_desc *)0)) { goto ldv_58462; } else { } if ((eop_desc->wb.status & 1U) == 0U) { goto ldv_58462; } else { } tx_buffer->next_to_watch = (union e1000_adv_tx_desc *)0; total_bytes = tx_buffer->bytecount + total_bytes; total_packets = (unsigned int )tx_buffer->gso_segs + total_packets; dev_consume_skb_any(tx_buffer->skb); 
dma_unmap_single_attrs(tx_ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1, (struct dma_attrs *)0); tx_buffer->skb = (struct sk_buff *)0; tx_buffer->len = 0U; goto ldv_58464; ldv_58463: tx_buffer = tx_buffer + 1; tx_desc = tx_desc + 1; i = i + 1U; tmp___0 = ldv__builtin_expect(i == 0U, 0L); if (tmp___0 != 0L) { i = i - (unsigned int )tx_ring->count; tx_buffer = tx_ring->__annonCompField117.tx_buffer_info; tx_desc = (union e1000_adv_tx_desc *)tx_ring->desc; } else { } if (tx_buffer->len != 0U) { dma_unmap_page(tx_ring->dev, tx_buffer->dma, (size_t )tx_buffer->len, 1); tx_buffer->len = 0U; } else { } ldv_58464: ; if ((unsigned long )tx_desc != (unsigned long )eop_desc) { goto ldv_58463; } else { } tx_buffer = tx_buffer + 1; tx_desc = tx_desc + 1; i = i + 1U; tmp___1 = ldv__builtin_expect(i == 0U, 0L); if (tmp___1 != 0L) { i = i - (unsigned int )tx_ring->count; tx_buffer = tx_ring->__annonCompField117.tx_buffer_info; tx_desc = (union e1000_adv_tx_desc *)tx_ring->desc; } else { } __builtin_prefetch((void const *)tx_desc); budget = budget - 1U; tmp___2 = ldv__builtin_expect(budget != 0U, 1L); if (tmp___2 != 0L) { goto ldv_58466; } else { } ldv_58462: tmp___3 = txring_txq((struct igb_ring const *)tx_ring); netdev_tx_completed_queue(tmp___3, total_packets, total_bytes); i = (unsigned int )tx_ring->count + i; tx_ring->next_to_clean = (u16 )i; u64_stats_init(& tx_ring->__annonCompField120.__annonCompField118.tx_syncp); tx_ring->__annonCompField120.__annonCompField118.tx_stats.bytes = tx_ring->__annonCompField120.__annonCompField118.tx_stats.bytes + (u64 )total_bytes; tx_ring->__annonCompField120.__annonCompField118.tx_stats.packets = tx_ring->__annonCompField120.__annonCompField118.tx_stats.packets + (u64 )total_packets; u64_stats_init(& tx_ring->__annonCompField120.__annonCompField118.tx_syncp); q_vector->tx.total_bytes = q_vector->tx.total_bytes + total_bytes; q_vector->tx.total_packets = q_vector->tx.total_packets + total_packets; tmp___7 = constant_test_bit(3L, 
(unsigned long const volatile *)(& tx_ring->flags)); if (tmp___7 != 0) { hw = & adapter->hw; clear_bit(3L, (unsigned long volatile *)(& tx_ring->flags)); if ((unsigned long )tx_buffer->next_to_watch != (unsigned long )((union e1000_adv_tx_desc *)0) && (long )((tx_buffer->time_stamp + (unsigned long )((int )adapter->tx_timeout_factor * 250)) - (unsigned long )jiffies) < 0L) { tmp___6 = igb_rd32(hw, 8U); if ((tmp___6 & 16U) == 0U) { tmp___4 = readl((void const volatile *)tx_ring->tail); tmp___5 = igb_rd32(hw, (u32 )((unsigned int )tx_ring->reg_idx <= 3U ? (int )tx_ring->reg_idx * 256 + 14352 : (int )tx_ring->reg_idx * 64 + 57360)); dev_err((struct device const *)tx_ring->dev, "Detected Tx Unit Hang\n Tx Queue <%d>\n TDH <%x>\n TDT <%x>\n next_to_use <%x>\n next_to_clean <%x>\nbuffer_info[next_to_clean]\n time_stamp <%lx>\n next_to_watch <%p>\n jiffies <%lx>\n desc.status <%x>\n", (int )tx_ring->queue_index, tmp___5, tmp___4, (int )tx_ring->next_to_use, (int )tx_ring->next_to_clean, tx_buffer->time_stamp, tx_buffer->next_to_watch, jiffies, (tx_buffer->next_to_watch)->wb.status); netif_stop_subqueue(tx_ring->netdev, (int )tx_ring->queue_index); return (1); } else { } } else { } } else { } if (total_packets != 0U) { tmp___10 = netif_carrier_ok((struct net_device const *)tx_ring->netdev); if ((int )tmp___10) { tmp___11 = igb_desc_unused(tx_ring); if ((unsigned int )tmp___11 > 41U) { tmp___12 = 1; } else { tmp___12 = 0; } } else { tmp___12 = 0; } } else { tmp___12 = 0; } tmp___13 = ldv__builtin_expect((long )tmp___12, 0L); if (tmp___13 != 0L) { __asm__ volatile ("mfence": : : "memory"); tmp___8 = __netif_subqueue_stopped((struct net_device const *)tx_ring->netdev, (int )tx_ring->queue_index); if ((int )tmp___8) { tmp___9 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___9 == 0) { netif_wake_subqueue(tx_ring->netdev, (int )tx_ring->queue_index); u64_stats_init(& tx_ring->__annonCompField120.__annonCompField118.tx_syncp); 
tx_ring->__annonCompField120.__annonCompField118.tx_stats.restart_queue = tx_ring->__annonCompField120.__annonCompField118.tx_stats.restart_queue + 1ULL; u64_stats_init(& tx_ring->__annonCompField120.__annonCompField118.tx_syncp); } else { } } else { } } else { } return (budget != 0U); } } static void igb_reuse_rx_page(struct igb_ring *rx_ring , struct igb_rx_buffer *old_buff ) { struct igb_rx_buffer *new_buff ; u16 nta ; { nta = rx_ring->next_to_alloc; new_buff = rx_ring->__annonCompField117.rx_buffer_info + (unsigned long )nta; nta = (u16 )((int )nta + 1); rx_ring->next_to_alloc = (int )rx_ring->count > (int )nta ? nta : 0U; *new_buff = *old_buff; dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, (unsigned long )old_buff->page_offset, 2048UL, 2); return; } } __inline static bool igb_page_is_reserved(struct page *page ) { int tmp ; int tmp___0 ; { tmp = page_to_nid((struct page const *)page); tmp___0 = numa_mem_id(); return ((bool )(tmp != tmp___0 || (int )page->__annonCompField42.__annonCompField37.pfmemalloc)); } } static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer , struct page *page , unsigned int truesize ) { bool tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; { tmp = igb_page_is_reserved(page); tmp___0 = ldv__builtin_expect((long )tmp, 0L); if (tmp___0 != 0L) { return (0); } else { } tmp___1 = page_count(page); tmp___2 = ldv__builtin_expect(tmp___1 != 1, 0L); if (tmp___2 != 0L) { return (0); } else { } rx_buffer->page_offset = rx_buffer->page_offset ^ 2048U; atomic_inc(& page->__annonCompField42.__annonCompField41.__annonCompField40._count); return (1); } } static bool igb_add_rx_frag(struct igb_ring *rx_ring , struct igb_rx_buffer *rx_buffer , union e1000_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct page *page ; unsigned int size ; unsigned int truesize ; unsigned char *va ; void *tmp ; __le32 tmp___0 ; unsigned char *tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; bool tmp___5 ; int tmp___6 ; unsigned char 
*tmp___7 ; bool tmp___8 ; { page = rx_buffer->page; size = (unsigned int )rx_desc->wb.upper.length; truesize = 2048U; if (size <= 256U) { tmp___5 = skb_is_nonlinear((struct sk_buff const *)skb); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { tmp = lowmem_page_address((struct page const *)page); va = (unsigned char *)tmp + (unsigned long )rx_buffer->page_offset; tmp___0 = igb_test_staterr(rx_desc, 32768U); if (tmp___0 != 0U) { igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); va = va + 16UL; size = size - 16U; } else { } tmp___1 = __skb_put(skb, size); memcpy((void *)tmp___1, (void const *)va, (size_t )(size + 7U) & 4294967288UL); tmp___2 = igb_page_is_reserved(page); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 1L); if (tmp___4 != 0L) { return (1); } else { } __free_pages(page, 0U); return (0); } else { } } else { } tmp___7 = skb_end_pointer((struct sk_buff const *)skb); skb_add_rx_frag(skb, (int )((struct skb_shared_info *)tmp___7)->nr_frags, page, (int )rx_buffer->page_offset, (int )size, truesize); tmp___8 = igb_can_reuse_rx_page(rx_buffer, page, truesize); return (tmp___8); } } static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring , union e1000_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct igb_rx_buffer *rx_buffer ; struct page *page ; void *page_addr ; void *tmp ; long tmp___0 ; long tmp___1 ; bool tmp___2 ; { rx_buffer = rx_ring->__annonCompField117.rx_buffer_info + (unsigned long )rx_ring->next_to_clean; page = rx_buffer->page; prefetchw((void const *)page); tmp___1 = ldv__builtin_expect((unsigned long )skb == (unsigned long )((struct sk_buff *)0), 1L); if (tmp___1 != 0L) { tmp = lowmem_page_address((struct page const *)page); page_addr = tmp + (unsigned long )rx_buffer->page_offset; __builtin_prefetch((void const *)page_addr); __builtin_prefetch((void const *)page_addr + 64U); skb = napi_alloc_skb(& (rx_ring->q_vector)->napi, 256U); tmp___0 = 
ldv__builtin_expect((unsigned long )skb == (unsigned long )((struct sk_buff *)0), 0L); if (tmp___0 != 0L) { rx_ring->__annonCompField120.__annonCompField119.rx_stats.alloc_failed = rx_ring->__annonCompField120.__annonCompField119.rx_stats.alloc_failed + 1ULL; return ((struct sk_buff *)0); } else { } prefetchw((void const *)skb->data); } else { } dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, (unsigned long )rx_buffer->page_offset, 2048UL, 2); tmp___2 = igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb); if ((int )tmp___2) { igb_reuse_rx_page(rx_ring, rx_buffer); } else { dma_unmap_page(rx_ring->dev, rx_buffer->dma, 4096UL, 2); } rx_buffer->page = (struct page *)0; return (skb); } } __inline static void igb_rx_checksum(struct igb_ring *ring , union e1000_adv_rx_desc *rx_desc , struct sk_buff *skb ) { __le32 tmp ; int tmp___0 ; __le32 tmp___1 ; __le32 tmp___2 ; struct _ddebug descriptor ; long tmp___3 ; { skb_checksum_none_assert((struct sk_buff const *)skb); tmp = igb_test_staterr(rx_desc, 4U); if (tmp != 0U) { return; } else { } if (((ring->netdev)->features & 17179869184ULL) == 0ULL) { return; } else { } tmp___1 = igb_test_staterr(rx_desc, 1610612736U); if (tmp___1 != 0U) { if (skb->len != 60U) { u64_stats_init(& ring->__annonCompField120.__annonCompField119.rx_syncp); ring->__annonCompField120.__annonCompField119.rx_stats.csum_err = ring->__annonCompField120.__annonCompField119.rx_stats.csum_err + 1ULL; u64_stats_init(& ring->__annonCompField120.__annonCompField119.rx_syncp); } else { tmp___0 = constant_test_bit(0L, (unsigned long const volatile *)(& ring->flags)); if (tmp___0 == 0) { u64_stats_init(& ring->__annonCompField120.__annonCompField119.rx_syncp); ring->__annonCompField120.__annonCompField119.rx_stats.csum_err = ring->__annonCompField120.__annonCompField119.rx_stats.csum_err + 1ULL; u64_stats_init(& ring->__annonCompField120.__annonCompField119.rx_syncp); } else { } } return; } else { } tmp___2 = igb_test_staterr(rx_desc, 48U); if (tmp___2 
!= 0U) { skb->ip_summed = 1U; } else { } descriptor.modname = "igb"; descriptor.function = "igb_rx_checksum"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_main.c"; descriptor.format = "cksum success: bits %08X\n"; descriptor.lineno = 6752U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_dev_dbg(& descriptor, (struct device const *)ring->dev, "cksum success: bits %08X\n", rx_desc->wb.upper.status_error); } else { } return; } } __inline static void igb_rx_hash(struct igb_ring *ring , union e1000_adv_rx_desc *rx_desc , struct sk_buff *skb ) { { if (((ring->netdev)->features & 8589934592ULL) != 0ULL) { skb_set_hash(skb, rx_desc->wb.lower.hi_dword.rss, 2); } else { } return; } } static bool igb_is_non_eop(struct igb_ring *rx_ring , union e1000_adv_rx_desc *rx_desc ) { u32 ntc ; __le32 tmp ; long tmp___0 ; { ntc = (u32 )((int )rx_ring->next_to_clean + 1); ntc = (u32 )rx_ring->count > ntc ? 
ntc : 0U; rx_ring->next_to_clean = (u16 )ntc; __builtin_prefetch((void const *)rx_ring->desc + (unsigned long )ntc); tmp = igb_test_staterr(rx_desc, 2U); tmp___0 = ldv__builtin_expect(tmp != 0U, 1L); if (tmp___0 != 0L) { return (0); } else { } return (1); } } static void igb_pull_tail(struct igb_ring *rx_ring , union e1000_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct skb_frag_struct *frag ; unsigned char *tmp ; unsigned char *va ; unsigned int pull_len ; void *tmp___0 ; __le32 tmp___1 ; { tmp = skb_end_pointer((struct sk_buff const *)skb); frag = (struct skb_frag_struct *)(& ((struct skb_shared_info *)tmp)->frags); tmp___0 = skb_frag_address((skb_frag_t const *)frag); va = (unsigned char *)tmp___0; tmp___1 = igb_test_staterr(rx_desc, 32768U); if (tmp___1 != 0U) { igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); skb_frag_size_sub(frag, 16); frag->page_offset = frag->page_offset + 16U; skb->data_len = skb->data_len - 16U; skb->len = skb->len - 16U; va = va + 16UL; } else { } pull_len = eth_get_headlen((void *)va, 256U); skb_copy_to_linear_data(skb, (void const *)va, (pull_len + 7U) & 4294967288U); skb_frag_size_sub(frag, (int )pull_len); frag->page_offset = frag->page_offset + pull_len; skb->data_len = skb->data_len - pull_len; skb->tail = skb->tail + pull_len; return; } } static bool igb_cleanup_headers(struct igb_ring *rx_ring , union e1000_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct net_device *netdev ; __le32 tmp ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp = igb_test_staterr(rx_desc, 2533359616U); tmp___0 = ldv__builtin_expect(tmp != 0U, 0L); if (tmp___0 != 0L) { netdev = rx_ring->netdev; if ((netdev->features & 274877906944ULL) == 0ULL) { dev_kfree_skb_any(skb); return (1); } else { } } else { } tmp___1 = skb_is_nonlinear((struct sk_buff const *)skb); if ((int )tmp___1) { igb_pull_tail(rx_ring, rx_desc, skb); } else { } tmp___2 = eth_skb_pad(skb); if (tmp___2 != 0) { return (1); } else { } return (0); } } static void 
igb_process_skb_fields(struct igb_ring *rx_ring , union e1000_adv_rx_desc *rx_desc , struct sk_buff *skb ) { struct net_device *dev ; __le32 tmp ; __le32 tmp___0 ; u16 vid ; __u16 tmp___1 ; __le32 tmp___2 ; int tmp___3 ; __le32 tmp___4 ; { dev = rx_ring->netdev; igb_rx_hash(rx_ring, rx_desc, skb); igb_rx_checksum(rx_ring, rx_desc, skb); tmp = igb_test_staterr(rx_desc, 65536U); if (tmp != 0U) { tmp___0 = igb_test_staterr(rx_desc, 32768U); if (tmp___0 == 0U) { igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); } else { } } else { } if ((dev->features & 256ULL) != 0ULL) { tmp___4 = igb_test_staterr(rx_desc, 8U); if (tmp___4 != 0U) { tmp___2 = igb_test_staterr(rx_desc, 262144U); if (tmp___2 != 0U) { tmp___3 = constant_test_bit(1L, (unsigned long const volatile *)(& rx_ring->flags)); if (tmp___3 != 0) { tmp___1 = __fswab16((int )rx_desc->wb.upper.vlan); vid = tmp___1; } else { vid = rx_desc->wb.upper.vlan; } } else { vid = rx_desc->wb.upper.vlan; } __vlan_hwaccel_put_tag(skb, 129, (int )vid); } else { } } else { } skb_record_rx_queue(skb, (int )rx_ring->queue_index); skb->protocol = eth_type_trans(skb, rx_ring->netdev); return; } } static bool igb_clean_rx_irq(struct igb_q_vector *q_vector , int const budget ) { struct igb_ring *rx_ring ; struct sk_buff *skb ; unsigned int total_bytes ; unsigned int total_packets ; u16 cleaned_count ; int tmp ; union e1000_adv_rx_desc *rx_desc ; bool tmp___0 ; bool tmp___1 ; long tmp___2 ; { rx_ring = q_vector->rx.ring; skb = rx_ring->__annonCompField120.__annonCompField119.skb; total_bytes = 0U; total_packets = 0U; tmp = igb_desc_unused(rx_ring); cleaned_count = (u16 )tmp; goto ldv_58555; ldv_58556: ; if ((unsigned int )cleaned_count > 15U) { igb_alloc_rx_buffers(rx_ring, (int )cleaned_count); cleaned_count = 0U; } else { } rx_desc = (union e1000_adv_rx_desc *)rx_ring->desc + (unsigned long )rx_ring->next_to_clean; if (rx_desc->wb.upper.status_error == 0U) { goto ldv_58554; } else { } __asm__ volatile ("": : : "memory"); skb = 
igb_fetch_rx_buffer(rx_ring, rx_desc, skb); if ((unsigned long )skb == (unsigned long )((struct sk_buff *)0)) { goto ldv_58554; } else { } cleaned_count = (u16 )((int )cleaned_count + 1); tmp___0 = igb_is_non_eop(rx_ring, rx_desc); if ((int )tmp___0) { goto ldv_58555; } else { } tmp___1 = igb_cleanup_headers(rx_ring, rx_desc, skb); if ((int )tmp___1) { skb = (struct sk_buff *)0; goto ldv_58555; } else { } total_bytes = skb->len + total_bytes; igb_process_skb_fields(rx_ring, rx_desc, skb); napi_gro_receive(& q_vector->napi, skb); skb = (struct sk_buff *)0; total_packets = total_packets + 1U; ldv_58555: tmp___2 = ldv__builtin_expect((unsigned int )budget > total_packets, 1L); if (tmp___2 != 0L) { goto ldv_58556; } else { } ldv_58554: rx_ring->__annonCompField120.__annonCompField119.skb = skb; u64_stats_init(& rx_ring->__annonCompField120.__annonCompField119.rx_syncp); rx_ring->__annonCompField120.__annonCompField119.rx_stats.packets = rx_ring->__annonCompField120.__annonCompField119.rx_stats.packets + (u64 )total_packets; rx_ring->__annonCompField120.__annonCompField119.rx_stats.bytes = rx_ring->__annonCompField120.__annonCompField119.rx_stats.bytes + (u64 )total_bytes; u64_stats_init(& rx_ring->__annonCompField120.__annonCompField119.rx_syncp); q_vector->rx.total_packets = q_vector->rx.total_packets + total_packets; q_vector->rx.total_bytes = q_vector->rx.total_bytes + total_bytes; if ((unsigned int )cleaned_count != 0U) { igb_alloc_rx_buffers(rx_ring, (int )cleaned_count); } else { } return ((unsigned int )budget > total_packets); } } static bool igb_alloc_mapped_page(struct igb_ring *rx_ring , struct igb_rx_buffer *bi ) { struct page *page ; dma_addr_t dma ; long tmp ; long tmp___0 ; int tmp___1 ; { page = bi->page; tmp = ldv__builtin_expect((unsigned long )page != (unsigned long )((struct page *)0), 1L); if (tmp != 0L) { return (1); } else { } page = dev_alloc_page(); tmp___0 = ldv__builtin_expect((unsigned long )page == (unsigned long )((struct page *)0), 0L); 
/* NOTE(review): this file is CIL-generated LDV-verifier output of
 * drivers/net/ethernet/intel/igb/igb_main.c.  The tmp___N temporaries and
 * ldv_NNNNN labels come from the generator; code below is annotated only,
 * tokens are unchanged. */
/* Tail of igb_alloc_mapped_page() -- its head is on an earlier source line.
 * This part maps the freshly allocated page for DMA and publishes it into
 * the ring's buffer_info slot; returns 0 on failure, 1 on success. */
if (tmp___0 != 0L) { /* dev_alloc_page() failed: count it, report no buffer */ rx_ring->__annonCompField120.__annonCompField119.rx_stats.alloc_failed = rx_ring->__annonCompField120.__annonCompField119.rx_stats.alloc_failed + 1ULL; return (0); } else { }
dma = dma_map_page(rx_ring->dev, page, 0UL, 4096UL, 2); /* map whole 4 KiB page; direction 2 is presumably DMA_FROM_DEVICE -- TODO confirm */
tmp___1 = dma_mapping_error(rx_ring->dev, dma);
if (tmp___1 != 0) { /* mapping failed: release the page, count the failure */ __free_pages(page, 0U); rx_ring->__annonCompField120.__annonCompField119.rx_stats.alloc_failed = rx_ring->__annonCompField120.__annonCompField119.rx_stats.alloc_failed + 1ULL; return (0); } else { }
bi->dma = dma; bi->page = page; bi->page_offset = 0U;
return (1); } }
/* igb_alloc_rx_buffers() - refill @cleaned_count rx descriptors with mapped
 * pages.  Walks the descriptor ring from next_to_use, pairing each
 * e1000_adv_rx_desc with its igb_rx_buffer, then publishes the new tail to
 * hardware.  The index @i is kept biased by -count so the wrap test is a
 * simple compare against zero (generator idiom from the upstream source). */
void igb_alloc_rx_buffers(struct igb_ring *rx_ring , u16 cleaned_count ) { union e1000_adv_rx_desc *rx_desc ; struct igb_rx_buffer *bi ; u16 i ; bool tmp ; int tmp___0 ; long tmp___1 ;
{ i = rx_ring->next_to_use;
if ((unsigned int )cleaned_count == 0U) { /* nothing to refill */ return; } else { }
rx_desc = (union e1000_adv_rx_desc *)rx_ring->desc + (unsigned long )i;
bi = rx_ring->__annonCompField117.rx_buffer_info + (unsigned long )i;
i = (int )i - (int )rx_ring->count; /* bias negative: i == 0 <=> wrapped past ring end */
ldv_58571: /* loop: fill one descriptor per iteration */ tmp = igb_alloc_mapped_page(rx_ring, bi);
if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; }
if (tmp___0) { /* page alloc/map failed: stop refilling early */ goto ldv_58570; } else { }
rx_desc->read.pkt_addr = bi->dma + (dma_addr_t )bi->page_offset; /* hand the buffer address to the NIC */
rx_desc = rx_desc + 1; bi = bi + 1; i = (u16 )((int )i + 1);
tmp___1 = ldv__builtin_expect((unsigned int )i == 0U, 0L);
if (tmp___1 != 0L) { /* wrapped: restart at element 0 and re-bias the index */ rx_desc = (union e1000_adv_rx_desc *)rx_ring->desc; bi = rx_ring->__annonCompField117.rx_buffer_info; i = (int )i - (int )rx_ring->count; } else { }
rx_desc->wb.upper.status_error = 0U; /* zero writeback status, presumably so a stale descriptor-done flag is not seen */
cleaned_count = (u16 )((int )cleaned_count - 1);
if ((unsigned int )cleaned_count != 0U) { goto ldv_58571; } else { }
ldv_58570: i = (int )rx_ring->count + (int )i; /* un-bias back to a real ring index */
if ((int )rx_ring->next_to_use != (int )i) { rx_ring->next_to_use = i; rx_ring->next_to_alloc = i; __asm__ volatile ("sfence": : : "memory"); /* order descriptor stores before the tail doorbell write */ writel((unsigned int )i, (void volatile *)rx_ring->tail); } else { }
return; } }
/* igb_mii_ioctl() - MII ioctl handler.  The literals 35143/35144/35145 are
 * 0x8947..0x8949, i.e. SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG.  Supported
 * only when media_type is 1U (copper PHY per the visible != 1U guard --
 * TODO confirm enum value); -95 == -EOPNOTSUPP, -5 == -EIO. */
static int igb_mii_ioctl(struct net_device *netdev , struct ifreq *ifr , int cmd ) { struct igb_adapter *adapter ; void *tmp ; struct mii_ioctl_data *data ; struct mii_ioctl_data *tmp___0 ; s32 tmp___1 ;
{ tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp;
tmp___0 = if_mii(ifr); data = tmp___0;
if ((unsigned int )adapter->hw.phy.media_type != 1U) { /* not a copper PHY: unsupported */ return (-95); } else { }
switch (cmd) { case 35143: /* SIOCGMIIPHY: report the PHY address */ data->phy_id = (__u16 )adapter->hw.phy.addr; goto ldv_58580;
case 35144: /* SIOCGMIIREG: read a PHY register (5-bit register number) */ tmp___1 = igb_read_phy_reg(& adapter->hw, (u32 )data->reg_num & 31U, & data->val_out); if (tmp___1 != 0) { return (-5); } else { } goto ldv_58580;
case 35145: ; /* SIOCSMIIREG: register writes not supported -- falls to default */
default: ; return (-95); }
ldv_58580: ; return (0); } }
/* igb_ioctl() - ndo_do_ioctl dispatcher: MII ioctls go to igb_mii_ioctl();
 * 35249/35248 (0x89b1/0x89b0, SIOCGHWTSTAMP/SIOCSHWTSTAMP) go to the PTP
 * timestamp-config helpers. */
static int igb_ioctl(struct net_device *netdev , struct ifreq *ifr , int cmd ) { int tmp ; int tmp___0 ; int tmp___1 ;
{ switch (cmd) { case 35143: ; case 35144: ; case 35145: tmp = igb_mii_ioctl(netdev, ifr, cmd); return (tmp);
case 35249: tmp___0 = igb_ptp_get_ts_config(netdev, ifr); return (tmp___0);
case 35248: tmp___1 = igb_ptp_set_ts_config(netdev, ifr); return (tmp___1);
default: ; return (-95); } } }
/* igb_read_pci_cfg() - read a 16-bit word from the device's PCI config space. */
void igb_read_pci_cfg(struct e1000_hw *hw , u32 reg , u16 *value ) { struct igb_adapter *adapter ;
{ adapter = (struct igb_adapter *)hw->back; pci_read_config_word((struct pci_dev const *)adapter->pdev, (int )reg, value); return; } }
/* igb_write_pci_cfg() - write a 16-bit word to the device's PCI config space. */
void igb_write_pci_cfg(struct e1000_hw *hw , u32 reg , u16 *value ) { struct igb_adapter *adapter ;
{ adapter = (struct igb_adapter *)hw->back; pci_write_config_word((struct pci_dev const *)adapter->pdev, (int )reg, (int )*value); return; } }
/* igb_read_pcie_cap_reg() - read a word from the PCIe capability block.
 * -3 is the driver's config-error code (presumably -E1000_ERR_CONFIG --
 * TODO confirm against e1000_defines.h). */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw , u32 reg , u16 *value ) { struct igb_adapter *adapter ; int tmp ;
{ adapter = (struct igb_adapter *)hw->back; tmp = pcie_capability_read_word(adapter->pdev, (int )reg, value); if (tmp != 0) { return (-3); } else { } return (0); } }
/* Head of igb_write_pcie_cap_reg() -- body continues on the next source
 * line; write counterpart of the reader above. */
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw , u32 reg , u16 *value ) { struct igb_adapter *adapter ; int tmp ; { adapter = 
(struct igb_adapter *)hw->back; tmp = pcie_capability_write_word(adapter->pdev, (int )reg, (int )*value); if (tmp != 0) { return (-3); } else { } return (0); } } static void igb_vlan_mode(struct net_device *netdev , netdev_features_t features ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u32 ctrl ; u32 rctl ; bool enable ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; enable = (features & 256ULL) != 0ULL; if ((int )enable) { ctrl = igb_rd32(hw, 0U); ctrl = ctrl | 1073741824U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ctrl, (void volatile *)hw_addr); } else { } rctl = igb_rd32(hw, 256U); rctl = rctl & 4294443007U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(rctl, (void volatile *)hw_addr___0 + 256U); } else { } } else { ctrl = igb_rd32(hw, 0U); ctrl = ctrl & 3221225471U; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(ctrl, (void volatile *)hw_addr___1); } else { } } igb_rlpml_set(adapter); return; } } static int igb_vlan_rx_add_vid(struct net_device *netdev , __be16 proto , u16 vid ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; int pf_id ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; pf_id = (int )adapter->vfs_allocated_count; igb_vlvf_set(adapter, (u32 )vid, 1, (u32 )pf_id); igb_vfta_set(hw, (u32 )vid, 1); set_bit((long 
)vid, (unsigned long volatile *)(& adapter->active_vlans)); return (0); } } /* <- tail of igb_vlan_rx_add_vid(): record @vid in active_vlans after programming VLVF/VFTA */
/* igb_vlan_rx_kill_vid() - ndo_vlan_rx_kill_vid hook: remove @vid for the
 * PF pool.  When igb_vlvf_set() returns non-zero (NOTE(review): looks like
 * that means the vid was not handled via the VLVF pool filters -- confirm
 * its return contract), the VLAN filter table array entry is cleared
 * directly.  The bit is then dropped from active_vlans so a later restore
 * will not re-program it. */
static int igb_vlan_rx_kill_vid(struct net_device *netdev , __be16 proto , u16 vid ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; int pf_id ; s32 err ;
{ tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw;
pf_id = (int )adapter->vfs_allocated_count; /* the PF's pool index follows the VF pools */
err = igb_vlvf_set(adapter, (u32 )vid, 0, (u32 )pf_id);
if (err != 0) { igb_vfta_set(hw, (u32 )vid, 0); } else { }
clear_bit((long )vid, (unsigned long volatile *)(& adapter->active_vlans));
return (0); } }
/* igb_restore_vlan() - re-apply the vlan offload mode and re-register every
 * vid set in the 4096-bit active_vlans bitmap (4096 == VLAN_N_VID).  The
 * literal 129 (0x0081) is presumably the byte-swapped 802.1Q ethertype,
 * htons(ETH_P_8021Q) on little-endian -- TODO confirm. */
static void igb_restore_vlan(struct igb_adapter *adapter ) { u16 vid ; unsigned long tmp ; unsigned long tmp___0 ;
{ igb_vlan_mode(adapter->netdev, (adapter->netdev)->features);
tmp = find_first_bit((unsigned long const *)(& adapter->active_vlans), 4096UL); vid = (u16 )tmp;
goto ldv_58659;
ldv_58658: /* loop body: re-add one vid */ igb_vlan_rx_add_vid(adapter->netdev, 129, (int )vid);
tmp___0 = find_next_bit((unsigned long const *)(& adapter->active_vlans), 4096UL, (unsigned long )((int )vid + 1)); vid = (u16 )tmp___0;
ldv_58659: ; if ((unsigned int )vid <= 4095U) { goto ldv_58658; } else { } /* find_*_bit() yields 4096 when no bit remains */
return; } }
/* igb_set_spd_dplx() - force link speed/duplex.  @spd and @dplx combine as
 * (spd + dplx): 10U/11U/100U/101U/1001U encode 10-half, 10-full, 100-half,
 * 100-full and 1000-full; the 1000-half case (continued on the next source
 * line) is rejected.  media_type 3U (presumably internal serdes -- TODO
 * confirm enum) additionally forbids 10-half/10-full/100-half.  The
 * forced_speed_duplex values 1U/2U/4U/8U and autoneg_advertised 32U look
 * like the ADVERTISE_* mask bits -- confirm against e1000_defines.h.
 * Returns 0 on success, -22 (-EINVAL) on an unsupported combination. */
int igb_set_spd_dplx(struct igb_adapter *adapter , u32 spd , u8 dplx ) { struct pci_dev *pdev ; struct e1000_mac_info *mac ;
{ pdev = adapter->pdev; mac = & adapter->hw.mac; mac->autoneg = 0;
if ((int )spd & 1 || ((int )dplx & -2) != 0) { /* odd speed values or duplex outside {0,1} are invalid */ goto err_inval; } else { }
if ((unsigned int )adapter->hw.phy.media_type == 3U) { switch ((u32 )dplx + spd) { case 10U: ; case 11U: ; case 100U: ; goto err_inval; default: ; goto ldv_58673; } ldv_58673: ; } else { }
switch ((u32 )dplx + spd) { case 10U: mac->forced_speed_duplex = 1U; goto ldv_58675;
case 11U: mac->forced_speed_duplex = 2U; goto ldv_58675;
case 100U: mac->forced_speed_duplex = 4U; goto ldv_58675;
case 101U: mac->forced_speed_duplex = 8U; goto ldv_58675;
case 1001U: /* 1000-full is reached only through autoneg advertising gigabit */ mac->autoneg = 1; adapter->hw.phy.autoneg_advertised = 32U; goto ldv_58675;
case 
/* NOTE(review): CIL-generated (LDV verification harness) code for the Linux igb
 * driver. Formatting was discarded by CIL; register offsets and flag masks are
 * pre-folded constants (e.g. 22528U == 0x5800, apparently E1000_WUC). The first
 * fragment below is the tail of a speed/duplex-validation function that starts
 * before this chunk; it is left untouched. */
1000U: ; default: ; goto err_inval; } ldv_58675: adapter->hw.phy.mdix = 0U; return (0); err_inval: dev_err((struct device const *)(& pdev->dev), "Unsupported Speed/Duplex configuration\n"); return (-22); } }
/* __igb_shutdown(): common suspend/poweroff worker. Detaches the netdev, closes
 * it if running, frees the interrupt scheme and saves PCI config state. If wake
 * filters are requested (wufc != 0) it programs RCTL multicast-promisc when the
 * multicast wake bit (8U) is set, forces PHY power-up via CTRL (bit 1048576U ==
 * 0x100000), then writes the wake control/filter registers at offsets 22528U/
 * 22536U (0x5800/0x5808 — presumably E1000_WUC/E1000_WUFC); otherwise both are
 * cleared. Reports whether wake is armed through *enable_wake, powers the link
 * up/down accordingly, releases firmware control and disables the PCI device.
 * `runtime` selects link-change-only wake (wufc = 1U) for runtime suspend. */
static int __igb_shutdown(struct pci_dev *pdev , bool *enable_wake , bool runtime ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; struct e1000_hw *hw ; u32 ctrl ; u32 rctl ; u32 status ; u32 wufc ; int retval ; bool tmp___1 ; u8 *hw_addr ; u8 *__var ; long tmp___2 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___3 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___4 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___5 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___6 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___7 ; { tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; hw = & adapter->hw; wufc = (int )runtime ? 1U : adapter->wol; retval = 0; netif_device_detach(netdev); tmp___1 = netif_running((struct net_device const *)netdev); if ((int )tmp___1) { __igb_close(netdev, 1); } else { } igb_clear_interrupt_scheme(adapter); retval = pci_save_state(pdev); if (retval != 0) { return (retval); } else { } status = igb_rd32(hw, 8U); if ((status & 2U) != 0U) { wufc = wufc & 4294967294U; } else { } if (wufc != 0U) { igb_setup_rctl(adapter); igb_set_rx_mode(netdev); if ((wufc & 8U) != 0U) { rctl = igb_rd32(hw, 256U); rctl = rctl | 16U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(rctl, (void volatile *)hw_addr + 256U); } else { } } else { } ctrl = igb_rd32(hw, 0U); ctrl = ctrl | 1048576U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(ctrl, (void volatile *)hw_addr___0); } else { } igb_disable_pcie_master(hw); __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(2U, (void volatile *)hw_addr___1 + 22528U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(wufc, (void volatile *)hw_addr___2 + 22536U); } else { } } else { __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(0U, (void volatile *)hw_addr___3 + 22528U); } else { } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(0U, (void volatile *)hw_addr___4 + 22536U); } else { } } *enable_wake = (bool )(wufc != 0U || adapter->en_mng_pt != 0U); if (! *enable_wake) { igb_power_down_link(adapter); } else { igb_power_up_link(adapter); } igb_release_hw_control(adapter); pci_disable_device(pdev); return (0); } }
/* igb_suspend(): dev_pm_ops .suspend — recovers the pci_dev from the embedded
 * struct device (container_of via the 0xffffffffffffff68UL offset), runs
 * __igb_shutdown(), then either prepares wake or drops straight to D3 (3). */
static int igb_suspend(struct device *dev ) { int retval ; bool wake ; struct pci_dev *pdev ; struct device const *__mptr ; { __mptr = (struct device const *)dev; pdev = (struct pci_dev *)__mptr + 0xffffffffffffff68UL; retval = __igb_shutdown(pdev, & wake, 0); if (retval != 0) { return (retval); } else { } if ((int )wake) { pci_prepare_to_sleep(pdev); } else { pci_wake_from_d3(pdev, 0); pci_set_power_state(pdev, 3); } return (0); } }
/* igb_resume(): dev_pm_ops .resume — restores D0 and PCI state, verifies the
 * device is still present (-19 == -ENODEV otherwise), re-enables memory
 * access, disables D3hot/D3cold wake, rebuilds the interrupt scheme (-12 ==
 * -ENOMEM on failure), resets the MAC, clears Wake-Up Status at 22544U
 * (0x5810) and reopens the interface if IFF_UP (flags & 1) under RTNL. */
static int igb_resume(struct device *dev ) { struct pci_dev *pdev ; struct device const *__mptr ; struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; struct e1000_hw *hw ; u32 err ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; u8 *hw_addr ; u8 *__var ; long tmp___5 ; int tmp___6 ; { __mptr = (struct device const *)dev; pdev = (struct pci_dev *)__mptr + 0xffffffffffffff68UL; tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; hw = & adapter->hw; pci_set_power_state(pdev, 0); pci_restore_state(pdev); pci_save_state(pdev); tmp___1 = pci_device_is_present(pdev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (-19); } else { } tmp___3 = pci_enable_device_mem(pdev); err = (u32 )tmp___3; if (err != 0U) { dev_err((struct device const *)(& pdev->dev), "igb: Cannot enable PCI device from suspend\n"); return ((int )err); } else { } pci_set_master(pdev); pci_enable_wake(pdev, 3, 0); pci_enable_wake(pdev, 4, 0); tmp___4 = igb_init_interrupt_scheme(adapter, 1); if (tmp___4 != 0) { dev_err((struct device const *)(& pdev->dev), "Unable to allocate memory for queues\n"); return (-12); } else { } igb_reset(adapter); igb_get_hw_control(adapter); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(4294967295U, (void volatile *)hw_addr + 22544U); } else { } if ((int )netdev->flags & 1) { rtnl_lock(); tmp___6 = __igb_open(netdev, 1); err = (u32 )tmp___6; rtnl_unlock(); if (err != 0U) { return ((int )err); } else { } } else { } netif_device_attach(netdev); return (0); } }
/* igb_runtime_idle(): runtime-PM idle callback — schedules a runtime suspend
 * 5000 ms out when the link is down; always returns -16 (-EBUSY) so the PM
 * core does not suspend immediately. */
static int igb_runtime_idle(struct device *dev ) { struct pci_dev *pdev ; struct device const *__mptr ; struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct device const *)dev; pdev = (struct pci_dev *)__mptr + 0xffffffffffffff68UL; tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; tmp___1 = igb_has_link(adapter); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { pm_schedule_suspend(dev, 5000U); } else { } return (-16); } }
/* igb_runtime_suspend(): same as igb_suspend() but passes runtime=1 so only
 * link-change wake is armed. */
static int igb_runtime_suspend(struct device *dev ) { struct pci_dev *pdev ; struct device const *__mptr ; int retval ; bool wake ; { __mptr = (struct device const *)dev; pdev = (struct pci_dev *)__mptr + 0xffffffffffffff68UL; retval = __igb_shutdown(pdev, & wake, 1); if (retval != 0) { return (retval); } else { } if ((int )wake) { pci_prepare_to_sleep(pdev); } else { pci_wake_from_d3(pdev, 0); pci_set_power_state(pdev, 3); } return (0); } }
/* igb_runtime_resume(): runtime resume simply delegates to igb_resume(). */
static int igb_runtime_resume(struct device *dev ) { int tmp ; { tmp = igb_resume(dev); return (tmp); } }
/* igb_shutdown(): PCI .shutdown — arms wake and enters D3 only when the system
 * is powering off (system_state == 3U; presumably SYSTEM_POWER_OFF — TODO
 * confirm against the kernel version this was generated from). */
static void igb_shutdown(struct pci_dev *pdev ) { bool wake ; { __igb_shutdown(pdev, & wake, 0); if ((unsigned int )system_state == 3U) { pci_wake_from_d3(pdev, (int )wake); pci_set_power_state(pdev, 3); } else { } return; } }
/* igb_sriov_reinit(): close (or reset) and reinitialize queues/interrupts
 * after an SR-IOV VF-count change (declarations continue on the next line). */
static int igb_sriov_reinit(struct pci_dev *dev ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; struct pci_dev *pdev ; bool tmp___1 ; int tmp___2 ; bool
/* Body of igb_sriov_reinit() (header on the previous line): under RTNL, close
 * the interface if running (else just reset), rebuild queue configuration and
 * the interrupt scheme (-12 == -ENOMEM on failure), then reopen. */
tmp___3 ; { tmp = pci_get_drvdata(dev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; pdev = adapter->pdev; rtnl_lock(); tmp___1 = netif_running((struct net_device const *)netdev); if ((int )tmp___1) { igb_close(netdev); } else { igb_reset(adapter); } igb_clear_interrupt_scheme(adapter); igb_init_queue_configuration(adapter); tmp___2 = igb_init_interrupt_scheme(adapter, 1); if (tmp___2 != 0) { dev_err((struct device const *)(& pdev->dev), "Unable to allocate memory for queues\n"); return (-12); } else { } tmp___3 = netif_running((struct net_device const *)netdev); if ((int )tmp___3) { igb_open(netdev); } else { } rtnl_unlock(); return (0); } }
/* igb_pci_disable_sriov(): disable SR-IOV and, on success, reinit queues. */
static int igb_pci_disable_sriov(struct pci_dev *dev ) { int err ; int tmp ; { tmp = igb_disable_sriov(dev); err = tmp; if (err == 0) { err = igb_sriov_reinit(dev); } else { } return (err); } }
/* igb_pci_enable_sriov(): enable num_vfs VFs, reinit queues, and return the
 * VF count on success or a negative errno on failure. */
static int igb_pci_enable_sriov(struct pci_dev *dev , int num_vfs ) { int err ; int tmp ; { tmp = igb_enable_sriov(dev, num_vfs); err = tmp; if (err != 0) { goto out; } else { } err = igb_sriov_reinit(dev); if (err == 0) { return (num_vfs); } else { } out: ; return (err); } }
/* igb_pci_sriov_configure(): sysfs sriov_numvfs entry point — 0 disables,
 * anything else enables that many VFs. */
static int igb_pci_sriov_configure(struct pci_dev *dev , int num_vfs ) { int tmp ; int tmp___0 ; { if (num_vfs == 0) { tmp = igb_pci_disable_sriov(dev); return (tmp); } else { tmp___0 = igb_pci_enable_sriov(dev, num_vfs); return (tmp___0); } return (0); } }
/* igb_netpoll(): netconsole poll — for every q_vector, either unmask-poll via
 * the per-vector EIMS write at offset 5416U (0x1528; EICR/EIMC region) when
 * the adapter runs in MSI-X mode (flags & 8192U), or mask all interrupts, then
 * kick NAPI. Loop is expressed with CIL goto labels ldv_58787/ldv_58788. */
static void igb_netpoll(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; struct igb_q_vector *q_vector ; int i ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; i = 0; goto ldv_58788; ldv_58787: q_vector = adapter->q_vector[i]; if ((adapter->flags & 8192U) != 0U) { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(q_vector->eims_value, (void volatile *)hw_addr + 5416U); } else { } } else { igb_irq_disable(adapter); } napi_schedule(& q_vector->napi); i = i + 1; ldv_58788: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_58787; } else { } return; } }
/* igb_io_error_detected(): AER error callback — detach the netdev; on
 * permanent failure (state == 3U) return 4U (PCI_ERS_RESULT_DISCONNECT),
 * otherwise bring the adapter down and request a reset (3U ==
 * PCI_ERS_RESULT_NEED_RESET). */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev , pci_channel_state_t state ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; bool tmp___1 ; { tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; netif_device_detach(netdev); if (state == 3U) { return (4U); } else { } tmp___1 = netif_running((struct net_device const *)netdev); if ((int )tmp___1) { igb_down(adapter); } else { } pci_disable_device(pdev); return (3U); } }
/* igb_io_slot_reset(): AER slot-reset — re-enable the device, restore state,
 * disable wake, reset the MAC and clear Wake-Up Status (offset 22544U ==
 * 0x5810); 5U apparently PCI_ERS_RESULT_RECOVERED, 4U DISCONNECT. Body
 * continues onto the following line. */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; struct e1000_hw *hw ; pci_ers_result_t result ; int err ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; int tmp___2 ; { tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; hw = & adapter->hw; tmp___2 = pci_enable_device_mem(pdev); if (tmp___2 != 0) { dev_err((struct device const *)(& pdev->dev), "Cannot re-enable PCI device after reset.\n"); result = 4U; } else { pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_enable_wake(pdev, 3, 0); pci_enable_wake(pdev, 4, 0); igb_reset(adapter); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(4294967295U, (void volatile *)hw_addr + 22544U); } else { } result = 5U; } err = pci_cleanup_aer_uncorrect_error_status(pdev); if
/* Tail of igb_io_slot_reset(): log if AER status cleanup fails, then return
 * the recovery verdict computed above. */
(err != 0) { dev_err((struct device const *)(& pdev->dev), "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); } else { } return (result); } }
/* igb_io_resume(): AER resume — bring the interface back up if it was
 * running, reattach the netdev and retake firmware control. */
static void igb_io_resume(struct pci_dev *pdev ) { struct net_device *netdev ; void *tmp ; struct igb_adapter *adapter ; void *tmp___0 ; int tmp___1 ; bool tmp___2 ; { tmp = pci_get_drvdata(pdev); netdev = (struct net_device *)tmp; tmp___0 = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp___0; tmp___2 = netif_running((struct net_device const *)netdev); if ((int )tmp___2) { tmp___1 = igb_up(adapter); if (tmp___1 != 0) { dev_err((struct device const *)(& pdev->dev), "igb_up failed after reset\n"); return; } else { } } else { } netif_device_attach(netdev); igb_get_hw_control(adapter); return; } }
/* igb_rar_set_qsel(): program receive-address registers RAL/RAH for `index`
 * with the 6-byte MAC in `addr`, setting the address-valid bit (0x80000000)
 * and the pool/queue-select field: on mac.type 1U (apparently 82575) the pool
 * field is qsel * 0x40000, otherwise 0x40000 << qsel. The folded offset
 * expressions select RAL at 0x5400 + 8*index (indexes 0..15) or 0x54E0-based
 * for higher indexes, with RAH = RAL + 4; a status read (offset 8U) after each
 * write acts as a flush. */
static void igb_rar_set_qsel(struct igb_adapter *adapter , u8 *addr , u32 index , u8 qsel ) { u32 rar_low ; u32 rar_high ; struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { hw = & adapter->hw; rar_low = (((unsigned int )*addr | ((unsigned int )*(addr + 1UL) << 8)) | ((unsigned int )*(addr + 2UL) << 16)) | ((unsigned int )*(addr + 3UL) << 24); rar_high = (unsigned int )*(addr + 4UL) | ((unsigned int )*(addr + 5UL) << 8); rar_high = rar_high | 2147483648U; if ((unsigned int )hw->mac.type == 1U) { rar_high = (u32 )((int )qsel * 262144) | rar_high; } else { rar_high = (u32 )(262144 << (int )qsel) | rar_high; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(rar_low, (void volatile *)hw_addr + (unsigned long )(index <= 15U ? (index + 2688U) * 8U : (index + 2700U) * 8U)); } else { } igb_rd32(hw, 8U); __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(rar_high, (void volatile *)hw_addr___0 + (unsigned long )(index <= 15U ? index * 8U + 21508U : (index + 536870896U) * 8U + 21732U)); } else { } igb_rd32(hw, 8U); return; } }
/* igb_set_vf_mac(): store the MAC for VF `vf` and program it into a RAR slot
 * counted down from the top (rar_entry_count + ~vf == count - vf - 1), with
 * the VF number as the queue-select value. */
static int igb_set_vf_mac(struct igb_adapter *adapter , int vf , unsigned char *mac_addr ) { struct e1000_hw *hw ; int rar_entry ; { hw = & adapter->hw; rar_entry = (int )hw->mac.rar_entry_count + ~ vf; memcpy((void *)(& (adapter->vf_data + (unsigned long )vf)->vf_mac_addresses), (void const *)mac_addr, 6UL); igb_rar_set_qsel(adapter, mac_addr, (u32 )rar_entry, (int )((u8 )vf)); return (0); } }
/* igb_ndo_set_vf_mac(): ndo_set_vf_mac — validate the address and VF index
 * (-22 == -EINVAL), mark the VF as PF-administered (flags |= 8U), warn when
 * the PF is not up (state bit 2), then apply via igb_set_vf_mac(). Statement
 * stream continues on the next line. */
static int igb_ndo_set_vf_mac(struct net_device *netdev , int vf , u8 *mac ) { struct igb_adapter *adapter ; void *tmp ; bool tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; tmp___0 = is_valid_ether_addr((u8 const *)mac); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1 || (unsigned int )vf >= adapter->vfs_allocated_count) { return (-22); } else { } (adapter->vf_data + (unsigned long )vf)->flags = (adapter->vf_data + (unsigned long )vf)->flags | 8U; _dev_info((struct device const *)(& (adapter->pdev)->dev), "setting MAC %pM on VF %d\n", mac, vf); _dev_info((struct device const *)(& (adapter->pdev)->dev), "Reload the VF driver to make this change effective."); tmp___2 = constant_test_bit(2L, (unsigned long const volatile *)(& adapter->state)); if (tmp___2 != 0) { dev_warn((struct device const *)(& (adapter->pdev)->dev), "The VF MAC address has been set, but the PF device is not up.\n"); dev_warn((struct device const *)(& (adapter->pdev)->dev), "Bring the PF device up before attempting to use the VF device.\n"); } else { } tmp___3
/* Tail of igb_ndo_set_vf_mac(): forward the result of igb_set_vf_mac(). */
= igb_set_vf_mac(adapter, vf, mac); return (tmp___3); } }
/* igb_link_mbps(): map an internal link-speed code to Mbps; anything other
 * than 100/1000 yields 0 (no limit basis). */
static int igb_link_mbps(int internal_link_speed ) { { switch (internal_link_speed) { case 100: ; return (100); case 1000: ; return (1000); default: ; return (0); } } }
/* igb_set_vf_rate_limit(): program per-VF TX rate limiting. Computes the
 * rate factor as integer (rf_int) plus a 14-bit fraction (rf_dec, scale
 * 16384), ORs in the enable bit (0x80000000), then writes the queue selector
 * (offset 13828U == 0x3604), the 0x14 refill constant (13968U == 0x3690) and
 * the rate config (14000U == 0x36B0) — the RTTDQSEL/RTTBCNRM/RTTBCNRC
 * register triple. tx_rate == 0 disables the limiter. */
static void igb_set_vf_rate_limit(struct e1000_hw *hw , int vf , int tx_rate , int link_speed ) { int rf_dec ; int rf_int ; u32 bcnrc_val ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; { if (tx_rate != 0) { rf_int = link_speed / tx_rate; rf_dec = link_speed - rf_int * tx_rate; rf_dec = (rf_dec * 16384) / tx_rate; bcnrc_val = 2147483648U; bcnrc_val = ((u32 )(rf_int << 14) & 268419072U) | bcnrc_val; bcnrc_val = ((u32 )rf_dec & 16383U) | bcnrc_val; } else { bcnrc_val = 0U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel((unsigned int )vf, (void volatile *)hw_addr + 13828U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(20U, (void volatile *)hw_addr___0 + 13968U); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(bcnrc_val, (void volatile *)hw_addr___1 + 14000U); } else { } return; } }
/* igb_check_vf_rate_limit(): on link-speed change (only for mac.type 2U —
 * apparently 82576, the sole type supporting this limiter), reset or
 * reprogram every VF's TX rate. CIL loop via labels ldv_58870/ldv_58871. */
static void igb_check_vf_rate_limit(struct igb_adapter *adapter ) { int actual_link_speed ; int i ; bool reset_rate ; { reset_rate = 0; if (adapter->vf_rate_link_speed == 0 || (unsigned int )adapter->hw.mac.type != 2U) { return; } else { } actual_link_speed = igb_link_mbps((int )adapter->link_speed); if (adapter->vf_rate_link_speed != actual_link_speed) { reset_rate = 1; adapter->vf_rate_link_speed = 0; _dev_info((struct device const *)(& (adapter->pdev)->dev), "Link speed has been changed. VF Transmit rate is disabled\n"); } else { } i = 0; goto ldv_58871; ldv_58870: ; if ((int )reset_rate) { (adapter->vf_data + (unsigned long )i)->tx_rate = 0U; } else { } igb_set_vf_rate_limit(& adapter->hw, i, (int )(adapter->vf_data + (unsigned long )i)->tx_rate, actual_link_speed); i = i + 1; ldv_58871: ; if ((unsigned int )i < adapter->vfs_allocated_count) { goto ldv_58870; } else { } return; } }
/* igb_ndo_set_vf_bw(): ndo_set_vf_rate — only max_tx_rate is supported
 * (min must be 0; -95 == -EOPNOTSUPP on non-82576); validates VF index,
 * link-up (status bit 2) and rate bounds before programming the limiter. */
static int igb_ndo_set_vf_bw(struct net_device *netdev , int vf , int min_tx_rate , int max_tx_rate ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; int actual_link_speed ; u32 tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; if ((unsigned int )hw->mac.type != 2U) { return (-95); } else { } if (min_tx_rate != 0) { return (-22); } else { } actual_link_speed = igb_link_mbps((int )adapter->link_speed); if ((unsigned int )vf >= adapter->vfs_allocated_count) { return (-22); } else { tmp___0 = igb_rd32(hw, 8U); if ((tmp___0 & 2U) == 0U) { return (-22); } else if (max_tx_rate < 0) { return (-22); } else if (max_tx_rate > actual_link_speed) { return (-22); } else { } } adapter->vf_rate_link_speed = actual_link_speed; (adapter->vf_data + (unsigned long )vf)->tx_rate = (unsigned short )max_tx_rate; igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); return (0); } }
/* igb_ndo_set_vf_spoofchk(): toggle MAC+VLAN anti-spoof bits (1<<vf and
 * 1<<(vf+8)) in the TX switch-control register — offset 13568U (0x3500) on
 * mac.type 2U, else 23244U (0x5ACC). Statement stream continues on the next
 * line. */
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev , int vf , bool setting ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u32 reg_val ; u32 reg_offset ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; if (adapter->vfs_allocated_count == 0U) { return (-95); } else { } if ((unsigned int )vf >= adapter->vfs_allocated_count) { return (-22); } else { } reg_offset = (unsigned int )hw->mac.type == 2U ? 13568U : 23244U; reg_val = igb_rd32(hw, reg_offset); if ((int )setting) { reg_val = (u32 )((1 << vf) | (1 << (vf + 8))) | reg_val; } else { reg_val = (u32 )(~ ((1 << vf) | (1 << (vf + 8)))) & reg_val; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg_val, (void volatile *)hw_addr + (unsigned long )reg_offset); } else { } (adapter->vf_data + (unsigned long )vf)->spoofchk_enabled = setting; return (0); } }
/* igb_ndo_get_vf_config(): report VF settings (MAC, rates, VLAN/QoS,
 * spoof-check) into the ifla_vf_info structure; -22 on bad VF index. */
static int igb_ndo_get_vf_config(struct net_device *netdev , int vf , struct ifla_vf_info *ivi ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; if ((unsigned int )vf >= adapter->vfs_allocated_count) { return (-22); } else { } ivi->vf = (__u32 )vf; memcpy((void *)(& ivi->mac), (void const *)(& (adapter->vf_data + (unsigned long )vf)->vf_mac_addresses), 6UL); ivi->max_tx_rate = (__u32 )(adapter->vf_data + (unsigned long )vf)->tx_rate; ivi->min_tx_rate = 0U; ivi->vlan = (__u32 )(adapter->vf_data + (unsigned long )vf)->pf_vlan; ivi->qos = (__u32 )(adapter->vf_data + (unsigned long )vf)->pf_qos; ivi->spoofchk = (__u32 )(adapter->vf_data + (unsigned long )vf)->spoofchk_enabled; return (0); } }
/* igb_vmm_control(): enable VMDq loopback/replication per MAC type. NOTE:
 * the case 2U -> 3U -> 4U fall-through is intentional (matches the upstream
 * driver): 82576 sets a DTXCTL bit (13712U == 0x3590, bit 8U) and then also
 * runs the 82580 path, which sets the strip-VLAN bit (0x40000000) in the
 * register at 23280U (0x5AF0). Body continues on the next line. */
static void igb_vmm_control(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 reg ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { hw = & adapter->hw; switch ((unsigned int )hw->mac.type) { case 1U: ; case 6U: ; case 7U: ; case 5U: ; default: ; return; case 2U: reg = igb_rd32(hw, 13712U); reg = reg | 8U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(reg, (void volatile *)hw_addr + 13712U); } else { } case 3U: reg = igb_rd32(hw, 23280U); reg = reg | 1073741824U; __var___0 = (u8 *)0U;
/* Tail of igb_vmm_control(): finish the 82580 strip-VLAN write, then (for any
 * VMDq-capable type) turn PF loopback/replication/anti-spoofing on when VFs
 * are allocated, off otherwise. */
hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg, (void volatile *)hw_addr___0 + 23280U); } else { } case 4U: ; goto ldv_58919; } ldv_58919: ; if (adapter->vfs_allocated_count != 0U) { igb_vmdq_set_loopback_pf(hw, 1); igb_vmdq_set_replication_pf(hw, 1); igb_vmdq_set_anti_spoofing_pf(hw, 1, (int )adapter->vfs_allocated_count); } else { igb_vmdq_set_loopback_pf(hw, 0); igb_vmdq_set_replication_pf(hw, 0); } return; } }
/* igb_init_dmac(): configure DMA coalescing for mac.type > 3U when the DMAC
 * flag (16U) is set. The folded constants mirror the upstream driver:
 * high-water mark hwm = 64*pba - max_frame/16, floored at 64*(pba-6) (the
 * (pba + 67108858U) terms are pba-6 in wrapped u32 arithmetic); rx threshold
 * dmac_thr = pba - max_frame/512, floored at pba-10; DMACR at 9480U (0x2508)
 * gets the threshold, enable bits (0xB0000000) and a 31-cycle watchdog; the
 * BMC-to-OS watchdog bit (0x8000) is cleared except on type 5U. Also writes
 * 24016U (0x5DD0), DMCTLX 9492U (0x2514), DMCTXTH 13648U (0x3550, using the
 * 16312U == 20408-4096 TX PB headroom constant) and clears the Lx-decision
 * bit (0x80) at 23480U (0x5BB8). For mac.type == 3U (82580) DMA coalescing
 * is instead fully disabled. */
static void igb_init_dmac(struct igb_adapter *adapter , u32 pba ) { struct e1000_hw *hw ; u32 dmac_thr ; u16 hwm ; u32 reg ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___2 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___3 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___4 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___5 ; u32 reg___0 ; u32 tmp___6 ; u8 *hw_addr___6 ; u8 *__var___6 ; long tmp___7 ; u8 *hw_addr___7 ; u8 *__var___7 ; long tmp___8 ; { hw = & adapter->hw; if ((unsigned int )hw->mac.type > 3U) { if ((adapter->flags & 16U) != 0U) { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(0U, (void volatile *)hw_addr + 13648U); } else { } hwm = (unsigned int )((u16 )pba) * 64U - (unsigned int )((u16 )(adapter->max_frame_size / 16U)); if ((u32 )hwm < (pba + 67108858U) * 64U) { hwm = (unsigned int )((u16 )(pba + 67108858U)) * 64U; } else { } reg = igb_rd32(hw, 8560U); reg = reg & 4294705167U; reg = ((u32 )((int )hwm << 4) & 262128U) | reg; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg, (void volatile *)hw_addr___0 + 8560U); } else { } dmac_thr = pba - adapter->max_frame_size / 512U; if (pba - 10U > dmac_thr) { dmac_thr = pba - 10U; } else { } reg = igb_rd32(hw, 9480U); reg = reg & 4278255615U; reg = ((dmac_thr << 16) & 16711680U) | reg; reg = reg | 2952790016U; reg = reg | 31U; if ((unsigned int )hw->mac.type != 5U) { reg = reg & 4294934527U; } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(reg, (void volatile *)hw_addr___1 + 9480U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(0U, (void volatile *)hw_addr___2 + 24016U); } else { } reg = 2147483652U; __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(reg, (void volatile *)hw_addr___3 + 9492U); } else { } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel((16312U - adapter->max_frame_size) >> 6, (void volatile *)hw_addr___4 + 13648U); } else { } reg = igb_rd32(hw, 23480U); reg = reg & 4294967167U; __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(reg, (void volatile *)hw_addr___5 + 23480U); } else { } } else { } } else if ((unsigned int )hw->mac.type == 3U) { tmp___6 = igb_rd32(hw, 23480U); reg___0 = tmp___6; __var___6 = (u8 *)0U; hw_addr___6 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___6 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(reg___0 & 4294967167U, (void volatile *)hw_addr___6 + 23480U); } else { } __var___7 = (u8 *)0U; hw_addr___7 = *((u8 * volatile *)(& hw->hw_addr)); tmp___8 = ldv__builtin_expect((unsigned long )hw_addr___7 == (unsigned long )((u8 *)0U), 0L); if (tmp___8 == 0L) { writel(0U, (void volatile *)hw_addr___7 + 9480U); } else { } } else { } return; } }
/* igb_read_i2c_byte(): SMBus read of one byte from the adapter's i2c client,
 * serialized by the PHY software/firmware semaphore (mask 2U). Returns 0 on
 * success, 13 when the semaphore cannot be taken, 20 on missing client or
 * read failure (folded e1000 error codes). dev_addr is unused here — the
 * client carries the address. */
s32 igb_read_i2c_byte(struct e1000_hw *hw , u8 byte_offset , u8 dev_addr , u8 *data ) { struct igb_adapter *adapter ; struct e1000_hw const *__mptr ; struct i2c_client *this_client ; s32 status ; u16 swfw_mask ; s32 tmp ; { __mptr = (struct e1000_hw const *)hw; adapter = (struct igb_adapter *)__mptr + 0xfffffffffffff920UL; this_client = adapter->i2c_client; swfw_mask = 0U; if ((unsigned long )this_client == (unsigned long )((struct i2c_client *)0)) { return (20); } else { } swfw_mask = 2U; tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, (int )swfw_mask); if (tmp != 0) { return (13); } else { } status = i2c_smbus_read_byte_data((struct i2c_client const *)this_client, (int )byte_offset); (*(hw->mac.ops.release_swfw_sync))(hw, (int )swfw_mask); if (status < 0) { return (20); } else { *data = (u8 )status; return (0); } } }
/* igb_write_i2c_byte(): SMBus write counterpart of igb_read_i2c_byte(), same
 * locking and error-code conventions. */
s32 igb_write_i2c_byte(struct e1000_hw *hw , u8 byte_offset , u8 dev_addr , u8 data ) { struct igb_adapter *adapter ; struct e1000_hw const *__mptr ; struct i2c_client *this_client ; s32 status ; u16 swfw_mask ; s32 tmp ; { __mptr = (struct e1000_hw const *)hw; adapter = (struct igb_adapter *)__mptr + 0xfffffffffffff920UL; this_client = adapter->i2c_client; swfw_mask = 2U; if ((unsigned long )this_client == (unsigned long )((struct i2c_client *)0)) { return (20); } else { } tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, (int )swfw_mask); if (tmp != 0) { return (13); } else { } status = i2c_smbus_write_byte_data((struct i2c_client const *)this_client, (int )byte_offset, (int )data); (*(hw->mac.ops.release_swfw_sync))(hw, (int )swfw_mask); if (status != 0) { return (20); } else { return (0); } } }
/* igb_reinit_queues() starts here; return type on this line, body on the
 * next. */
int
/* igb_reinit_queues(): close if running, reset interrupt capability, rebuild
 * the interrupt scheme (-12 == -ENOMEM) and reopen. */
igb_reinit_queues(struct igb_adapter *adapter ) { struct net_device *netdev ; struct pci_dev *pdev ; int err ; bool tmp ; int tmp___0 ; bool tmp___1 ; { netdev = adapter->netdev; pdev = adapter->pdev; err = 0; tmp = netif_running((struct net_device const *)netdev); if ((int )tmp) { igb_close(netdev); } else { } igb_reset_interrupt_capability(adapter); tmp___0 = igb_init_interrupt_scheme(adapter, 1); if (tmp___0 != 0) { dev_err((struct device const *)(& pdev->dev), "Unable to allocate memory for queues\n"); return (-12); } else { } tmp___1 = netif_running((struct net_device const *)netdev); if ((int )tmp___1) { err = igb_open(netdev); } else { } return (err); } }
/* LDV harness state: nondet return-value slots and external model hooks for
 * the pm/pci callback automaton generated by the verifier. */
int ldv_retval_20 ; extern int ldv_freeze_late_25(void) ; extern int ldv_poweroff_noirq_25(void) ; extern int ldv_restore_early_25(void) ; int ldv_retval_18 ; extern int ldv_freeze_noirq_25(void) ; int ldv_retval_2 ; int ldv_retval_5 ; int ldv_retval_0 ; extern int ldv_complete_25(void) ; int ldv_retval_23 ; int ldv_retval_11 ; int ldv_retval_1 ; int ldv_retval_22 ; int ldv_retval_15 ; int ldv_retval_16 ; extern int ldv_suspend_23(void) ; int ldv_retval_24 ; extern int ldv_release_23(void) ; extern int ldv_resume_early_25(void) ; extern int ldv_ndo_uninit_20(void) ; void ldv_check_final_state(void) ; int ldv_retval_8 ; int ldv_retval_7 ; int ldv_retval_19 ; int ldv_retval_14 ; int ldv_retval_17 ; extern int ldv_poweroff_late_25(void) ; int ldv_retval_12 ; extern void ldv_initialize(void) ; int ldv_retval_6 ; extern int ldv_thaw_noirq_25(void) ; extern int ldv_prepare_25(void) ; extern int ldv_suspend_noirq_25(void) ; extern int ldv_probe_23(void) ; extern int ldv_suspend_late_25(void) ; int ldv_retval_21 ; int ldv_retval_13 ; extern int ldv_thaw_early_25(void) ; int ldv_retval_9 ; int ldv_retval_10 ; extern int ldv_resume_noirq_25(void) ; extern int ldv_ndo_init_20(void) ; int ldv_retval_4 ; int ldv_retval_3 ; extern int ldv_restore_noirq_25(void) ;
/* activate_suitable_irq_4(): LDV model — record an IRQ registration in the
 * first free one of four modelled slots for handler class 4. */
void activate_suitable_irq_4(int line , void *data ) { { if (ldv_irq_4_0 == 0) { ldv_irq_line_4_0 = line; ldv_irq_data_4_0 = data; ldv_irq_4_0 = 1; return; } else { } if (ldv_irq_4_1 == 0) { ldv_irq_line_4_1 = line; ldv_irq_data_4_1 = data; ldv_irq_4_1 = 1; return; } else { } if (ldv_irq_4_2 == 0) { ldv_irq_line_4_2 = line; ldv_irq_data_4_2 = data; ldv_irq_4_2 = 1; return; } else { } if (ldv_irq_4_3 == 0) { ldv_irq_line_4_3 = line; ldv_irq_data_4_3 = data; ldv_irq_4_3 = 1; return; } else { } return; } }
/* ldv_irq_3(): LDV model — if the slot is active, nondeterministically fire
 * igb_intr_msi() in interrupt context; ldv_stop() prunes other choices. */
int ldv_irq_3(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_intr_msi(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_59064; default: ldv_stop(); } ldv_59064: ; } else { } return (state); } }
/* choose_timer_13(): LDV model — nondeterministically fire one pending timer
 * of class 13 (state 1 -> 2 == fired). */
void choose_timer_13(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_timer_13_0 == 1) { ldv_timer_13_0 = 2; ldv_timer_13(ldv_timer_13_0, ldv_timer_list_13_0); } else { } goto ldv_59070; case 1: ; if (ldv_timer_13_1 == 1) { ldv_timer_13_1 = 2; ldv_timer_13(ldv_timer_13_1, ldv_timer_list_13_1); } else { } goto ldv_59070; case 2: ; if (ldv_timer_13_2 == 1) { ldv_timer_13_2 = 2; ldv_timer_13(ldv_timer_13_2, ldv_timer_list_13_2); } else { } goto ldv_59070; case 3: ; if (ldv_timer_13_3 == 1) { ldv_timer_13_3 = 2; ldv_timer_13(ldv_timer_13_3, ldv_timer_list_13_3); } else { } goto ldv_59070; default: ldv_stop(); } ldv_59070: ; return; } }
/* disable_suitable_irq_2(): LDV model — free the slot matching `line` for
 * handler class 2 (tail closes on the next source line). */
void disable_suitable_irq_2(int line , void *data ) { { if (ldv_irq_2_0 != 0 && line == ldv_irq_line_2_0) { ldv_irq_2_0 = 0; return; } else { } if (ldv_irq_2_1 != 0 && line == ldv_irq_line_2_1) { ldv_irq_2_1 = 0; return; } else { } if (ldv_irq_2_2 != 0 && line == ldv_irq_line_2_2) { ldv_irq_2_2 = 0; return; } else { } if (ldv_irq_2_3 != 0 && line == ldv_irq_line_2_3) { ldv_irq_2_3 = 0; return; } else { }
return; } } void activate_suitable_irq_3(int line , void *data ) { { if (ldv_irq_3_0 == 0) { ldv_irq_line_3_0 = line; ldv_irq_data_3_0 = data; ldv_irq_3_0 = 1; return; } else { } if (ldv_irq_3_1 == 0) { ldv_irq_line_3_1 = line; ldv_irq_data_3_1 = data; ldv_irq_3_1 = 1; return; } else { } if (ldv_irq_3_2 == 0) { ldv_irq_line_3_2 = line; ldv_irq_data_3_2 = data; ldv_irq_3_2 = 1; return; } else { } if (ldv_irq_3_3 == 0) { ldv_irq_line_3_3 = line; ldv_irq_data_3_3 = data; ldv_irq_3_3 = 1; return; } else { } return; } } int reg_check_1(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_msix_other)) { return (1); } else { } return (0); } } void choose_interrupt_4(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_0, ldv_irq_line_4_0, ldv_irq_data_4_0); goto ldv_59092; case 1: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_1, ldv_irq_line_4_1, ldv_irq_data_4_1); goto ldv_59092; case 2: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_2, ldv_irq_line_4_2, ldv_irq_data_4_2); goto ldv_59092; case 3: ldv_irq_4_0 = ldv_irq_4(ldv_irq_4_3, ldv_irq_line_4_3, ldv_irq_data_4_3); goto ldv_59092; default: ldv_stop(); } ldv_59092: ; return; } } void call_and_disable_work_10(struct work_struct *work ) { { if ((ldv_work_10_0 == 2 || ldv_work_10_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_0) { igb_watchdog_task(work); ldv_work_10_0 = 1; return; } else { } if ((ldv_work_10_1 == 2 || ldv_work_10_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_1) { igb_watchdog_task(work); ldv_work_10_1 = 1; return; } else { } if ((ldv_work_10_2 == 2 || ldv_work_10_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_2) { igb_watchdog_task(work); ldv_work_10_2 = 1; return; } else { } if ((ldv_work_10_3 == 2 || ldv_work_10_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_3) { igb_watchdog_task(work); ldv_work_10_3 = 1; return; } else { } return; } } 
void work_init_9(void) { { ldv_work_9_0 = 0; ldv_work_9_1 = 0; ldv_work_9_2 = 0; ldv_work_9_3 = 0; return; } } void ldv_initialize_pci_error_handlers_23(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); igb_err_handler_group0 = (struct pci_dev *)tmp; return; } } void invoke_work_10(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_10_0 == 2 || ldv_work_10_0 == 3) { ldv_work_10_0 = 4; igb_watchdog_task(ldv_work_struct_10_0); ldv_work_10_0 = 1; } else { } goto ldv_59114; case 1: ; if (ldv_work_10_1 == 2 || ldv_work_10_1 == 3) { ldv_work_10_1 = 4; igb_watchdog_task(ldv_work_struct_10_0); ldv_work_10_1 = 1; } else { } goto ldv_59114; case 2: ; if (ldv_work_10_2 == 2 || ldv_work_10_2 == 3) { ldv_work_10_2 = 4; igb_watchdog_task(ldv_work_struct_10_0); ldv_work_10_2 = 1; } else { } goto ldv_59114; case 3: ; if (ldv_work_10_3 == 2 || ldv_work_10_3 == 3) { ldv_work_10_3 = 4; igb_watchdog_task(ldv_work_struct_10_0); ldv_work_10_3 = 1; } else { } goto ldv_59114; default: ldv_stop(); } ldv_59114: ; return; } } void activate_pending_timer_13(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_13_0 == (unsigned long )timer) { if (ldv_timer_13_0 == 2 || pending_flag != 0) { ldv_timer_list_13_0 = timer; ldv_timer_list_13_0->data = data; ldv_timer_13_0 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_13_1 == (unsigned long )timer) { if (ldv_timer_13_1 == 2 || pending_flag != 0) { ldv_timer_list_13_1 = timer; ldv_timer_list_13_1->data = data; ldv_timer_13_1 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_13_2 == (unsigned long )timer) { if (ldv_timer_13_2 == 2 || pending_flag != 0) { ldv_timer_list_13_2 = timer; ldv_timer_list_13_2->data = data; ldv_timer_13_2 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_13_3 == (unsigned long )timer) { if (ldv_timer_13_3 == 2 || pending_flag != 0) { ldv_timer_list_13_3 = timer; 
ldv_timer_list_13_3->data = data; ldv_timer_13_3 = 1; } else { } return; } else { } activate_suitable_timer_13(timer, data); return; } } void activate_suitable_irq_2(int line , void *data ) { { if (ldv_irq_2_0 == 0) { ldv_irq_line_2_0 = line; ldv_irq_data_2_0 = data; ldv_irq_2_0 = 1; return; } else { } if (ldv_irq_2_1 == 0) { ldv_irq_line_2_1 = line; ldv_irq_data_2_1 = data; ldv_irq_2_1 = 1; return; } else { } if (ldv_irq_2_2 == 0) { ldv_irq_line_2_2 = line; ldv_irq_data_2_2 = data; ldv_irq_2_2 = 1; return; } else { } if (ldv_irq_2_3 == 0) { ldv_irq_line_2_3 = line; ldv_irq_data_2_3 = data; ldv_irq_2_3 = 1; return; } else { } return; } } void ldv_timer_14(int state , struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; igb_update_phy_info(timer->data); LDV_IN_INTERRUPT = 1; return; } } void call_and_disable_all_9(int state ) { { if (ldv_work_9_0 == state) { call_and_disable_work_9(ldv_work_struct_9_0); } else { } if (ldv_work_9_1 == state) { call_and_disable_work_9(ldv_work_struct_9_1); } else { } if (ldv_work_9_2 == state) { call_and_disable_work_9(ldv_work_struct_9_2); } else { } if (ldv_work_9_3 == state) { call_and_disable_work_9(ldv_work_struct_9_3); } else { } return; } } void choose_interrupt_1(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_0, ldv_irq_line_1_0, ldv_irq_data_1_0); goto ldv_59139; case 1: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_1, ldv_irq_line_1_1, ldv_irq_data_1_1); goto ldv_59139; case 2: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_2, ldv_irq_line_1_2, ldv_irq_data_1_2); goto ldv_59139; case 3: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_3, ldv_irq_line_1_3, ldv_irq_data_1_3); goto ldv_59139; default: ldv_stop(); } ldv_59139: ; return; } } void work_init_10(void) { { ldv_work_10_0 = 0; ldv_work_10_1 = 0; ldv_work_10_2 = 0; ldv_work_10_3 = 0; return; } } int reg_check_2(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_msix_ring)) { return (1); } else { } 
return (0); } } int reg_check_3(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_intr_msi)) { return (1); } else { } return (0); } } void invoke_work_9(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_9_0 == 2 || ldv_work_9_0 == 3) { ldv_work_9_0 = 4; igb_reset_task(ldv_work_struct_9_0); ldv_work_9_0 = 1; } else { } goto ldv_59163; case 1: ; if (ldv_work_9_1 == 2 || ldv_work_9_1 == 3) { ldv_work_9_1 = 4; igb_reset_task(ldv_work_struct_9_0); ldv_work_9_1 = 1; } else { } goto ldv_59163; case 2: ; if (ldv_work_9_2 == 2 || ldv_work_9_2 == 3) { ldv_work_9_2 = 4; igb_reset_task(ldv_work_struct_9_0); ldv_work_9_2 = 1; } else { } goto ldv_59163; case 3: ; if (ldv_work_9_3 == 2 || ldv_work_9_3 == 3) { ldv_work_9_3 = 4; igb_reset_task(ldv_work_struct_9_0); ldv_work_9_3 = 1; } else { } goto ldv_59163; default: ldv_stop(); } ldv_59163: ; return; } } void activate_work_9(struct work_struct *work , int state ) { { if (ldv_work_9_0 == 0) { ldv_work_struct_9_0 = work; ldv_work_9_0 = state; return; } else { } if (ldv_work_9_1 == 0) { ldv_work_struct_9_1 = work; ldv_work_9_1 = state; return; } else { } if (ldv_work_9_2 == 0) { ldv_work_struct_9_2 = work; ldv_work_9_2 = state; return; } else { } if (ldv_work_9_3 == 0) { ldv_work_struct_9_3 = work; ldv_work_9_3 = state; return; } else { } return; } } void activate_pending_timer_14(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_14_0 == (unsigned long )timer) { if (ldv_timer_14_0 == 2 || pending_flag != 0) { ldv_timer_list_14_0 = timer; ldv_timer_list_14_0->data = data; ldv_timer_14_0 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_14_1 == (unsigned long )timer) { if (ldv_timer_14_1 == 2 || pending_flag != 0) { ldv_timer_list_14_1 = timer; ldv_timer_list_14_1->data = data; ldv_timer_14_1 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_14_2 == 
/* ...continuation of activate_pending_timer_14(): slots 2 and 3, then the
   fall-through to activate_suitable_timer_14() for an untracked timer. */
(unsigned long )timer) { if (ldv_timer_14_2 == 2 || pending_flag != 0) { ldv_timer_list_14_2 = timer; ldv_timer_list_14_2->data = data; ldv_timer_14_2 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_14_3 == (unsigned long )timer) { if (ldv_timer_14_3 == 2 || pending_flag != 0) { ldv_timer_list_14_3 = timer; ldv_timer_list_14_3->data = data; ldv_timer_14_3 = 1; } else { } return; } else { } activate_suitable_timer_14(timer, data); return; } }
/* activate_suitable_timer_13 - arm timer 13 in the first slot that is unused
   (state 0) or expired (state 2); silently dropped when all four are armed. */
void activate_suitable_timer_13(struct timer_list *timer , unsigned long data ) { { if (ldv_timer_13_0 == 0 || ldv_timer_13_0 == 2) { ldv_timer_list_13_0 = timer; ldv_timer_list_13_0->data = data; ldv_timer_13_0 = 1; return; } else { } if (ldv_timer_13_1 == 0 || ldv_timer_13_1 == 2) { ldv_timer_list_13_1 = timer; ldv_timer_list_13_1->data = data; ldv_timer_13_1 = 1; return; } else { } if (ldv_timer_13_2 == 0 || ldv_timer_13_2 == 2) { ldv_timer_list_13_2 = timer; ldv_timer_list_13_2->data = data; ldv_timer_13_2 = 1; return; } else { } if (ldv_timer_13_3 == 0 || ldv_timer_13_3 == 2) { ldv_timer_list_13_3 = timer; ldv_timer_list_13_3->data = data; ldv_timer_13_3 = 1; return; } else { } return; } }
/* ldv_timer_13 - model expiry of timer 13: run igb_watchdog() with the
   interrupt-context flag raised. */
void ldv_timer_13(int state , struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; igb_watchdog(timer->data); LDV_IN_INTERRUPT = 1; return; } }
/* disable_suitable_timer_13 - del_timer model: mark the slot tracking *timer
   (if any) idle again. */
void disable_suitable_timer_13(struct timer_list *timer ) { { if (ldv_timer_13_0 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_13_0) { ldv_timer_13_0 = 0; return; } else { } if (ldv_timer_13_1 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_13_1) { ldv_timer_13_1 = 0; return; } else { } if (ldv_timer_13_2 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_13_2) { ldv_timer_13_2 = 0; return; } else { } if (ldv_timer_13_3 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_13_3) { ldv_timer_13_3 = 0; return; } else { } return; } }
/* ldv_irq_4 - model one firing of IRQ handler instance 4 (igb_intr).
   (Definition continues on the next source line.) */
int ldv_irq_4(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ;
/* ...continuation of ldv_irq_4(): if the slot is active (state 1), invoke
   igb_intr() in modeled interrupt context; the slot state is returned
   unchanged. */
int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_59195; default: ldv_stop(); } ldv_59195: ; } else { } return (state); } }
/* ldv_net_device_ops_20 - allocate a zeroed 3008-byte object to act as the
   net_device for the netdev-ops state machine. */
void ldv_net_device_ops_20(void) { void *tmp ; { tmp = ldv_init_zalloc(3008UL); igb_netdev_ops_group1 = (struct net_device *)tmp; return; } }
/* call_and_disable_work_9 - if *work matches an active slot (state 2 or 3),
   run igb_reset_task() on it and mark the slot disabled (state 1). */
void call_and_disable_work_9(struct work_struct *work ) { { if ((ldv_work_9_0 == 2 || ldv_work_9_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_0) { igb_reset_task(work); ldv_work_9_0 = 1; return; } else { } if ((ldv_work_9_1 == 2 || ldv_work_9_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_1) { igb_reset_task(work); ldv_work_9_1 = 1; return; } else { } if ((ldv_work_9_2 == 2 || ldv_work_9_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_2) { igb_reset_task(work); ldv_work_9_2 = 1; return; } else { } if ((ldv_work_9_3 == 2 || ldv_work_9_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_3) { igb_reset_task(work); ldv_work_9_3 = 1; return; } else { } return; } }
/* disable_suitable_irq_1 - free_irq model for handler 1: clear the slot whose
   registered line matches. */
void disable_suitable_irq_1(int line , void *data ) { { if (ldv_irq_1_0 != 0 && line == ldv_irq_line_1_0) { ldv_irq_1_0 = 0; return; } else { } if (ldv_irq_1_1 != 0 && line == ldv_irq_line_1_1) { ldv_irq_1_1 = 0; return; } else { } if (ldv_irq_1_2 != 0 && line == ldv_irq_line_1_2) { ldv_irq_1_2 = 0; return; } else { } if (ldv_irq_1_3 != 0 && line == ldv_irq_line_1_3) { ldv_irq_1_3 = 0; return; } else { } return; } }
/* activate_suitable_irq_1 - register handler 1 in the first free slot.
   (Definition continues on the next source line.) */
void activate_suitable_irq_1(int line , void *data ) { { if (ldv_irq_1_0 == 0) { ldv_irq_line_1_0 = line; ldv_irq_data_1_0 = data; ldv_irq_1_0 = 1; return; } else { } if (ldv_irq_1_1 == 0) { ldv_irq_line_1_1 = line; ldv_irq_data_1_1 = data; ldv_irq_1_1 = 1; return; } else { } if (ldv_irq_1_2 == 0) { ldv_irq_line_1_2
= line; ldv_irq_data_1_2 = data; ldv_irq_1_2 = 1; return; } else { } if (ldv_irq_1_3 == 0) { ldv_irq_line_1_3 = line; ldv_irq_data_1_3 = data; ldv_irq_1_3 = 1; return; } else { } return; } } int reg_check_4(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_intr)) { return (1); } else { } return (0); } } void ldv_dev_pm_ops_25(void) { void *tmp ; { tmp = ldv_init_zalloc(1416UL); igb_pm_ops_group1 = (struct device *)tmp; return; } } void timer_init_13(void) { { ldv_timer_13_0 = 0; ldv_timer_13_1 = 0; ldv_timer_13_2 = 0; ldv_timer_13_3 = 0; return; } } int ldv_irq_2(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_msix_ring(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_59231; default: ldv_stop(); } ldv_59231: ; } else { } return (state); } } int reg_timer_14(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) { { if ((unsigned long )function == (unsigned long )(& igb_update_phy_info)) { activate_suitable_timer_14(timer, data); } else { } return (0); } } void choose_interrupt_2(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_0, ldv_irq_line_2_0, ldv_irq_data_2_0); goto ldv_59243; case 1: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_1, ldv_irq_line_2_1, ldv_irq_data_2_1); goto ldv_59243; case 2: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_2, ldv_irq_line_2_2, ldv_irq_data_2_2); goto ldv_59243; case 3: ldv_irq_2_0 = ldv_irq_2(ldv_irq_2_3, ldv_irq_line_2_3, ldv_irq_data_2_3); goto ldv_59243; default: ldv_stop(); } ldv_59243: ; return; } } void disable_work_9(struct work_struct *work ) { { if ((ldv_work_9_0 == 3 || ldv_work_9_0 == 2) && (unsigned long )ldv_work_struct_9_0 == (unsigned long 
/* ...continuation of disable_work_9(): slots 0-3; note all matching slots are
   cleared (no early return), unlike call_and_disable_work_9(). */
)work) { ldv_work_9_0 = 1; } else { } if ((ldv_work_9_1 == 3 || ldv_work_9_1 == 2) && (unsigned long )ldv_work_struct_9_1 == (unsigned long )work) { ldv_work_9_1 = 1; } else { } if ((ldv_work_9_2 == 3 || ldv_work_9_2 == 2) && (unsigned long )ldv_work_struct_9_2 == (unsigned long )work) { ldv_work_9_2 = 1; } else { } if ((ldv_work_9_3 == 3 || ldv_work_9_3 == 2) && (unsigned long )ldv_work_struct_9_3 == (unsigned long )work) { ldv_work_9_3 = 1; } else { } return; } }
/* disable_suitable_irq_4 - free_irq model for handler 4: clear the slot whose
   registered line matches. */
void disable_suitable_irq_4(int line , void *data ) { { if (ldv_irq_4_0 != 0 && line == ldv_irq_line_4_0) { ldv_irq_4_0 = 0; return; } else { } if (ldv_irq_4_1 != 0 && line == ldv_irq_line_4_1) { ldv_irq_4_1 = 0; return; } else { } if (ldv_irq_4_2 != 0 && line == ldv_irq_line_4_2) { ldv_irq_4_2 = 0; return; } else { } if (ldv_irq_4_3 != 0 && line == ldv_irq_line_4_3) { ldv_irq_4_3 = 0; return; } else { } return; } }
/* disable_suitable_timer_14 - del_timer model: mark the slot tracking *timer
   (if any) idle again. */
void disable_suitable_timer_14(struct timer_list *timer ) { { if (ldv_timer_14_0 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_14_0) { ldv_timer_14_0 = 0; return; } else { } if (ldv_timer_14_1 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_14_1) { ldv_timer_14_1 = 0; return; } else { } if (ldv_timer_14_2 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_14_2) { ldv_timer_14_2 = 0; return; } else { } if (ldv_timer_14_3 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_14_3) { ldv_timer_14_3 = 0; return; } else { } return; } }
/* disable_work_10 - mark every slot of work model 10 matching *work disabled.
   (Definition continues on the next source line.) */
void disable_work_10(struct work_struct *work ) { { if ((ldv_work_10_0 == 3 || ldv_work_10_0 == 2) && (unsigned long )ldv_work_struct_10_0 == (unsigned long )work) { ldv_work_10_0 = 1; } else { } if ((ldv_work_10_1 == 3 || ldv_work_10_1 == 2) && (unsigned long )ldv_work_struct_10_1 == (unsigned long )work) { ldv_work_10_1 = 1; } else { } if ((ldv_work_10_2 == 3 || ldv_work_10_2 == 2) && (unsigned long )ldv_work_struct_10_2 == (unsigned long )work) { ldv_work_10_2 = 1; } else { } if ((ldv_work_10_3 == 3 ||
/* ...tail of disable_work_10(): slot 3. */
ldv_work_10_3 == 2) && (unsigned long )ldv_work_struct_10_3 == (unsigned long )work) { ldv_work_10_3 = 1; } else { } return; } }
/* ldv_pci_driver_22 - allocate a zeroed 2976-byte object to act as the
   pci_dev for the pci_driver state machine. */
void ldv_pci_driver_22(void) { void *tmp ; { tmp = ldv_init_zalloc(2976UL); igb_driver_group1 = (struct pci_dev *)tmp; return; } }
/* activate_work_10 - bind *work to the first unused slot of work model 10 and
   record its state; silently dropped when all four slots are used. */
void activate_work_10(struct work_struct *work , int state ) { { if (ldv_work_10_0 == 0) { ldv_work_struct_10_0 = work; ldv_work_10_0 = state; return; } else { } if (ldv_work_10_1 == 0) { ldv_work_struct_10_1 = work; ldv_work_10_1 = state; return; } else { } if (ldv_work_10_2 == 0) { ldv_work_struct_10_2 = work; ldv_work_10_2 = state; return; } else { } if (ldv_work_10_3 == 0) { ldv_work_struct_10_3 = work; ldv_work_10_3 = state; return; } else { } return; } }
/* activate_suitable_timer_14 - arm timer 14 in the first slot that is unused
   (state 0) or expired (state 2); silently dropped when all four are armed. */
void activate_suitable_timer_14(struct timer_list *timer , unsigned long data ) { { if (ldv_timer_14_0 == 0 || ldv_timer_14_0 == 2) { ldv_timer_list_14_0 = timer; ldv_timer_list_14_0->data = data; ldv_timer_14_0 = 1; return; } else { } if (ldv_timer_14_1 == 0 || ldv_timer_14_1 == 2) { ldv_timer_list_14_1 = timer; ldv_timer_list_14_1->data = data; ldv_timer_14_1 = 1; return; } else { } if (ldv_timer_14_2 == 0 || ldv_timer_14_2 == 2) { ldv_timer_list_14_2 = timer; ldv_timer_list_14_2->data = data; ldv_timer_14_2 = 1; return; } else { } if (ldv_timer_14_3 == 0 || ldv_timer_14_3 == 2) { ldv_timer_list_14_3 = timer; ldv_timer_list_14_3->data = data; ldv_timer_14_3 = 1; return; } else { } return; } }
/* timer_init_14 - reset all four slots of timer model 14 to "not armed". */
void timer_init_14(void) { { ldv_timer_14_0 = 0; ldv_timer_14_1 = 0; ldv_timer_14_2 = 0; ldv_timer_14_3 = 0; return; } }
/* disable_suitable_irq_3 - free_irq model for handler 3: clear the slot whose
   registered line matches. */
void disable_suitable_irq_3(int line , void *data ) { { if (ldv_irq_3_0 != 0 && line == ldv_irq_line_3_0) { ldv_irq_3_0 = 0; return; } else { } if (ldv_irq_3_1 != 0 && line == ldv_irq_line_3_1) { ldv_irq_3_1 = 0; return; } else { } if (ldv_irq_3_2 != 0 && line == ldv_irq_line_3_2) { ldv_irq_3_2 = 0; return; } else { } if (ldv_irq_3_3 != 0 && line == ldv_irq_line_3_3) { ldv_irq_3_3 = 0; return; } else { } return; } }
/* reg_timer_13 - setup_timer model for the watchdog timer.
   (Definition continues on the next source line.) */
int reg_timer_13(struct
timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) { { if ((unsigned long )function == (unsigned long )(& igb_watchdog)) { activate_suitable_timer_13(timer, data); } else { } return (0); } } void call_and_disable_all_10(int state ) { { if (ldv_work_10_0 == state) { call_and_disable_work_10(ldv_work_struct_10_0); } else { } if (ldv_work_10_1 == state) { call_and_disable_work_10(ldv_work_struct_10_1); } else { } if (ldv_work_10_2 == state) { call_and_disable_work_10(ldv_work_struct_10_2); } else { } if (ldv_work_10_3 == state) { call_and_disable_work_10(ldv_work_struct_10_3); } else { } return; } } void choose_timer_14(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_timer_14_0 == 1) { ldv_timer_14_0 = 2; ldv_timer_14(ldv_timer_14_0, ldv_timer_list_14_0); } else { } goto ldv_59292; case 1: ; if (ldv_timer_14_1 == 1) { ldv_timer_14_1 = 2; ldv_timer_14(ldv_timer_14_1, ldv_timer_list_14_1); } else { } goto ldv_59292; case 2: ; if (ldv_timer_14_2 == 1) { ldv_timer_14_2 = 2; ldv_timer_14(ldv_timer_14_2, ldv_timer_list_14_2); } else { } goto ldv_59292; case 3: ; if (ldv_timer_14_3 == 1) { ldv_timer_14_3 = 2; ldv_timer_14(ldv_timer_14_3, ldv_timer_list_14_3); } else { } goto ldv_59292; default: ldv_stop(); } ldv_59292: ; return; } } int ldv_irq_1(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_msix_other(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_59304; default: ldv_stop(); } ldv_59304: ; } else { } return (state); } } void choose_interrupt_3(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_0, ldv_irq_line_3_0, ldv_irq_data_3_0); goto ldv_59312; case 1: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_1, 
ldv_irq_line_3_1, ldv_irq_data_3_1); goto ldv_59312; case 2: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_2, ldv_irq_line_3_2, ldv_irq_data_3_2); goto ldv_59312; case 3: ldv_irq_3_0 = ldv_irq_3(ldv_irq_3_3, ldv_irq_line_3_3, ldv_irq_data_3_3); goto ldv_59312; default: ldv_stop(); } ldv_59312: ; return; } } void ldv_main_exported_19(void) ; void ldv_main_exported_18(void) ; void ldv_main_exported_16(void) ; void ldv_main_exported_17(void) ; void ldv_main_exported_15(void) ; int main(void) { int ldvarg1 ; void *ldvarg4 ; void *tmp ; int ldvarg3 ; void *ldvarg0 ; void *tmp___0 ; void *ldvarg5 ; void *tmp___1 ; void *ldvarg2 ; void *tmp___2 ; struct pci_device_id *ldvarg13 ; void *tmp___3 ; int ldvarg12 ; enum pci_channel_state ldvarg18 ; struct sk_buff *ldvarg39 ; void *tmp___4 ; u16 ldvarg32 ; u8 ldvarg31 ; int ldvarg41 ; __be16 ldvarg43 ; u16 ldvarg42 ; int ldvarg49 ; int ldvarg50 ; int ldvarg46 ; bool ldvarg37 ; netdev_features_t ldvarg36 ; int ldvarg29 ; struct ifla_vf_info *ldvarg40 ; void *tmp___5 ; u16 ldvarg44 ; __be16 ldvarg45 ; u8 *ldvarg26 ; void *tmp___6 ; int ldvarg27 ; netdev_features_t ldvarg35 ; int ldvarg38 ; netdev_features_t ldvarg30 ; int ldvarg33 ; int ldvarg48 ; struct rtnl_link_stats64 *ldvarg25 ; void *tmp___7 ; void *ldvarg28 ; void *tmp___8 ; struct sk_buff *ldvarg34 ; void *tmp___9 ; struct ifreq *ldvarg47 ; void *tmp___10 ; unsigned long ldvarg54 ; void *ldvarg53 ; void *tmp___11 ; struct notifier_block *ldvarg52 ; void *tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; { tmp = ldv_init_zalloc(1UL); ldvarg4 = tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg0 = tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg5 = tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg2 = tmp___2; tmp___3 = ldv_init_zalloc(32UL); ldvarg13 = (struct pci_device_id *)tmp___3; tmp___4 = ldv_init_zalloc(232UL); ldvarg39 = (struct sk_buff *)tmp___4; tmp___5 = ldv_init_zalloc(64UL); ldvarg40 = (struct 
ifla_vf_info *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg26 = (u8 *)tmp___6; tmp___7 = ldv_init_zalloc(184UL); ldvarg25 = (struct rtnl_link_stats64 *)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg28 = tmp___8; tmp___9 = ldv_init_zalloc(232UL); ldvarg34 = (struct sk_buff *)tmp___9; tmp___10 = ldv_init_zalloc(40UL); ldvarg47 = (struct ifreq *)tmp___10; tmp___11 = ldv_init_zalloc(1UL); ldvarg53 = tmp___11; tmp___12 = ldv_init_zalloc(24UL); ldvarg52 = (struct notifier_block *)tmp___12; ldv_initialize(); ldv_memset((void *)(& ldvarg1), 0, 4UL); ldv_memset((void *)(& ldvarg3), 0, 4UL); ldv_memset((void *)(& ldvarg12), 0, 4UL); ldv_memset((void *)(& ldvarg18), 0, 4UL); ldv_memset((void *)(& ldvarg32), 0, 2UL); ldv_memset((void *)(& ldvarg31), 0, 1UL); ldv_memset((void *)(& ldvarg41), 0, 4UL); ldv_memset((void *)(& ldvarg43), 0, 2UL); ldv_memset((void *)(& ldvarg42), 0, 2UL); ldv_memset((void *)(& ldvarg49), 0, 4UL); ldv_memset((void *)(& ldvarg50), 0, 4UL); ldv_memset((void *)(& ldvarg46), 0, 4UL); ldv_memset((void *)(& ldvarg37), 0, 1UL); ldv_memset((void *)(& ldvarg36), 0, 8UL); ldv_memset((void *)(& ldvarg29), 0, 4UL); ldv_memset((void *)(& ldvarg44), 0, 2UL); ldv_memset((void *)(& ldvarg45), 0, 2UL); ldv_memset((void *)(& ldvarg27), 0, 4UL); ldv_memset((void *)(& ldvarg35), 0, 8UL); ldv_memset((void *)(& ldvarg38), 0, 4UL); ldv_memset((void *)(& ldvarg30), 0, 8UL); ldv_memset((void *)(& ldvarg33), 0, 4UL); ldv_memset((void *)(& ldvarg48), 0, 4UL); ldv_memset((void *)(& ldvarg54), 0, 8UL); work_init_11(); ldv_state_variable_11 = 1; ldv_state_variable_21 = 0; ldv_state_variable_7 = 1; ldv_state_variable_17 = 0; ldv_state_variable_2 = 1; ldv_state_variable_22 = 0; ldv_state_variable_1 = 1; ldv_state_variable_18 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_23 = 0; ldv_state_variable_16 = 0; timer_init_13(); ldv_state_variable_13 = 1; ldv_state_variable_25 = 0; ldv_state_variable_6 = 1; ldv_state_variable_3 = 1; work_init_9(); ldv_state_variable_9 = 
1; work_init_12(); ldv_state_variable_12 = 1; ldv_state_variable_20 = 0; timer_init_14(); ldv_state_variable_14 = 1; ldv_state_variable_15 = 0; ldv_state_variable_8 = 1; ldv_state_variable_4 = 1; ldv_state_variable_24 = 0; ldv_state_variable_19 = 0; work_init_10(); ldv_state_variable_10 = 1; ldv_state_variable_5 = 1; ldv_59488: tmp___13 = __VERIFIER_nondet_int(); switch (tmp___13) { case 0: ; goto ldv_59383; case 1: ; if (ldv_state_variable_21 != 0) { tmp___14 = __VERIFIER_nondet_int(); switch (tmp___14) { case 0: ; if (ldv_state_variable_21 == 1) { igb_get_i2c_clk(ldvarg5); ldv_state_variable_21 = 1; } else { } goto ldv_59386; case 1: ; if (ldv_state_variable_21 == 1) { igb_get_i2c_data(ldvarg4); ldv_state_variable_21 = 1; } else { } goto ldv_59386; case 2: ; if (ldv_state_variable_21 == 1) { igb_set_i2c_data(ldvarg2, ldvarg3); ldv_state_variable_21 = 1; } else { } goto ldv_59386; case 3: ; if (ldv_state_variable_21 == 1) { igb_set_i2c_clk(ldvarg0, ldvarg1); ldv_state_variable_21 = 1; } else { } goto ldv_59386; default: ldv_stop(); } ldv_59386: ; } else { } goto ldv_59383; case 2: ; goto ldv_59383; case 3: ; if (ldv_state_variable_17 != 0) { ldv_main_exported_17(); } else { } goto ldv_59383; case 4: ; if (ldv_state_variable_2 != 0) { choose_interrupt_2(); } else { } goto ldv_59383; case 5: ; if (ldv_state_variable_22 != 0) { tmp___15 = __VERIFIER_nondet_int(); switch (tmp___15) { case 0: ; if (ldv_state_variable_22 == 1) { ldv_retval_0 = igb_probe(igb_driver_group1, (struct pci_device_id const *)ldvarg13); if (ldv_retval_0 == 0) { ldv_state_variable_22 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_59396; case 1: ; if (ldv_state_variable_22 == 2) { igb_shutdown(igb_driver_group1); ldv_state_variable_22 = 2; } else { } goto ldv_59396; case 2: ; if (ldv_state_variable_22 == 1) { igb_pci_sriov_configure(igb_driver_group1, ldvarg12); ldv_state_variable_22 = 1; } else { } if (ldv_state_variable_22 == 2) { igb_pci_sriov_configure(igb_driver_group1, 
ldvarg12); ldv_state_variable_22 = 2; } else { } goto ldv_59396; case 3: ; if (ldv_state_variable_22 == 2) { igb_remove(igb_driver_group1); ldv_state_variable_22 = 1; } else { } goto ldv_59396; default: ldv_stop(); } ldv_59396: ; } else { } goto ldv_59383; case 6: ; if (ldv_state_variable_1 != 0) { choose_interrupt_1(); } else { } goto ldv_59383; case 7: ; if (ldv_state_variable_18 != 0) { ldv_main_exported_18(); } else { } goto ldv_59383; case 8: ; if (ldv_state_variable_0 != 0) { tmp___16 = __VERIFIER_nondet_int(); switch (tmp___16) { case 0: ; if (ldv_state_variable_0 == 3 && ref_cnt == 0) { igb_exit_module(); ldv_state_variable_0 = 2; goto ldv_final; } else { } goto ldv_59406; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_1 = igb_init_module(); if (ldv_retval_1 == 0) { ldv_state_variable_0 = 3; ldv_state_variable_23 = 1; ldv_initialize_pci_error_handlers_23(); ldv_state_variable_19 = 1; ldv_initialize_ethtool_ops_19(); ldv_state_variable_16 = 1; ldv_initialize_e1000_nvm_operations_16(); ldv_state_variable_24 = 1; ldv_state_variable_18 = 1; ldv_initialize_e1000_mac_operations_18(); ldv_state_variable_15 = 1; ldv_state_variable_17 = 1; ldv_initialize_e1000_phy_operations_17(); ldv_state_variable_21 = 1; ldv_state_variable_25 = 1; ldv_dev_pm_ops_25(); } else { } if (ldv_retval_1 != 0) { ldv_state_variable_0 = 2; goto ldv_final; } else { } } else { } goto ldv_59406; default: ldv_stop(); } ldv_59406: ; } else { } goto ldv_59383; case 9: ; if (ldv_state_variable_23 != 0) { tmp___17 = __VERIFIER_nondet_int(); switch (tmp___17) { case 0: ; if (ldv_state_variable_23 == 1) { igb_io_slot_reset(igb_err_handler_group0); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 3) { igb_io_slot_reset(igb_err_handler_group0); ldv_state_variable_23 = 3; } else { } if (ldv_state_variable_23 == 2) { igb_io_slot_reset(igb_err_handler_group0); ldv_state_variable_23 = 2; } else { } goto ldv_59411; case 1: ; if (ldv_state_variable_23 == 1) { 
igb_io_error_detected(igb_err_handler_group0, (pci_channel_state_t )ldvarg18); ldv_state_variable_23 = 1; } else { } if (ldv_state_variable_23 == 3) { igb_io_error_detected(igb_err_handler_group0, (pci_channel_state_t )ldvarg18); ldv_state_variable_23 = 3; } else { } if (ldv_state_variable_23 == 2) { igb_io_error_detected(igb_err_handler_group0, (pci_channel_state_t )ldvarg18); ldv_state_variable_23 = 2; } else { } goto ldv_59411; case 2: ; if (ldv_state_variable_23 == 3) { igb_io_resume(igb_err_handler_group0); ldv_state_variable_23 = 2; } else { } goto ldv_59411; case 3: ; if (ldv_state_variable_23 == 2) { ldv_suspend_23(); ldv_state_variable_23 = 3; } else { } goto ldv_59411; case 4: ; if (ldv_state_variable_23 == 3) { ldv_release_23(); ldv_state_variable_23 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_23 == 2) { ldv_release_23(); ldv_state_variable_23 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_59411; case 5: ; if (ldv_state_variable_23 == 1) { ldv_probe_23(); ldv_state_variable_23 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_59411; default: ldv_stop(); } ldv_59411: ; } else { } goto ldv_59383; case 10: ; if (ldv_state_variable_16 != 0) { ldv_main_exported_16(); } else { } goto ldv_59383; case 11: ; if (ldv_state_variable_13 != 0) { choose_timer_13(); } else { } goto ldv_59383; case 12: ; if (ldv_state_variable_25 != 0) { tmp___18 = __VERIFIER_nondet_int(); switch (tmp___18) { case 0: ; if (ldv_state_variable_25 == 15) { ldv_retval_22 = igb_resume(igb_pm_ops_group1); if (ldv_retval_22 == 0) { ldv_state_variable_25 = 16; } else { } } else { } goto ldv_59422; case 1: ; if (ldv_state_variable_25 == 2) { igb_runtime_idle(igb_pm_ops_group1); ldv_state_variable_25 = 2; } else { } if (ldv_state_variable_25 == 1) { igb_runtime_idle(igb_pm_ops_group1); ldv_state_variable_25 = 1; } else { } goto ldv_59422; case 2: ; if (ldv_state_variable_25 == 2) { ldv_retval_21 = igb_runtime_resume(igb_pm_ops_group1); if (ldv_retval_21 == 0) { 
ldv_state_variable_25 = 1; ref_cnt = ref_cnt - 1; } else { } } else { } goto ldv_59422; case 3: ; if (ldv_state_variable_25 == 3) { ldv_retval_20 = igb_suspend(igb_pm_ops_group1); if (ldv_retval_20 == 0) { ldv_state_variable_25 = 4; } else { } } else { } goto ldv_59422; case 4: ; if (ldv_state_variable_25 == 14) { ldv_retval_19 = igb_resume(igb_pm_ops_group1); if (ldv_retval_19 == 0) { ldv_state_variable_25 = 16; } else { } } else { } goto ldv_59422; case 5: ; if (ldv_state_variable_25 == 1) { ldv_retval_18 = igb_runtime_suspend(igb_pm_ops_group1); if (ldv_retval_18 == 0) { ldv_state_variable_25 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_59422; case 6: ; if (ldv_state_variable_25 == 3) { ldv_retval_17 = igb_suspend(igb_pm_ops_group1); if (ldv_retval_17 == 0) { ldv_state_variable_25 = 5; } else { } } else { } goto ldv_59422; case 7: ; if (ldv_state_variable_25 == 3) { ldv_retval_16 = igb_suspend(igb_pm_ops_group1); if (ldv_retval_16 == 0) { ldv_state_variable_25 = 6; } else { } } else { } goto ldv_59422; case 8: ; if (ldv_state_variable_25 == 13) { ldv_retval_15 = igb_resume(igb_pm_ops_group1); if (ldv_retval_15 == 0) { ldv_state_variable_25 = 16; } else { } } else { } goto ldv_59422; case 9: ; if (ldv_state_variable_25 == 4) { ldv_retval_14 = ldv_suspend_late_25(); if (ldv_retval_14 == 0) { ldv_state_variable_25 = 7; } else { } } else { } goto ldv_59422; case 10: ; if (ldv_state_variable_25 == 10) { ldv_retval_13 = ldv_restore_early_25(); if (ldv_retval_13 == 0) { ldv_state_variable_25 = 14; } else { } } else { } goto ldv_59422; case 11: ; if (ldv_state_variable_25 == 7) { ldv_retval_12 = ldv_resume_early_25(); if (ldv_retval_12 == 0) { ldv_state_variable_25 = 13; } else { } } else { } goto ldv_59422; case 12: ; if (ldv_state_variable_25 == 12) { ldv_retval_11 = ldv_thaw_early_25(); if (ldv_retval_11 == 0) { ldv_state_variable_25 = 15; } else { } } else { } goto ldv_59422; case 13: ; if (ldv_state_variable_25 == 8) { ldv_retval_10 = 
ldv_resume_noirq_25(); if (ldv_retval_10 == 0) { ldv_state_variable_25 = 13; } else { } } else { } goto ldv_59422; case 14: ; if (ldv_state_variable_25 == 6) { ldv_retval_9 = ldv_freeze_noirq_25(); if (ldv_retval_9 == 0) { ldv_state_variable_25 = 11; } else { } } else { } goto ldv_59422; case 15: ; if (ldv_state_variable_25 == 1) { ldv_retval_8 = ldv_prepare_25(); if (ldv_retval_8 == 0) { ldv_state_variable_25 = 3; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_59422; case 16: ; if (ldv_state_variable_25 == 6) { ldv_retval_7 = ldv_freeze_late_25(); if (ldv_retval_7 == 0) { ldv_state_variable_25 = 12; } else { } } else { } goto ldv_59422; case 17: ; if (ldv_state_variable_25 == 11) { ldv_retval_6 = ldv_thaw_noirq_25(); if (ldv_retval_6 == 0) { ldv_state_variable_25 = 15; } else { } } else { } goto ldv_59422; case 18: ; if (ldv_state_variable_25 == 5) { ldv_retval_5 = ldv_poweroff_noirq_25(); if (ldv_retval_5 == 0) { ldv_state_variable_25 = 9; } else { } } else { } goto ldv_59422; case 19: ; if (ldv_state_variable_25 == 5) { ldv_retval_4 = ldv_poweroff_late_25(); if (ldv_retval_4 == 0) { ldv_state_variable_25 = 10; } else { } } else { } goto ldv_59422; case 20: ; if (ldv_state_variable_25 == 9) { ldv_retval_3 = ldv_restore_noirq_25(); if (ldv_retval_3 == 0) { ldv_state_variable_25 = 14; } else { } } else { } goto ldv_59422; case 21: ; if (ldv_state_variable_25 == 4) { ldv_retval_2 = ldv_suspend_noirq_25(); if (ldv_retval_2 == 0) { ldv_state_variable_25 = 8; } else { } } else { } goto ldv_59422; case 22: ; if (ldv_state_variable_25 == 16) { ldv_complete_25(); ldv_state_variable_25 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_59422; default: ldv_stop(); } ldv_59422: ; } else { } goto ldv_59383; case 13: ; goto ldv_59383; case 14: ; if (ldv_state_variable_3 != 0) { choose_interrupt_3(); } else { } goto ldv_59383; case 15: ; if (ldv_state_variable_9 != 0) { invoke_work_9(); } else { } goto ldv_59383; case 16: ; goto ldv_59383; case 17: ; if 
(ldv_state_variable_20 != 0) { tmp___19 = __VERIFIER_nondet_int(); switch (tmp___19) { case 0: ; if (ldv_state_variable_20 == 2) { igb_ndo_set_vf_bw(igb_netdev_ops_group1, ldvarg50, ldvarg49, ldvarg48); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_ndo_set_vf_bw(igb_netdev_ops_group1, ldvarg50, ldvarg49, ldvarg48); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_ndo_set_vf_bw(igb_netdev_ops_group1, ldvarg50, ldvarg49, ldvarg48); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 1: ; if (ldv_state_variable_20 == 2) { igb_ioctl(igb_netdev_ops_group1, ldvarg47, ldvarg46); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_ioctl(igb_netdev_ops_group1, ldvarg47, ldvarg46); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_ioctl(igb_netdev_ops_group1, ldvarg47, ldvarg46); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 2: ; if (ldv_state_variable_20 == 2) { igb_vlan_rx_kill_vid(igb_netdev_ops_group1, (int )ldvarg45, (int )ldvarg44); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_vlan_rx_kill_vid(igb_netdev_ops_group1, (int )ldvarg45, (int )ldvarg44); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_vlan_rx_kill_vid(igb_netdev_ops_group1, (int )ldvarg45, (int )ldvarg44); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 3: ; if (ldv_state_variable_20 == 2) { igb_vlan_rx_add_vid(igb_netdev_ops_group1, (int )ldvarg43, (int )ldvarg42); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_vlan_rx_add_vid(igb_netdev_ops_group1, (int )ldvarg43, (int )ldvarg42); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_vlan_rx_add_vid(igb_netdev_ops_group1, (int )ldvarg43, (int )ldvarg42); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 4: ; if (ldv_state_variable_20 == 2) { igb_ndo_get_vf_config(igb_netdev_ops_group1, ldvarg41, 
ldvarg40); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_ndo_get_vf_config(igb_netdev_ops_group1, ldvarg41, ldvarg40); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_ndo_get_vf_config(igb_netdev_ops_group1, ldvarg41, ldvarg40); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 5: ; if (ldv_state_variable_20 == 2) { ldv_retval_24 = igb_open(igb_netdev_ops_group1); if (ldv_retval_24 == 0) { ldv_state_variable_20 = 3; } else { } } else { } goto ldv_59452; case 6: ; if (ldv_state_variable_20 == 3) { igb_xmit_frame(ldvarg39, igb_netdev_ops_group1); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 7: ; if (ldv_state_variable_20 == 2) { igb_ndo_set_vf_spoofchk(igb_netdev_ops_group1, ldvarg38, (int )ldvarg37); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_ndo_set_vf_spoofchk(igb_netdev_ops_group1, ldvarg38, (int )ldvarg37); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_ndo_set_vf_spoofchk(igb_netdev_ops_group1, ldvarg38, (int )ldvarg37); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 8: ; if (ldv_state_variable_20 == 2) { igb_fix_features(igb_netdev_ops_group1, ldvarg36); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_fix_features(igb_netdev_ops_group1, ldvarg36); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_fix_features(igb_netdev_ops_group1, ldvarg36); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 9: ; if (ldv_state_variable_20 == 3) { igb_close(igb_netdev_ops_group1); ldv_state_variable_20 = 2; } else { } goto ldv_59452; case 10: ; if (ldv_state_variable_20 == 2) { igb_set_rx_mode(igb_netdev_ops_group1); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_set_rx_mode(igb_netdev_ops_group1); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_set_rx_mode(igb_netdev_ops_group1); ldv_state_variable_20 = 3; 
} else { } goto ldv_59452; case 11: ; if (ldv_state_variable_20 == 2) { eth_validate_addr(igb_netdev_ops_group1); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { eth_validate_addr(igb_netdev_ops_group1); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { eth_validate_addr(igb_netdev_ops_group1); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 12: ; if (ldv_state_variable_20 == 2) { passthru_features_check(ldvarg34, igb_netdev_ops_group1, ldvarg35); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { passthru_features_check(ldvarg34, igb_netdev_ops_group1, ldvarg35); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { passthru_features_check(ldvarg34, igb_netdev_ops_group1, ldvarg35); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 13: ; if (ldv_state_variable_20 == 2) { igb_ndo_set_vf_vlan(igb_netdev_ops_group1, ldvarg33, (int )ldvarg32, (int )ldvarg31); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_ndo_set_vf_vlan(igb_netdev_ops_group1, ldvarg33, (int )ldvarg32, (int )ldvarg31); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_ndo_set_vf_vlan(igb_netdev_ops_group1, ldvarg33, (int )ldvarg32, (int )ldvarg31); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 14: ; if (ldv_state_variable_20 == 2) { igb_netpoll(igb_netdev_ops_group1); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_netpoll(igb_netdev_ops_group1); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_netpoll(igb_netdev_ops_group1); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 15: ; if (ldv_state_variable_20 == 2) { igb_set_features(igb_netdev_ops_group1, ldvarg30); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_set_features(igb_netdev_ops_group1, ldvarg30); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { 
igb_set_features(igb_netdev_ops_group1, ldvarg30); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 16: ; if (ldv_state_variable_20 == 2) { igb_change_mtu(igb_netdev_ops_group1, ldvarg29); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 3) { igb_change_mtu(igb_netdev_ops_group1, ldvarg29); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 17: ; if (ldv_state_variable_20 == 2) { igb_set_mac(igb_netdev_ops_group1, ldvarg28); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_set_mac(igb_netdev_ops_group1, ldvarg28); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_set_mac(igb_netdev_ops_group1, ldvarg28); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 18: ; if (ldv_state_variable_20 == 2) { igb_ndo_set_vf_mac(igb_netdev_ops_group1, ldvarg27, ldvarg26); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_ndo_set_vf_mac(igb_netdev_ops_group1, ldvarg27, ldvarg26); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_ndo_set_vf_mac(igb_netdev_ops_group1, ldvarg27, ldvarg26); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 19: ; if (ldv_state_variable_20 == 2) { igb_get_stats64(igb_netdev_ops_group1, ldvarg25); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_get_stats64(igb_netdev_ops_group1, ldvarg25); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_get_stats64(igb_netdev_ops_group1, ldvarg25); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 20: ; if (ldv_state_variable_20 == 2) { igb_tx_timeout(igb_netdev_ops_group1); ldv_state_variable_20 = 2; } else { } if (ldv_state_variable_20 == 1) { igb_tx_timeout(igb_netdev_ops_group1); ldv_state_variable_20 = 1; } else { } if (ldv_state_variable_20 == 3) { igb_tx_timeout(igb_netdev_ops_group1); ldv_state_variable_20 = 3; } else { } goto ldv_59452; case 21: ; if (ldv_state_variable_20 == 1) { ldv_retval_23 
= ldv_ndo_init_20(); if (ldv_retval_23 == 0) { ldv_state_variable_20 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_59452; case 22: ; if (ldv_state_variable_20 == 2) { ldv_ndo_uninit_20(); ldv_state_variable_20 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_59452; default: ldv_stop(); } ldv_59452: ; } else { } goto ldv_59383; case 18: ; if (ldv_state_variable_14 != 0) { choose_timer_14(); } else { } goto ldv_59383; case 19: ; if (ldv_state_variable_15 != 0) { ldv_main_exported_15(); } else { } goto ldv_59383; case 20: ; goto ldv_59383; case 21: ; if (ldv_state_variable_4 != 0) { choose_interrupt_4(); } else { } goto ldv_59383; case 22: ; if (ldv_state_variable_24 != 0) { tmp___20 = __VERIFIER_nondet_int(); switch (tmp___20) { case 0: ; if (ldv_state_variable_24 == 1) { igb_notify_dca(ldvarg52, ldvarg54, ldvarg53); ldv_state_variable_24 = 1; } else { } goto ldv_59482; default: ldv_stop(); } ldv_59482: ; } else { } goto ldv_59383; case 23: ; if (ldv_state_variable_19 != 0) { ldv_main_exported_19(); } else { } goto ldv_59383; case 24: ; if (ldv_state_variable_10 != 0) { invoke_work_10(); } else { } goto ldv_59383; case 25: ; goto ldv_59383; default: ldv_stop(); } ldv_59383: ; goto ldv_59488; ldv_final: ldv_check_final_state(); return 0; } } bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool 
ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_10(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_11(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_12(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_13(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_14(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_15(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_16(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int 
/*
 * CIL/LDV-generated wrappers for PCI driver registration and IRQ
 * (de)registration.  Each wrapper forwards to the real kernel API and then
 * updates the verifier's environment model.
 *
 * NOTE: the return type ("int") of ldv___pci_register_driver_17() is on the
 * previous source line, and this span ends in the middle of the
 * request_irq() call inside ldv_request_irq_23(), which continues on the
 * next line.
 */
ldv___pci_register_driver_17(struct pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 )
{ ldv_func_ret_type___4 ldv_func_res ;
  int tmp ;
  {
  /* Register the driver, then move the model's pci_driver state machine
   * (state variable 22) into its "registered" state and let the model
   * exercise the driver's pci callbacks. */
  tmp = __pci_register_driver(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  ldv_func_res = tmp;
  ldv_state_variable_22 = 1;
  ldv_pci_driver_22();
  return (ldv_func_res);
  }
}
/* pci_unregister_driver() model: resets state machine 22 to "unregistered". */
void ldv_pci_unregister_driver_18(struct pci_driver *ldv_func_arg1 )
{ {
  pci_unregister_driver(ldv_func_arg1);
  ldv_state_variable_22 = 0;
  return;
  }
}
/* request_irq() models: on success (ldv_func_res == 0) and when the handler
 * is one the model knows about (reg_check_7() != 0), record the (line, data)
 * pair so the model can later fire the interrupt handler. */
__inline static int ldv_request_irq_19(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev )
{ ldv_func_ret_type___5 ldv_func_res ;
  int tmp ;
  int tmp___0 ;
  {
  tmp = request_irq(irq, handler, flags, name, dev);
  ldv_func_res = tmp;
  tmp___0 = reg_check_7(handler);
  if (tmp___0 != 0 && ldv_func_res == 0) {
    activate_suitable_irq_7((int )irq, dev);
  } else {
  }
  return (ldv_func_res);
  }
}
__inline static int ldv_request_irq_20(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev )
{ ldv_func_ret_type___6 ldv_func_res ;
  int tmp ;
  int tmp___0 ;
  {
  tmp = request_irq(irq, handler, flags, name, dev);
  ldv_func_res = tmp;
  tmp___0 = reg_check_7(handler);
  if (tmp___0 != 0 && ldv_func_res == 0) {
    activate_suitable_irq_7((int )irq, dev);
  } else {
  }
  return (ldv_func_res);
  }
}
/* free_irq() models: drop the matching (line, data) pair from the model. */
void ldv_free_irq_21(unsigned int ldv_func_arg1 , void *ldv_func_arg2 )
{ {
  free_irq(ldv_func_arg1, ldv_func_arg2);
  disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2);
  return;
  }
}
void ldv_free_irq_22(unsigned int ldv_func_arg1 , void *ldv_func_arg2 )
{ {
  free_irq(ldv_func_arg1, ldv_func_arg2);
  disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2);
  return;
  }
}
/* Identical request_irq() model; body continues on the next source line. */
__inline static int ldv_request_irq_23(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev )
{ ldv_func_ret_type___7 ldv_func_res ;
  int tmp ;
  int tmp___0 ;
  {
  tmp = request_irq(irq, handler,
/* Continuation of ldv_request_irq_23() from the previous source line:
 * finish the real request_irq() call, then register the handler with the
 * model on success. */
flags, name, dev);
  ldv_func_res = tmp;
  tmp___0 = reg_check_7(handler);
  if (tmp___0 != 0 && ldv_func_res == 0) {
    activate_suitable_irq_7((int )irq, dev);
  } else {
  }
  return (ldv_func_res);
  }
}
/* request_irq() model (same pattern as ldv_request_irq_19/20/23). */
__inline static int ldv_request_irq_24(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev )
{ ldv_func_ret_type___8 ldv_func_res ;
  int tmp ;
  int tmp___0 ;
  {
  tmp = request_irq(irq, handler, flags, name, dev);
  ldv_func_res = tmp;
  tmp___0 = reg_check_7(handler);
  if (tmp___0 != 0 && ldv_func_res == 0) {
    activate_suitable_irq_7((int )irq, dev);
  } else {
  }
  return (ldv_func_res);
  }
}
/* free_irq() models: remove the (line, data) pair from the model. */
void ldv_free_irq_25(unsigned int ldv_func_arg1 , void *ldv_func_arg2 )
{ {
  free_irq(ldv_func_arg1, ldv_func_arg2);
  disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2);
  return;
  }
}
void ldv_free_irq_26(unsigned int ldv_func_arg1 , void *ldv_func_arg2 )
{ {
  free_irq(ldv_func_arg1, ldv_func_arg2);
  disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2);
  return;
  }
}
void ldv_free_irq_27(unsigned int ldv_func_arg1 , void *ldv_func_arg2 )
{ {
  free_irq(ldv_func_arg1, ldv_func_arg2);
  disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2);
  return;
  }
}
/* del_timer_sync() models: mark the timer inactive in the timer model. */
int ldv_del_timer_sync_28(struct timer_list *ldv_func_arg1 )
{ ldv_func_ret_type___9 ldv_func_res ;
  int tmp ;
  {
  tmp = del_timer_sync(ldv_func_arg1);
  ldv_func_res = tmp;
  disable_suitable_timer_13(ldv_func_arg1);
  return (ldv_func_res);
  }
}
int ldv_del_timer_sync_29(struct timer_list *ldv_func_arg1 )
{ ldv_func_ret_type___10 ldv_func_res ;
  int tmp ;
  {
  tmp = del_timer_sync(ldv_func_arg1);
  ldv_func_res = tmp;
  disable_suitable_timer_13(ldv_func_arg1);
  return (ldv_func_res);
  }
}
/* register_netdev() model: move the net_device_ops state machine (state
 * variable 20) into "registered" and let the model exercise the ndo
 * callbacks.  NOTE: unlike ldv___pci_register_driver_17(), the state is set
 * regardless of the register_netdev() return value. */
int ldv_register_netdev_30(struct net_device *dev )
{ ldv_func_ret_type___11 ldv_func_res ;
  int tmp ;
  {
  tmp = register_netdev(dev);
  ldv_func_res = tmp;
  ldv_state_variable_20 = 1;
  ldv_net_device_ops_20();
  return (ldv_func_res);
  }
}
/* free_netdev() model: resets state machine 20; body continues on the next
 * source line. */
void ldv_free_netdev_31(struct net_device *dev )
{ {
  free_netdev(dev);
  ldv_state_variable_20 = 0;
/* Tail of ldv_free_netdev_31(), whose head is on the previous source line. */
return;
  }
}
/* del_timer_sync() models: mark the timer inactive in the timer model. */
int ldv_del_timer_sync_32(struct timer_list *ldv_func_arg1 )
{ ldv_func_ret_type___12 ldv_func_res ;
  int tmp ;
  {
  tmp = del_timer_sync(ldv_func_arg1);
  ldv_func_res = tmp;
  disable_suitable_timer_13(ldv_func_arg1);
  return (ldv_func_res);
  }
}
int ldv_del_timer_sync_33(struct timer_list *ldv_func_arg1 )
{ ldv_func_ret_type___13 ldv_func_res ;
  int tmp ;
  {
  tmp = del_timer_sync(ldv_func_arg1);
  ldv_func_res = tmp;
  disable_suitable_timer_13(ldv_func_arg1);
  return (ldv_func_res);
  }
}
/* cancel_work_sync() models: mark the work item inactive in the model. */
bool ldv_cancel_work_sync_34(struct work_struct *ldv_func_arg1 )
{ ldv_func_ret_type___14 ldv_func_res ;
  bool tmp ;
  {
  tmp = cancel_work_sync(ldv_func_arg1);
  ldv_func_res = tmp;
  disable_work_11(ldv_func_arg1);
  return (ldv_func_res);
  }
}
bool ldv_cancel_work_sync_35(struct work_struct *ldv_func_arg1 )
{ ldv_func_ret_type___15 ldv_func_res ;
  bool tmp ;
  {
  tmp = cancel_work_sync(ldv_func_arg1);
  ldv_func_res = tmp;
  disable_work_11(ldv_func_arg1);
  return (ldv_func_res);
  }
}
/* unregister_netdev()/free_netdev() models: reset the net_device_ops state
 * machine (state variable 20) to "unregistered". */
void ldv_unregister_netdev_36(struct net_device *dev )
{ {
  unregister_netdev(dev);
  ldv_state_variable_20 = 0;
  return;
  }
}
void ldv_free_netdev_37(struct net_device *dev )
{ {
  free_netdev(dev);
  ldv_state_variable_20 = 0;
  return;
  }
}
/* mod_timer() models: mark the timer pending in the model with its new
 * expiry value. */
int ldv_mod_timer_38(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 )
{ ldv_func_ret_type___16 ldv_func_res ;
  int tmp ;
  {
  tmp = mod_timer(ldv_func_arg1, ldv_func_arg2);
  ldv_func_res = tmp;
  activate_pending_timer_13(ldv_func_arg1, ldv_func_arg2, 1);
  return (ldv_func_res);
  }
}
int ldv_mod_timer_39(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 )
{ ldv_func_ret_type___17 ldv_func_res ;
  int tmp ;
  {
  tmp = mod_timer(ldv_func_arg1, ldv_func_arg2);
  ldv_func_res = tmp;
  activate_pending_timer_13(ldv_func_arg1, ldv_func_arg2, 1);
  return (ldv_func_res);
  }
}
/* Same mod_timer() model; body continues on the next source line. */
int ldv_mod_timer_40(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 )
{ ldv_func_ret_type___18 ldv_func_res ;
  int tmp ;
  {
  tmp = mod_timer(ldv_func_arg1, ldv_func_arg2);
  ldv_func_res = tmp;
/* Tail of ldv_mod_timer_40(), whose head is on the previous source line. */
activate_pending_timer_13(ldv_func_arg1, ldv_func_arg2, 1);
  return (ldv_func_res);
  }
}
/* mod_timer() models: mark the timer pending in the model with its new
 * expiry value (identical pattern, one per distinct call site). */
int ldv_mod_timer_41(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 )
{ ldv_func_ret_type___19 ldv_func_res ;
  int tmp ;
  {
  tmp = mod_timer(ldv_func_arg1, ldv_func_arg2);
  ldv_func_res = tmp;
  activate_pending_timer_13(ldv_func_arg1, ldv_func_arg2, 1);
  return (ldv_func_res);
  }
}
int ldv_mod_timer_42(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 )
{ ldv_func_ret_type___20 ldv_func_res ;
  int tmp ;
  {
  tmp = mod_timer(ldv_func_arg1, ldv_func_arg2);
  ldv_func_res = tmp;
  activate_pending_timer_13(ldv_func_arg1, ldv_func_arg2, 1);
  return (ldv_func_res);
  }
}
int ldv_mod_timer_43(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 )
{ ldv_func_ret_type___21 ldv_func_res ;
  int tmp ;
  {
  tmp = mod_timer(ldv_func_arg1, ldv_func_arg2);
  ldv_func_res = tmp;
  activate_pending_timer_13(ldv_func_arg1, ldv_func_arg2, 1);
  return (ldv_func_res);
  }
}
int ldv_mod_timer_44(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 )
{ ldv_func_ret_type___22 ldv_func_res ;
  int tmp ;
  {
  tmp = mod_timer(ldv_func_arg1, ldv_func_arg2);
  ldv_func_res = tmp;
  activate_pending_timer_13(ldv_func_arg1, ldv_func_arg2, 1);
  return (ldv_func_res);
  }
}
__inline static long ldv__builtin_expect(long exp , long c ) ;
/* 16-bit byte-swap helpers (CIL expansion of the kernel's swab16 macros):
 * __swab16p() byte-swaps the value pointed to via __fswab16();
 * __swab16s() byte-swaps a 16-bit value in place. */
__inline static __u16 __swab16p(__u16 const *p )
{ __u16 tmp ;
  {
  tmp = __fswab16((int )*p);
  return (tmp);
  }
}
__inline static void __swab16s(__u16 *p )
{ {
  *p = __swab16p((__u16 const *)p);
  return;
  }
}
/* Declaration continues on the next source line (terminating ";" is there). */
extern void __dynamic_netdev_dbg(struct _ddebug * , struct net_device const * , char const * , ...)
; extern void __might_sleep(char const * , int , int ) ; int ldv_mutex_trylock_97(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_95(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_98(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_99(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_94(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_96(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_100(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_89(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_91(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_90(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_93(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_92(struct workqueue_struct *ldv_func_arg1 ) ; int ldv_irq_6(int state , int line , void *data ) ; void choose_interrupt_8(void) ; void activate_suitable_irq_6(int line , void *data ) ; int reg_check_8(irqreturn_t (*handler)(int , void * ) ) ; void activate_suitable_irq_8(int line , void *data ) ; void choose_interrupt_5(void) ; void disable_suitable_irq_5(int line , void *data ) ; int ldv_irq_5(int state , int line , void *data ) ; int reg_check_6(irqreturn_t (*handler)(int , void * ) ) ; void choose_interrupt_6(void) ; void disable_suitable_irq_6(int line , void *data ) ; void activate_suitable_irq_5(int line , void *data ) ; int ldv_irq_8(int state , int line , void *data ) ; void disable_suitable_irq_8(int line , void *data ) ; int ldv_irq_7(int state , int line , void *data ) ; void choose_interrupt_7(void) ; int reg_check_5(irqreturn_t (*handler)(int , void * ) ) ; extern void *vmalloc(unsigned long ) ; extern unsigned long 
msleep_interruptible(unsigned int ) ; extern void debug_dma_sync_single_for_cpu(struct device * , dma_addr_t , size_t , int ) ; extern void debug_dma_sync_single_for_device(struct device * , dma_addr_t , size_t , int ) ; __inline static void dma_sync_single_for_cpu(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (108), "i" (12UL)); ldv_31107: ; goto ldv_31107; } else { } if ((unsigned long )ops->sync_single_for_cpu != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_cpu))(dev, addr, size, dir); } else { } debug_dma_sync_single_for_cpu(dev, addr, size, (int )dir); return; } } __inline static void dma_sync_single_for_device(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (120), "i" (12UL)); ldv_31115: ; goto ldv_31115; } else { } if ((unsigned long )ops->sync_single_for_device != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction ))0)) { (*(ops->sync_single_for_device))(dev, addr, size, dir); } else { } debug_dma_sync_single_for_device(dev, 
addr, size, (int )dir); return; } } extern void kfree_skb(struct sk_buff * ) ; extern struct sk_buff *__alloc_skb(unsigned int , gfp_t , int , int ) ; __inline static struct sk_buff *alloc_skb(unsigned int size , gfp_t priority ) { struct sk_buff *tmp ; { tmp = __alloc_skb(size, priority, 0, -1); return (tmp); } } extern unsigned char *skb_put(struct sk_buff * , unsigned int ) ; __inline static void ethtool_cmd_speed_set(struct ethtool_cmd *ep , __u32 speed ) { { ep->speed = (unsigned short )speed; ep->speed_hi = (unsigned short )(speed >> 16); return; } } __inline static __u32 ethtool_cmd_speed(struct ethtool_cmd const *ep ) { { return ((__u32 )(((int )ep->speed_hi << 16) | (int )ep->speed)); } } __inline static int ldv_request_irq_101(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_19(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_20(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_23(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; void ldv_free_irq_105(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; void ldv_free_irq_106(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; extern int dev_open(struct net_device * ) ; extern int dev_close(struct net_device * ) ; __inline static void *kmap(struct page *page ) { void *tmp ; { __might_sleep("include/linux/highmem.h", 58, 0); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void kunmap(struct page *page ) { { return; } } __inline static u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv ) { u32 adv ; { adv = 0U; if (((int )eee_adv & 2) != 0) { adv = adv | 8U; } else { } if (((int )eee_adv & 4) != 0) { adv = adv | 
32U; } else { } if (((int )eee_adv & 8) != 0) { adv = adv | 4096U; } else { } if (((int )eee_adv & 16) != 0) { adv = adv | 131072U; } else { } if (((int )eee_adv & 32) != 0) { adv = adv | 262144U; } else { } if (((int )eee_adv & 64) != 0) { adv = adv | 524288U; } else { } return (adv); } } __inline static u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv ) { u16 reg ; { reg = 0U; if ((adv & 8U) != 0U) { reg = (u16 )((unsigned int )reg | 2U); } else { } if ((adv & 32U) != 0U) { reg = (u16 )((unsigned int )reg | 4U); } else { } if ((adv & 4096U) != 0U) { reg = (u16 )((unsigned int )reg | 8U); } else { } if ((adv & 131072U) != 0U) { reg = (u16 )((unsigned int )reg | 16U); } else { } if ((adv & 262144U) != 0U) { reg = (u16 )((unsigned int )reg | 32U); } else { } if ((adv & 524288U) != 0U) { reg = (u16 )((unsigned int )reg | 64U); } else { } return (reg); } } s32 igb_phy_sw_reset(struct e1000_hw *hw ) ; s32 igb_read_phy_reg_i2c(struct e1000_hw *hw , u32 offset , u16 *data ) ; s32 igb_read_xmdio_reg(struct e1000_hw *hw , u16 addr , u8 dev_addr , u16 *data ) ; s32 igb_blink_led(struct e1000_hw *hw ) ; s32 igb_cleanup_led(struct e1000_hw *hw ) ; s32 igb_led_off(struct e1000_hw *hw ) ; s32 igb_read_emi_reg(struct e1000_hw *hw , u16 addr , u16 *data ) ; s32 igb_get_eee_status_i354(struct e1000_hw *hw , bool *status ) ; extern int ptp_clock_index(struct ptp_clock * ) ; __inline static s32 igb_write_phy_reg(struct e1000_hw *hw , u32 offset , u16 data ) { s32 tmp ; { if ((unsigned long )hw->phy.ops.write_reg != (unsigned long )((s32 (*)(struct e1000_hw * , u32 , u16 ))0)) { tmp = (*(hw->phy.ops.write_reg))(hw, offset, (int )data); return (tmp); } else { } return (0); } } static struct igb_stats const igb_gstrings_stats[41U] = { {{'r', 'x', '_', 'p', 'a', 'c', 'k', 'e', 't', 's', '\000'}, 8, 3144}, {{'t', 'x', '_', 'p', 'a', 'c', 'k', 'e', 't', 's', '\000'}, 8, 3168}, {{'r', 'x', '_', 'b', 'y', 't', 'e', 's', '\000'}, 8, 3176}, {{'t', 'x', '_', 'b', 'y', 't', 'e', 's', '\000'}, 8, 3184}, 
{{'r', 'x', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a', 's', 't', '\000'}, 8, 3152}, {{'t', 'x', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a', 's', 't', '\000'}, 8, 3344}, {{'r', 'x', '_', 'm', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '\000'}, 8, 3160}, {{'t', 'x', '_', 'm', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '\000'}, 8, 3336}, {{'m', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '\000'}, 8, 3160}, {{'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', 's', '\000'}, 8, 3008}, {{'r', 'x', '_', 'c', 'r', 'c', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 2936}, {{'r', 'x', '_', 'n', 'o', '_', 'b', 'u', 'f', 'f', 'e', 'r', '_', 'c', 'o', 'u', 'n', 't', '\000'}, 8, 3192}, {{'r', 'x', '_', 'm', 'i', 's', 's', 'e', 'd', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 2968}, {{'t', 'x', '_', 'a', 'b', 'o', 'r', 't', 'e', 'd', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 2984}, {{'t', 'x', '_', 'c', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 3024}, {{'t', 'x', '_', 'w', 'i', 'n', 'd', 'o', 'w', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 3000}, {{'t', 'x', '_', 'a', 'b', 'o', 'r', 't', '_', 'l', 'a', 't', 'e', '_', 'c', 'o', 'l', 'l', '\000'}, 8, 3000}, {{'t', 'x', '_', 'd', 'e', 'f', 'e', 'r', 'r', 'e', 'd', '_', 'o', 'k', '\000'}, 8, 3016}, {{'t', 'x', '_', 's', 'i', 'n', 'g', 'l', 'e', '_', 'c', 'o', 'l', 'l', '_', 'o', 'k', '\000'}, 8, 2976}, {{'t', 'x', '_', 'm', 'u', 'l', 't', 'i', '_', 'c', 'o', 'l', 'l', '_', 'o', 'k', '\000'}, 8, 2992}, {{'t', 'x', '_', 't', 'i', 'm', 'e', 'o', 'u', 't', '_', 'c', 'o', 'u', 'n', 't', '\000'}, 4, 632}, {{'r', 'x', '_', 'l', 'o', 'n', 'g', '_', 'l', 'e', 'n', 'g', 't', 'h', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 3216}, {{'r', 'x', '_', 's', 'h', 'o', 'r', 't', '_', 'l', 'e', 'n', 'g', 't', 'h', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 3200}, {{'r', 'x', '_', 'a', 'l', 'i', 'g', 'n', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 2944}, {{'t', 'x', '_', 't', 'c', 'p', '_', 's', 'e', 'g', '_', 'g', 
'o', 'o', 'd', '\000'}, 8, 3352}, {{'t', 'x', '_', 't', 'c', 'p', '_', 's', 'e', 'g', '_', 'f', 'a', 'i', 'l', 'e', 'd', '\000'}, 8, 3360}, {{'r', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'n', '\000'}, 8, 3056}, {{'r', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'f', 'f', '\000'}, 8, 3072}, {{'t', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'n', '\000'}, 8, 3064}, {{'t', 'x', '_', 'f', 'l', 'o', 'w', '_', 'c', 'o', 'n', 't', 'r', 'o', 'l', '_', 'x', 'o', 'f', 'f', '\000'}, 8, 3080}, {{'r', 'x', '_', 'l', 'o', 'n', 'g', '_', 'b', 'y', 't', 'e', '_', 'c', 'o', 'u', 'n', 't', '\000'}, 8, 3176}, {{'t', 'x', '_', 'd', 'm', 'a', '_', 'o', 'u', 't', '_', 'o', 'f', '_', 's', 'y', 'n', 'c', '\000'}, 8, 3536}, {{'t', 'x', '_', 's', 'm', 'b', 'u', 's', '\000'}, 8, 3248}, {{'r', 'x', '_', 's', 'm', 'b', 'u', 's', '\000'}, 8, 3232}, {{'d', 'r', 'o', 'p', 'p', 'e', 'd', '_', 's', 'm', 'b', 'u', 's', '\000'}, 8, 3240}, {{'o', 's', '2', 'b', 'm', 'c', '_', 'r', 'x', '_', 'b', 'y', '_', 'b', 'm', 'c', '\000'}, 8, 3544}, {{'o', 's', '2', 'b', 'm', 'c', '_', 't', 'x', '_', 'b', 'y', '_', 'b', 'm', 'c', '\000'}, 8, 3560}, {{'o', 's', '2', 'b', 'm', 'c', '_', 't', 'x', '_', 'b', 'y', '_', 'h', 'o', 's', 't', '\000'}, 8, 3552}, {{'o', 's', '2', 'b', 'm', 'c', '_', 'r', 'x', '_', 'b', 'y', '_', 'h', 'o', 's', 't', '\000'}, 8, 3568}, {{'t', 'x', '_', 'h', 'w', 't', 's', 't', 'a', 'm', 'p', '_', 't', 'i', 'm', 'e', 'o', 'u', 't', 's', '\000'}, 4, 13008}, {{'r', 'x', '_', 'h', 'w', 't', 's', 't', 'a', 'm', 'p', '_', 'c', 'l', 'e', 'a', 'r', 'e', 'd', '\000'}, 4, 13012}}; static struct igb_stats const igb_gstrings_net_stats[9U] = { {{'r', 'x', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 32}, {{'t', 'x', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 40}, {{'t', 'x', '_', 'd', 'r', 'o', 'p', 'p', 'e', 'd', '\000'}, 8, 56}, {{'r', 'x', '_', 'l', 'e', 'n', 'g', 't', 
/* NOTE(review): CIL-generated code (see file header) — one statement stream, no
 * human formatting. Comments below are inserted at token boundaries only; every
 * code token is unchanged. This span holds the tail of the igb_gstrings_stats
 * table (its start lies before this chunk), the self-test name table, all of
 * igb_get_settings(), and the beginning of igb_set_settings() (which closes in
 * the next span). */
'h', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 80}, {{'r', 'x', '_', 'o', 'v', 'e', 'r', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 88}, {{'r', 'x', '_', 'f', 'r', 'a', 'm', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 104}, {{'r', 'x', '_', 'f', 'i', 'f', 'o', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 112}, {{'t', 'x', '_', 'f', 'i', 'f', 'o', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 144}, {{'t', 'x', '_', 'h', 'e', 'a', 'r', 't', 'b', 'e', 'a', 't', '_', 'e', 'r', 'r', 'o', 'r', 's', '\000'}, 8, 152}};
/* igb_gstrings_test: the five ethtool self-test case names ("Register test
 * (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback
 * test (offline)", "Link test (on/offline)"), expanded by CIL into per-char
 * initializers with explicit '\000' terminators. */
static char const igb_gstrings_test[5U][32U] = { { 'R', 'e', 'g', 'i', 's', 't', 'e', 'r', ' ', 't', 'e', 's', 't', ' ', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}, { 'E', 'e', 'p', 'r', 'o', 'm', ' ', 't', 'e', 's', 't', ' ', ' ', ' ', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}, { 'I', 'n', 't', 'e', 'r', 'r', 'u', 'p', 't', ' ', 't', 'e', 's', 't', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}, { 'L', 'o', 'o', 'p', 'b', 'a', 'c', 'k', ' ', 't', 'e', 's', 't', ' ', ' ', '(', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}, { 'L', 'i', 'n', 'k', ' ', 't', 'e', 's', 't', ' ', ' ', ' ', '(', 'o', 'n', '/', 'o', 'f', 'f', 'l', 'i', 'n', 'e', ')', '\000'}};
/* igb_get_settings: ethtool get_settings handler. Reads the status register
 * via igb_rd32(hw, 8U) and fills *ecmd with supported/advertised link modes,
 * port/transceiver type, speed, duplex, autoneg and MDI-X state, branching on
 * hw->phy.media_type (1U = the copper path here, judging by the PHY-address
 * and MDI-X handling; presumably the other arm is SerDes/fiber — TODO confirm
 * against e1000_media_type enum). Always returns 0. The numeric masks
 * (8431U, 140352U, 4294942719U, ...) are ethtool ADVERTISED_*/SUPPORTED_*
 * bit combinations pre-folded by the generator — NOTE(review): verify against
 * the original igb_ethtool.c rather than trusting the constants here. */
static int igb_get_settings(struct net_device *netdev , struct ethtool_cmd *ecmd ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; struct e1000_dev_spec_82575 *dev_spec ; struct e1000_sfp_flags *eth_flags ; u32 status ; u32 speed ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; dev_spec = & hw->dev_spec._82575; eth_flags = & dev_spec->eth_flags; status = igb_rd32(hw, 8U); if ((unsigned int )hw->phy.media_type == 1U) { ecmd->supported = 8431U; ecmd->advertising = 128U; if ((int )hw->mac.autoneg) { ecmd->advertising = ecmd->advertising | 64U; ecmd->advertising = ecmd->advertising | (__u32 )hw->phy.autoneg_advertised; } else { } ecmd->port = 0U; ecmd->phy_address =
(__u8 )hw->phy.addr; ecmd->transceiver = 0U; } else { ecmd->supported = 140352U; ecmd->advertising = 132096U; if ((unsigned int )hw->mac.type == 5U) { if ((unsigned int )hw->device_id == 8005U && (status & 8192U) == 0U) { ecmd->supported = ecmd->supported | 32768U; ecmd->supported = ecmd->supported & 4294836223U; ecmd->advertising = ecmd->advertising | 32768U; ecmd->advertising = ecmd->advertising & 4294836223U; } else { } } else { } if ((unsigned int )*((unsigned char *)eth_flags + 0UL) != 0U) { ecmd->supported = ecmd->supported | 8U; ecmd->advertising = ecmd->advertising | 8U; } else { } if ((int )hw->mac.autoneg) { ecmd->advertising = ecmd->advertising | 64U; } else { } ecmd->port = 3U; ecmd->transceiver = 1U; } if (! hw->mac.autoneg) { ecmd->advertising = ecmd->advertising & 4294942719U; } else { } /* map hw->fc.requested_mode (full/rx/tx pause) onto advertised pause bits */ switch ((unsigned int )hw->fc.requested_mode) { case 3U: ecmd->advertising = ecmd->advertising | 8192U; goto ldv_47966; case 1U: ecmd->advertising = ecmd->advertising | 24576U; goto ldv_47966; case 2U: ecmd->advertising = ecmd->advertising | 16384U; goto ldv_47966; default: ecmd->advertising = ecmd->advertising & 4294942719U; } ldv_47966: ; /* link-up (status bit 1): decode speed from status bits; 2500/1000/100/10 */ if ((status & 2U) != 0U) { if ((status & 4096U) != 0U && (status & 8192U) == 0U) { speed = 2500U; } else if ((status & 128U) != 0U) { speed = 1000U; } else if ((status & 64U) != 0U) { speed = 100U; } else { speed = 10U; } if ((int )status & 1 || (unsigned int )hw->phy.media_type != 1U) { ecmd->duplex = 1U; } else { ecmd->duplex = 0U; } } else { /* link down: SPEED_UNKNOWN (-1) and DUPLEX_UNKNOWN (0xff) */ speed = 4294967295U; ecmd->duplex = 255U; } ethtool_cmd_speed_set(ecmd, speed); if ((unsigned int )hw->phy.media_type == 2U || (int )hw->mac.autoneg) { ecmd->autoneg = 1U; } else { ecmd->autoneg = 0U; } if ((unsigned int )hw->phy.media_type == 1U) { ecmd->eth_tp_mdix = (int )hw->phy.is_mdix ?
2U : 1U; } else { ecmd->eth_tp_mdix = 0U; } if ((unsigned int )hw->phy.mdix == 0U) { ecmd->eth_tp_mdix_ctrl = 3U; } else { ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; } return (0); } }
/* igb_set_settings: ethtool set_settings handler (continues into the next
 * span). Rejects changes while SoL/IDER is active (igb_check_reset_block),
 * validates MDI-X control requests (copper only, and only with autoneg unless
 * set to auto), then spin-waits on the __IGB_RESETTING state bit via the
 * CIL-encoded goto loop (ldv_47976/ldv_47977) before applying autoneg
 * advertisement or forced speed/duplex. */
static int igb_set_settings(struct net_device *netdev , struct ethtool_cmd *ecmd ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; s32 tmp___0 ; int tmp___1 ; u32 speed ; __u32 tmp___2 ; int tmp___3 ; bool tmp___4 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; tmp___0 = igb_check_reset_block(hw); if (tmp___0 != 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Cannot change link characteristics when SoL/IDER is active.\n"); return (-22); } else { } if ((unsigned int )ecmd->eth_tp_mdix_ctrl != 0U) { if ((unsigned int )hw->phy.media_type != 1U) { return (-95); } else { } if ((unsigned int )ecmd->eth_tp_mdix_ctrl != 3U && (unsigned int )ecmd->autoneg != 1U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); return (-22); } else { } } else { } goto ldv_47977; ldv_47976: usleep_range(1000UL, 2000UL); ldv_47977: tmp___1 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___1 != 0) { goto ldv_47976; } else { } if ((unsigned int )ecmd->autoneg == 1U) { hw->mac.autoneg = 1; if ((unsigned int )hw->phy.media_type == 2U) { hw->phy.autoneg_advertised = (unsigned int )((u16 )ecmd->advertising) | 1088U; switch ((int )adapter->link_speed) { case 2500: hw->phy.autoneg_advertised = 32768U; goto ldv_47980; case 1000: hw->phy.autoneg_advertised = 32U; goto ldv_47980; case 100: hw->phy.autoneg_advertised = 8U; goto ldv_47980; default: ; goto ldv_47980; } ldv_47980: ; } else { hw->phy.autoneg_advertised = (unsigned int )((u16 )ecmd->advertising) | 192U; } ecmd->advertising = (__u32 )hw->phy.autoneg_advertised; if ((int )adapter->fc_autoneg) { hw->fc.requested_mode = 255; } else { } } else {
/* (continuation of igb_set_settings: forced speed/duplex path, MDI-X apply,
 * then reset/re-init the interface and release the state bit) */
tmp___2 = ethtool_cmd_speed((struct ethtool_cmd const *)ecmd); speed = tmp___2; tmp___3 = igb_set_spd_dplx(adapter, speed, (int )ecmd->duplex); if (tmp___3 != 0) { clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return (-22); } else { } } if ((unsigned int )ecmd->eth_tp_mdix_ctrl != 0U) { if ((unsigned int )ecmd->eth_tp_mdix_ctrl == 3U) { hw->phy.mdix = 0U; } else { hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; } } else { } tmp___4 = netif_running((struct net_device const *)adapter->netdev); if ((int )tmp___4) { igb_down(adapter); igb_up(adapter); } else { igb_reset(adapter); } clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return (0); } }
/* igb_get_link: ethtool get_link handler. If the carrier is off, force a
 * fresh link-status read (mac->get_link_status = 1) before delegating to
 * igb_has_link(); returns its result widened to u32. */
static u32 igb_get_link(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_mac_info *mac ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; mac = & adapter->hw.mac; tmp___0 = netif_carrier_ok((struct net_device const *)netdev); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { mac->get_link_status = 1; } else { } tmp___2 = igb_has_link(adapter); return ((u32 )tmp___2); } }
/* igb_get_pauseparam: report flow-control autoneg plus rx/tx pause derived
 * from hw->fc.current_mode (1U = rx only, 2U = tx only, 3U = both). */
static void igb_get_pauseparam(struct net_device *netdev , struct ethtool_pauseparam *pause ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; pause->autoneg = (__u32 )adapter->fc_autoneg; if ((unsigned int )hw->fc.current_mode == 1U) { pause->rx_pause = 1U; } else if ((unsigned int )hw->fc.current_mode == 2U) { pause->tx_pause = 1U; } else if ((unsigned int )hw->fc.current_mode == 3U) { pause->rx_pause = 1U; pause->tx_pause = 1U; } else { } return; } }
/* igb_set_pauseparam: apply new flow-control settings. Spin-waits on the
 * adapter state bit (ldv_48003/ldv_48004 loop), then either lets autoneg pick
 * the mode (requested_mode = 255, i.e. e1000_fc_default, with a down/up or
 * reset cycle) or forces the mode from pause->rx_pause/tx_pause and pushes it
 * to hardware via igb_force_mac_fc() (copper) or igb_setup_link() (other
 * media). NOTE(review): the early -22 return when byte 1156 of *hw is nonzero
 * is a CIL-flattened field test — presumably an autoneg-failed/unsupported
 * flag; confirm against the struct e1000_hw layout. */
static int igb_set_pauseparam(struct net_device *netdev , struct ethtool_pauseparam *pause ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; int retval ; int tmp___0 ; bool tmp___1 ; s32 tmp___2 ; s32 tmp___3 ;
{ tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; retval = 0; if ((unsigned int )*((unsigned char *)hw + 1156UL) != 0U) { return (-22); } else { } adapter->fc_autoneg = pause->autoneg != 0U; goto ldv_48004; ldv_48003: usleep_range(1000UL, 2000UL); ldv_48004: tmp___0 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___0 != 0) { goto ldv_48003; } else { } if ((int )adapter->fc_autoneg) { hw->fc.requested_mode = 255; tmp___1 = netif_running((struct net_device const *)adapter->netdev); if ((int )tmp___1) { igb_down(adapter); igb_up(adapter); } else { igb_reset(adapter); } } else { if (pause->rx_pause != 0U && pause->tx_pause != 0U) { hw->fc.requested_mode = 3; } else if (pause->rx_pause != 0U && pause->tx_pause == 0U) { hw->fc.requested_mode = 1; } else if (pause->rx_pause == 0U && pause->tx_pause != 0U) { hw->fc.requested_mode = 2; } else if (pause->rx_pause == 0U && pause->tx_pause == 0U) { hw->fc.requested_mode = 0; } else { } hw->fc.current_mode = hw->fc.requested_mode; if ((unsigned int )hw->phy.media_type == 1U) { tmp___2 = igb_force_mac_fc(hw); retval = tmp___2; } else { tmp___3 = igb_setup_link(hw); retval = tmp___3; } } clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return (retval); } }
/* igb_get_msglevel / igb_set_msglevel: trivial accessors for the driver's
 * message-enable mask stored in adapter->msg_enable. */
static u32 igb_get_msglevel(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; return ((u32 )adapter->msg_enable); } } static void igb_set_msglevel(struct net_device *netdev , u32 data ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; adapter->msg_enable = (int )data; return; } }
/* igb_get_regs_len: fixed register-dump size in bytes (2956 = 739 u32 words),
 * matching the buffer filled by igb_get_regs below. */
static int igb_get_regs_len(struct net_device *netdev ) { { return (2956); } }
/* igb_get_regs: ethtool register-dump handler (body continues in the
 * following span). Zeroes the 2956-byte buffer, encodes a version from
 * revision_id/device_id, then snapshots device registers via igb_rd32() and
 * cached adapter->stats counters into regs_buff[]. The raw numeric offsets
 * are MMIO register addresses from the 82575-family register map. */
static void igb_get_regs(struct net_device *netdev , struct ethtool_regs *regs , void *p ) { struct igb_adapter *adapter
; void *tmp ; struct e1000_hw *hw ; u32 *regs_buff ; u8 i ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; regs_buff = (u32 *)p; memset(p, 0, 2956UL); regs->version = (__u32 )((((int )hw->revision_id << 16) | 16777216) | (int )hw->device_id); *regs_buff = igb_rd32(hw, 0U); *(regs_buff + 1UL) = igb_rd32(hw, 8U); *(regs_buff + 2UL) = igb_rd32(hw, 24U); *(regs_buff + 3UL) = igb_rd32(hw, 32U); *(regs_buff + 4UL) = igb_rd32(hw, 36U); *(regs_buff + 5UL) = igb_rd32(hw, 52U); *(regs_buff + 6UL) = igb_rd32(hw, 56U); *(regs_buff + 7UL) = igb_rd32(hw, 3584U); *(regs_buff + 8UL) = igb_rd32(hw, 4096U); *(regs_buff + 9UL) = igb_rd32(hw, 4104U); *(regs_buff + 10UL) = igb_rd32(hw, 4168U); *(regs_buff + 11UL) = igb_rd32(hw, 4172U); *(regs_buff + 12UL) = igb_rd32(hw, 16U); *(regs_buff + 13UL) = igb_rd32(hw, 5408U); *(regs_buff + 14UL) = igb_rd32(hw, 5408U); *(regs_buff + 15UL) = igb_rd32(hw, 5412U); *(regs_buff + 16UL) = igb_rd32(hw, 5416U); *(regs_buff + 17UL) = igb_rd32(hw, 5420U); *(regs_buff + 18UL) = igb_rd32(hw, 5424U); *(regs_buff + 19UL) = igb_rd32(hw, 200U); *(regs_buff + 20UL) = igb_rd32(hw, 200U); *(regs_buff + 21UL) = igb_rd32(hw, 208U); *(regs_buff + 22UL) = igb_rd32(hw, 216U); *(regs_buff + 23UL) = igb_rd32(hw, 16640U); *(regs_buff + 24UL) = igb_rd32(hw, 224U); *(regs_buff + 25UL) = igb_rd32(hw, 23232U); *(regs_buff + 26UL) = igb_rd32(hw, 40U); *(regs_buff + 27UL) = igb_rd32(hw, 44U); *(regs_buff + 28UL) = igb_rd32(hw, 368U); *(regs_buff + 29UL) = igb_rd32(hw, 8544U); *(regs_buff + 30UL) = igb_rd32(hw, 8552U); *(regs_buff + 31UL) = igb_rd32(hw, 9312U); *(regs_buff + 32UL) = igb_rd32(hw, 256U); *(regs_buff + 33UL) = igb_rd32(hw, 20480U); *(regs_buff + 34UL) = igb_rd32(hw, 20484U); *(regs_buff + 35UL) = igb_rd32(hw, 20488U); *(regs_buff + 36UL) = igb_rd32(hw, 22552U); *(regs_buff + 37UL) = igb_rd32(hw, 22556U); *(regs_buff + 38UL) = igb_rd32(hw, 1024U); *(regs_buff + 39UL) = igb_rd32(hw, 1028U); 
*(regs_buff + 40UL) = igb_rd32(hw, 1040U); *(regs_buff + 41UL) = igb_rd32(hw, 13712U); *(regs_buff + 42UL) = igb_rd32(hw, 22528U); *(regs_buff + 43UL) = igb_rd32(hw, 22536U); *(regs_buff + 44UL) = igb_rd32(hw, 22544U); *(regs_buff + 45UL) = igb_rd32(hw, 22584U); *(regs_buff + 46UL) = igb_rd32(hw, 22784U); *(regs_buff + 47UL) = igb_rd32(hw, 16896U); *(regs_buff + 48UL) = igb_rd32(hw, 16904U); *(regs_buff + 49UL) = igb_rd32(hw, 16908U); *(regs_buff + 50UL) = igb_rd32(hw, 16920U); *(regs_buff + 51UL) = igb_rd32(hw, 16924U); *(regs_buff + 52UL) = igb_rd32(hw, 16928U); *(regs_buff + 53UL) = igb_rd32(hw, 16932U); *(regs_buff + 54UL) = (u32 )adapter->stats.crcerrs; *(regs_buff + 55UL) = (u32 )adapter->stats.algnerrc; *(regs_buff + 56UL) = (u32 )adapter->stats.symerrs; *(regs_buff + 57UL) = (u32 )adapter->stats.rxerrc; *(regs_buff + 58UL) = (u32 )adapter->stats.mpc; *(regs_buff + 59UL) = (u32 )adapter->stats.scc; *(regs_buff + 60UL) = (u32 )adapter->stats.ecol; *(regs_buff + 61UL) = (u32 )adapter->stats.mcc; *(regs_buff + 62UL) = (u32 )adapter->stats.latecol; *(regs_buff + 63UL) = (u32 )adapter->stats.colc; *(regs_buff + 64UL) = (u32 )adapter->stats.dc; *(regs_buff + 65UL) = (u32 )adapter->stats.tncrs; *(regs_buff + 66UL) = (u32 )adapter->stats.sec; *(regs_buff + 67UL) = (u32 )adapter->stats.htdpmc; *(regs_buff + 68UL) = (u32 )adapter->stats.rlec; *(regs_buff + 69UL) = (u32 )adapter->stats.xonrxc; *(regs_buff + 70UL) = (u32 )adapter->stats.xontxc; *(regs_buff + 71UL) = (u32 )adapter->stats.xoffrxc; *(regs_buff + 72UL) = (u32 )adapter->stats.xofftxc; *(regs_buff + 73UL) = (u32 )adapter->stats.fcruc; *(regs_buff + 74UL) = (u32 )adapter->stats.prc64; *(regs_buff + 75UL) = (u32 )adapter->stats.prc127; *(regs_buff + 76UL) = (u32 )adapter->stats.prc255; *(regs_buff + 77UL) = (u32 )adapter->stats.prc511; *(regs_buff + 78UL) = (u32 )adapter->stats.prc1023; *(regs_buff + 79UL) = (u32 )adapter->stats.prc1522; *(regs_buff + 80UL) = (u32 )adapter->stats.gprc; *(regs_buff + 81UL) = 
(u32 )adapter->stats.bprc; *(regs_buff + 82UL) = (u32 )adapter->stats.mprc; *(regs_buff + 83UL) = (u32 )adapter->stats.gptc; *(regs_buff + 84UL) = (u32 )adapter->stats.gorc; *(regs_buff + 86UL) = (u32 )adapter->stats.gotc; *(regs_buff + 88UL) = (u32 )adapter->stats.rnbc; *(regs_buff + 89UL) = (u32 )adapter->stats.ruc; *(regs_buff + 90UL) = (u32 )adapter->stats.rfc; *(regs_buff + 91UL) = (u32 )adapter->stats.roc; *(regs_buff + 92UL) = (u32 )adapter->stats.rjc; *(regs_buff + 93UL) = (u32 )adapter->stats.mgprc; *(regs_buff + 94UL) = (u32 )adapter->stats.mgpdc; *(regs_buff + 95UL) = (u32 )adapter->stats.mgptc; *(regs_buff + 96UL) = (u32 )adapter->stats.tor; *(regs_buff + 98UL) = (u32 )adapter->stats.tot; *(regs_buff + 100UL) = (u32 )adapter->stats.tpr; *(regs_buff + 101UL) = (u32 )adapter->stats.tpt; *(regs_buff + 102UL) = (u32 )adapter->stats.ptc64; *(regs_buff + 103UL) = (u32 )adapter->stats.ptc127; *(regs_buff + 104UL) = (u32 )adapter->stats.ptc255; *(regs_buff + 105UL) = (u32 )adapter->stats.ptc511; *(regs_buff + 106UL) = (u32 )adapter->stats.ptc1023; *(regs_buff + 107UL) = (u32 )adapter->stats.ptc1522; *(regs_buff + 108UL) = (u32 )adapter->stats.mptc; *(regs_buff + 109UL) = (u32 )adapter->stats.bptc; *(regs_buff + 110UL) = (u32 )adapter->stats.tsctc; *(regs_buff + 111UL) = (u32 )adapter->stats.iac; *(regs_buff + 112UL) = (u32 )adapter->stats.rpthc; *(regs_buff + 113UL) = (u32 )adapter->stats.hgptc; *(regs_buff + 114UL) = (u32 )adapter->stats.hgorc; *(regs_buff + 116UL) = (u32 )adapter->stats.hgotc; *(regs_buff + 118UL) = (u32 )adapter->stats.lenerrs; *(regs_buff + 119UL) = (u32 )adapter->stats.scvpc; *(regs_buff + 120UL) = (u32 )adapter->stats.hrmpc; i = 0U; goto ldv_48028; ldv_48027: *(regs_buff + (unsigned long )((int )i + 121)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? 
(int )i * 256 + 10252 : (int )i * 64 + 49164)); i = (u8 )((int )i + 1); ldv_48028: ; if ((unsigned int )i <= 3U) { goto ldv_48027; } else { } i = 0U; goto ldv_48031; ldv_48030: *(regs_buff + (unsigned long )((int )i + 125)) = igb_rd32(hw, (u32 )(((int )i + 5408) * 4)); i = (u8 )((int )i + 1); ldv_48031: ; if ((unsigned int )i <= 3U) { goto ldv_48030; } else { } i = 0U; goto ldv_48034; ldv_48033: *(regs_buff + (unsigned long )((int )i + 129)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? ((int )i + 40) * 256 : ((int )i + 768) * 64)); i = (u8 )((int )i + 1); ldv_48034: ; if ((unsigned int )i <= 3U) { goto ldv_48033; } else { } i = 0U; goto ldv_48037; ldv_48036: *(regs_buff + (unsigned long )((int )i + 133)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 10244 : (int )i * 64 + 49156)); i = (u8 )((int )i + 1); ldv_48037: ; if ((unsigned int )i <= 3U) { goto ldv_48036; } else { } i = 0U; goto ldv_48040; ldv_48039: *(regs_buff + (unsigned long )((int )i + 137)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 10248 : (int )i * 64 + 49160)); i = (u8 )((int )i + 1); ldv_48040: ; if ((unsigned int )i <= 3U) { goto ldv_48039; } else { } i = 0U; goto ldv_48043; ldv_48042: *(regs_buff + (unsigned long )((int )i + 141)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 10256 : (int )i * 64 + 49168)); i = (u8 )((int )i + 1); ldv_48043: ; if ((unsigned int )i <= 3U) { goto ldv_48042; } else { } i = 0U; goto ldv_48046; ldv_48045: *(regs_buff + (unsigned long )((int )i + 145)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 10264 : (int )i * 64 + 49176)); i = (u8 )((int )i + 1); ldv_48046: ; if ((unsigned int )i <= 3U) { goto ldv_48045; } else { } i = 0U; goto ldv_48049; ldv_48048: *(regs_buff + (unsigned long )((int )i + 149)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? 
(int )i * 256 + 10280 : (int )i * 64 + 49192)); i = (u8 )((int )i + 1); ldv_48049: ; if ((unsigned int )i <= 3U) { goto ldv_48048; } else { } i = 0U; goto ldv_48052; ldv_48051: *(regs_buff + (unsigned long )((int )i + 153)) = igb_rd32(hw, (u32 )(((int )i + 1440) * 4)); i = (u8 )((int )i + 1); ldv_48052: ; if ((unsigned int )i <= 9U) { goto ldv_48051; } else { } i = 0U; goto ldv_48055; ldv_48054: *(regs_buff + (unsigned long )((int )i + 163)) = igb_rd32(hw, (u32 )(((int )i + 5792) * 4)); i = (u8 )((int )i + 1); ldv_48055: ; if ((unsigned int )i <= 7U) { goto ldv_48054; } else { } i = 0U; goto ldv_48058; ldv_48057: *(regs_buff + (unsigned long )((int )i + 171)) = igb_rd32(hw, (u32 )(((int )i + 5800) * 4)); i = (u8 )((int )i + 1); ldv_48058: ; if ((unsigned int )i <= 7U) { goto ldv_48057; } else { } i = 0U; goto ldv_48061; ldv_48060: *(regs_buff + (unsigned long )((int )i + 179)) = igb_rd32(hw, (u32 )((unsigned int )i <= 15U ? ((int )i + 2688) * 8 : ((int )i + 2700) * 8)); i = (u8 )((int )i + 1); ldv_48061: ; if ((unsigned int )i <= 15U) { goto ldv_48060; } else { } i = 0U; goto ldv_48064; ldv_48063: *(regs_buff + (unsigned long )((int )i + 195)) = igb_rd32(hw, (u32 )((unsigned int )i <= 15U ? (int )i * 8 + 21508 : ((int )i + -16) * 8 + 21732)); i = (u8 )((int )i + 1); ldv_48064: ; if ((unsigned int )i <= 15U) { goto ldv_48063; } else { } i = 0U; goto ldv_48067; ldv_48066: *(regs_buff + (unsigned long )((int )i + 211)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? ((int )i + 56) * 256 : ((int )i + 896) * 64)); i = (u8 )((int )i + 1); ldv_48067: ; if ((unsigned int )i <= 3U) { goto ldv_48066; } else { } i = 0U; goto ldv_48070; ldv_48069: *(regs_buff + (unsigned long )((int )i + 215)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? 
(int )i * 256 + 14340 : (int )i * 64 + 57348)); i = (u8 )((int )i + 1); ldv_48070: ; if ((unsigned int )i <= 3U) { goto ldv_48069; } else { } i = 0U; goto ldv_48073; ldv_48072: *(regs_buff + (unsigned long )((int )i + 219)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 14344 : (int )i * 64 + 57352)); i = (u8 )((int )i + 1); ldv_48073: ; if ((unsigned int )i <= 3U) { goto ldv_48072; } else { } i = 0U; goto ldv_48076; ldv_48075: *(regs_buff + (unsigned long )((int )i + 223)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 14352 : (int )i * 64 + 57360)); i = (u8 )((int )i + 1); ldv_48076: ; if ((unsigned int )i <= 3U) { goto ldv_48075; } else { } i = 0U; goto ldv_48079; ldv_48078: *(regs_buff + (unsigned long )((int )i + 227)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 14360 : (int )i * 64 + 57368)); i = (u8 )((int )i + 1); ldv_48079: ; if ((unsigned int )i <= 3U) { goto ldv_48078; } else { } i = 0U; goto ldv_48082; ldv_48081: *(regs_buff + (unsigned long )((int )i + 231)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 14376 : (int )i * 64 + 57384)); i = (u8 )((int )i + 1); ldv_48082: ; if ((unsigned int )i <= 3U) { goto ldv_48081; } else { } i = 0U; goto ldv_48085; ldv_48084: *(regs_buff + (unsigned long )((int )i + 235)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 14392 : (int )i * 64 + 57400)); i = (u8 )((int )i + 1); ldv_48085: ; if ((unsigned int )i <= 3U) { goto ldv_48084; } else { } i = 0U; goto ldv_48088; ldv_48087: *(regs_buff + (unsigned long )((int )i + 239)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? (int )i * 256 + 14396 : (int )i * 64 + 57404)); i = (u8 )((int )i + 1); ldv_48088: ; if ((unsigned int )i <= 3U) { goto ldv_48087; } else { } i = 0U; goto ldv_48091; ldv_48090: *(regs_buff + (unsigned long )((int )i + 243)) = igb_rd32(hw, (u32 )((unsigned int )i <= 3U ? 
(int )i * 256 + 14356 : (int )i * 64 + 57364)); i = (u8 )((int )i + 1); ldv_48091: ; if ((unsigned int )i <= 3U) { goto ldv_48090; } else { } i = 0U; goto ldv_48094; ldv_48093: *(regs_buff + (unsigned long )((int )i + 247)) = igb_rd32(hw, (u32 )(((int )i + 2824) * 8)); i = (u8 )((int )i + 1); ldv_48094: ; if ((unsigned int )i <= 3U) { goto ldv_48093; } else { } i = 0U; goto ldv_48097; ldv_48096: *(regs_buff + (unsigned long )((int )i + 251)) = igb_rd32(hw, (u32 )(((int )i + 5664) * 4)); i = (u8 )((int )i + 1); ldv_48097: ; if ((unsigned int )i <= 3U) { goto ldv_48096; } else { } i = 0U; goto ldv_48100; ldv_48099: *(regs_buff + (unsigned long )((int )i + 255)) = igb_rd32(hw, (u32 )(((int )i + 5760) * 4)); i = (u8 )((int )i + 1); ldv_48100: ; if ((unsigned int )i <= 31U) { goto ldv_48099; } else { } i = 0U; goto ldv_48103; ldv_48102: *(regs_buff + (unsigned long )((int )i + 287)) = igb_rd32(hw, (u32 )(((int )i + 4608) * 8)); i = (u8 )((int )i + 1); ldv_48103: ; if ((int )((signed char )i) >= 0) { goto ldv_48102; } else { } i = 0U; goto ldv_48106; ldv_48105: *(regs_buff + (unsigned long )((int )i + 415)) = igb_rd32(hw, (u32 )(((int )i + 4864) * 8)); i = (u8 )((int )i + 1); ldv_48106: ; if ((int )((signed char )i) >= 0) { goto ldv_48105; } else { } i = 0U; goto ldv_48109; ldv_48108: *(regs_buff + (unsigned long )((int )i + 543)) = igb_rd32(hw, (u32 )(((int )i + 3040) * 8)); i = (u8 )((int )i + 1); ldv_48109: ; if ((unsigned int )i <= 3U) { goto ldv_48108; } else { } *(regs_buff + 547UL) = igb_rd32(hw, 13328U); *(regs_buff + 548UL) = igb_rd32(hw, 13336U); *(regs_buff + 549UL) = igb_rd32(hw, 13344U); *(regs_buff + 550UL) = igb_rd32(hw, 13360U); if ((unsigned int )hw->mac.type > 3U) { *(regs_buff + 551UL) = (u32 )adapter->stats.o2bgptc; *(regs_buff + 552UL) = (u32 )adapter->stats.b2ospc; *(regs_buff + 553UL) = (u32 )adapter->stats.o2bspc; *(regs_buff + 554UL) = (u32 )adapter->stats.b2ogprc; } else { } if ((unsigned int )hw->mac.type != 2U) { return; } else { } i = 0U; 
goto ldv_48112; ldv_48111: *(regs_buff + (unsigned long )((int )i + 555)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 10252 : ((int )i + 4) * 64 + 49164)); i = (u8 )((int )i + 1); ldv_48112: ; if ((unsigned int )i <= 11U) { goto ldv_48111; } else { } i = 0U; goto ldv_48115; ldv_48114: *(regs_buff + (unsigned long )((int )i + 567)) = igb_rd32(hw, (u32 )(((int )i + 5412) * 4)); i = (u8 )((int )i + 1); ldv_48115: ; if ((unsigned int )i <= 3U) { goto ldv_48114; } else { } i = 0U; goto ldv_48118; ldv_48117: *(regs_buff + (unsigned long )((int )i + 571)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 44) * 256 : ((int )i + 772) * 64)); i = (u8 )((int )i + 1); ldv_48118: ; if ((unsigned int )i <= 11U) { goto ldv_48117; } else { } i = 0U; goto ldv_48121; ldv_48120: *(regs_buff + (unsigned long )((int )i + 583)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 10244 : ((int )i + 4) * 64 + 49156)); i = (u8 )((int )i + 1); ldv_48121: ; if ((unsigned int )i <= 11U) { goto ldv_48120; } else { } i = 0U; goto ldv_48124; ldv_48123: *(regs_buff + (unsigned long )((int )i + 595)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 10248 : ((int )i + 4) * 64 + 49160)); i = (u8 )((int )i + 1); ldv_48124: ; if ((unsigned int )i <= 11U) { goto ldv_48123; } else { } i = 0U; goto ldv_48127; ldv_48126: *(regs_buff + (unsigned long )((int )i + 607)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 10256 : ((int )i + 4) * 64 + 49168)); i = (u8 )((int )i + 1); ldv_48127: ; if ((unsigned int )i <= 11U) { goto ldv_48126; } else { } i = 0U; goto ldv_48130; ldv_48129: *(regs_buff + (unsigned long )((int )i + 619)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 10264 : ((int )i + 4) * 64 + 49176)); i = (u8 )((int )i + 1); ldv_48130: ; if ((unsigned int )i <= 11U) { goto ldv_48129; } else { } i = 0U; goto ldv_48133; ldv_48132: *(regs_buff + (unsigned long )((int )i + 631)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? 
((int )i + 4) * 256 + 10280 : ((int )i + 4) * 64 + 49192)); i = (u8 )((int )i + 1); ldv_48133: ; if ((unsigned int )i <= 11U) { goto ldv_48132; } else { } i = 0U; goto ldv_48136; ldv_48135: *(regs_buff + (unsigned long )((int )i + 643)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 60) * 256 : ((int )i + 900) * 64)); i = (u8 )((int )i + 1); ldv_48136: ; if ((unsigned int )i <= 11U) { goto ldv_48135; } else { } i = 0U; goto ldv_48139; ldv_48138: *(regs_buff + (unsigned long )((int )i + 655)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 14340 : ((int )i + 4) * 64 + 57348)); i = (u8 )((int )i + 1); ldv_48139: ; if ((unsigned int )i <= 11U) { goto ldv_48138; } else { } i = 0U; goto ldv_48142; ldv_48141: *(regs_buff + (unsigned long )((int )i + 667)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 14344 : ((int )i + 4) * 64 + 57352)); i = (u8 )((int )i + 1); ldv_48142: ; if ((unsigned int )i <= 11U) { goto ldv_48141; } else { } i = 0U; goto ldv_48145; ldv_48144: *(regs_buff + (unsigned long )((int )i + 679)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 14352 : ((int )i + 4) * 64 + 57360)); i = (u8 )((int )i + 1); ldv_48145: ; if ((unsigned int )i <= 11U) { goto ldv_48144; } else { } i = 0U; goto ldv_48148; ldv_48147: *(regs_buff + (unsigned long )((int )i + 691)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 14360 : ((int )i + 4) * 64 + 57368)); i = (u8 )((int )i + 1); ldv_48148: ; if ((unsigned int )i <= 11U) { goto ldv_48147; } else { } i = 0U; goto ldv_48151; ldv_48150: *(regs_buff + (unsigned long )((int )i + 703)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 14376 : ((int )i + 4) * 64 + 57384)); i = (u8 )((int )i + 1); ldv_48151: ; if ((unsigned int )i <= 11U) { goto ldv_48150; } else { } i = 0U; goto ldv_48154; ldv_48153: *(regs_buff + (unsigned long )((int )i + 715)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? 
((int )i + 4) * 256 + 14392 : ((int )i + 4) * 64 + 57400)); i = (u8 )((int )i + 1); ldv_48154: ; if ((unsigned int )i <= 11U) { goto ldv_48153; } else { } i = 0U; goto ldv_48157; ldv_48156: *(regs_buff + (unsigned long )((int )i + 727)) = igb_rd32(hw, (u32 )((int )i + 4 <= 3 ? ((int )i + 4) * 256 + 14396 : ((int )i + 4) * 64 + 57404)); i = (u8 )((int )i + 1); ldv_48157: ; if ((unsigned int )i <= 11U) { goto ldv_48156; } else { } return; } } static int igb_get_eeprom_len(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; return ((int )adapter->hw.nvm.word_size * 2); } } static int igb_get_eeprom(struct net_device *netdev , struct ethtool_eeprom *eeprom , u8 *bytes ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u16 *eeprom_buff ; int first_word ; int last_word ; int ret_val ; u16 i ; void *tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; ret_val = 0; if (eeprom->len == 0U) { return (-22); } else { } eeprom->magic = (__u32 )((int )hw->vendor_id | ((int )hw->device_id << 16)); first_word = (int )(eeprom->offset >> 1); last_word = (int )(((eeprom->offset + eeprom->len) - 1U) >> 1); tmp___0 = kmalloc((unsigned long )((last_word - first_word) + 1) * 2UL, 208U); eeprom_buff = (u16 *)tmp___0; if ((unsigned long )eeprom_buff == (unsigned long )((u16 *)0U)) { return (-12); } else { } if ((unsigned int )hw->nvm.type == 2U) { ret_val = (*(hw->nvm.ops.read))(hw, (int )((u16 )first_word), (int )((unsigned int )((int )((u16 )last_word) - (int )((u16 )first_word)) + 1U), eeprom_buff); } else { i = 0U; goto ldv_48177; ldv_48176: ret_val = (*(hw->nvm.ops.read))(hw, (int )((u16 )first_word) + (int )i, 1, eeprom_buff + (unsigned long )i); if (ret_val != 0) { goto ldv_48175; } else { } i = (u16 )((int )i + 1); ldv_48177: ; if ((int )i < (last_word - first_word) + 1) { goto 
ldv_48176; } else { } ldv_48175: ; } i = 0U; goto ldv_48179; ldv_48178: i = (u16 )((int )i + 1); ldv_48179: ; if ((int )i < (last_word - first_word) + 1) { goto ldv_48178; } else { } memcpy((void *)bytes, (void const *)eeprom_buff + ((unsigned long )eeprom->offset & 1UL), (size_t )eeprom->len); kfree((void const *)eeprom_buff); return (ret_val); } } static int igb_set_eeprom(struct net_device *netdev , struct ethtool_eeprom *eeprom , u8 *bytes ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u16 *eeprom_buff ; void *ptr ; int max_len ; int first_word ; int last_word ; int ret_val ; u16 i ; bool tmp___0 ; int tmp___1 ; void *tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; ret_val = 0; if (eeprom->len == 0U) { return (-95); } else { } if ((unsigned int )hw->mac.type > 5U) { tmp___0 = igb_get_flash_presence_i210(hw); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (-95); } else { } } else { } if (eeprom->magic != (__u32 )((int )hw->vendor_id | ((int )hw->device_id << 16))) { return (-14); } else { } max_len = (int )hw->nvm.word_size * 2; first_word = (int )(eeprom->offset >> 1); last_word = (int )(((eeprom->offset + eeprom->len) - 1U) >> 1); tmp___2 = kmalloc((size_t )max_len, 208U); eeprom_buff = (u16 *)tmp___2; if ((unsigned long )eeprom_buff == (unsigned long )((u16 *)0U)) { return (-12); } else { } ptr = (void *)eeprom_buff; if ((int )eeprom->offset & 1) { ret_val = (*(hw->nvm.ops.read))(hw, (int )((u16 )first_word), 1, eeprom_buff); ptr = ptr + 1; } else { } if ((int )(eeprom->offset + eeprom->len) & 1 && ret_val == 0) { ret_val = (*(hw->nvm.ops.read))(hw, (int )((u16 )last_word), 1, eeprom_buff + (unsigned long )(last_word - first_word)); } else { } i = 0U; goto ldv_48196; ldv_48195: i = (u16 )((int )i + 1); ldv_48196: ; if ((int )i < (last_word - first_word) + 1) { goto ldv_48195; } else { } memcpy(ptr, (void const *)bytes, (size_t 
)eeprom->len); i = 0U; goto ldv_48199; ldv_48198: *(eeprom_buff + (unsigned long )i) = *(eeprom_buff + (unsigned long )i); i = (u16 )((int )i + 1); ldv_48199: ; if ((int )i < (last_word - first_word) + 1) { goto ldv_48198; } else { } ret_val = (*(hw->nvm.ops.write))(hw, (int )((u16 )first_word), (int )((unsigned int )((int )((u16 )last_word) - (int )((u16 )first_word)) + 1U), eeprom_buff); if (ret_val == 0) { (*(hw->nvm.ops.update))(hw); } else { } igb_set_fw_version(adapter); kfree((void const *)eeprom_buff); return (ret_val); } } static void igb_get_drvinfo(struct net_device *netdev , struct ethtool_drvinfo *drvinfo ) { struct igb_adapter *adapter ; void *tmp ; char const *tmp___0 ; void *tmp___1 ; void *tmp___2 ; int tmp___3 ; int tmp___4 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; strlcpy((char *)(& drvinfo->driver), (char const *)(& igb_driver_name), 32UL); strlcpy((char *)(& drvinfo->version), (char const *)(& igb_driver_version), 32UL); strlcpy((char *)(& drvinfo->fw_version), (char const *)(& adapter->fw_version), 32UL); tmp___0 = pci_name((struct pci_dev const *)adapter->pdev); strlcpy((char *)(& drvinfo->bus_info), tmp___0, 32UL); tmp___1 = netdev_priv((struct net_device const *)netdev); tmp___2 = netdev_priv((struct net_device const *)netdev); drvinfo->n_stats = ((__u32 )((unsigned long )((struct igb_adapter *)tmp___1)->num_rx_queues) * 5U + (__u32 )(((struct igb_adapter *)tmp___2)->num_tx_queues * 3)) + 50U; drvinfo->testinfo_len = 5U; tmp___3 = igb_get_regs_len(netdev); drvinfo->regdump_len = (__u32 )tmp___3; tmp___4 = igb_get_eeprom_len(netdev); drvinfo->eedump_len = (__u32 )tmp___4; return; } } static void igb_get_ringparam(struct net_device *netdev , struct ethtool_ringparam *ring ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; ring->rx_max_pending = 4096U; ring->tx_max_pending = 4096U; ring->rx_pending = 
(__u32 )adapter->rx_ring_count; ring->tx_pending = (__u32 )adapter->tx_ring_count; return; } } static int igb_set_ringparam(struct net_device *netdev , struct ethtool_ringparam *ring ) { struct igb_adapter *adapter ; void *tmp ; struct igb_ring *temp_ring ; int i ; int err ; u16 new_rx_count ; u16 new_tx_count ; u32 __min1 ; u32 __min2 ; u16 __max1 ; u16 __max2 ; u32 __min1___0 ; u32 __min2___0 ; u16 __max1___0 ; u16 __max2___0 ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; void *tmp___3 ; void *tmp___4 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; err = 0; if (ring->rx_mini_pending != 0U || ring->rx_jumbo_pending != 0U) { return (-22); } else { } __min1 = ring->rx_pending; __min2 = 4096U; new_rx_count = (u16 )(__min1 < __min2 ? __min1 : __min2); __max1 = new_rx_count; __max2 = 80U; new_rx_count = (u16 )((int )__max1 > (int )__max2 ? __max1 : __max2); new_rx_count = (unsigned int )((u16 )((unsigned int )new_rx_count + 7U)) & 65528U; __min1___0 = ring->tx_pending; __min2___0 = 4096U; new_tx_count = (u16 )(__min1___0 < __min2___0 ? __min1___0 : __min2___0); __max1___0 = new_tx_count; __max2___0 = 80U; new_tx_count = (u16 )((int )__max1___0 > (int )__max2___0 ? 
__max1___0 : __max2___0); new_tx_count = (unsigned int )((u16 )((unsigned int )new_tx_count + 7U)) & 65528U; if ((int )adapter->tx_ring_count == (int )new_tx_count && (int )adapter->rx_ring_count == (int )new_rx_count) { return (0); } else { } goto ldv_48234; ldv_48233: usleep_range(1000UL, 2000UL); ldv_48234: tmp___0 = test_and_set_bit(1L, (unsigned long volatile *)(& adapter->state)); if (tmp___0 != 0) { goto ldv_48233; } else { } tmp___1 = netif_running((struct net_device const *)adapter->netdev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { i = 0; goto ldv_48237; ldv_48236: (adapter->tx_ring[i])->count = new_tx_count; i = i + 1; ldv_48237: ; if (adapter->num_tx_queues > i) { goto ldv_48236; } else { } i = 0; goto ldv_48240; ldv_48239: (adapter->rx_ring[i])->count = new_rx_count; i = i + 1; ldv_48240: ; if (adapter->num_rx_queues > i) { goto ldv_48239; } else { } adapter->tx_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } else { } if (adapter->num_tx_queues > adapter->num_rx_queues) { tmp___3 = vmalloc((unsigned long )adapter->num_tx_queues * 4096UL); temp_ring = (struct igb_ring *)tmp___3; } else { tmp___4 = vmalloc((unsigned long )adapter->num_rx_queues * 4096UL); temp_ring = (struct igb_ring *)tmp___4; } if ((unsigned long )temp_ring == (unsigned long )((struct igb_ring *)0)) { err = -12; goto clear_reset; } else { } igb_down(adapter); if ((int )adapter->tx_ring_count != (int )new_tx_count) { i = 0; goto ldv_48248; ldv_48247: memcpy((void *)temp_ring + (unsigned long )i, (void const *)adapter->tx_ring[i], 4096UL); (temp_ring + (unsigned long )i)->count = new_tx_count; err = igb_setup_tx_resources(temp_ring + (unsigned long )i); if (err != 0) { goto ldv_48244; ldv_48243: i = i - 1; igb_free_tx_resources(temp_ring + (unsigned long )i); ldv_48244: ; if (i != 0) { goto ldv_48243; } else { } goto err_setup; } else { } i = i + 1; ldv_48248: ; if (adapter->num_tx_queues > i) { goto ldv_48247; } else { } i = 
0; goto ldv_48251; ldv_48250: igb_free_tx_resources(adapter->tx_ring[i]); memcpy((void *)adapter->tx_ring[i], (void const *)temp_ring + (unsigned long )i, 4096UL); i = i + 1; ldv_48251: ; if (adapter->num_tx_queues > i) { goto ldv_48250; } else { } adapter->tx_ring_count = new_tx_count; } else { } if ((int )adapter->rx_ring_count != (int )new_rx_count) { i = 0; goto ldv_48257; ldv_48256: memcpy((void *)temp_ring + (unsigned long )i, (void const *)adapter->rx_ring[i], 4096UL); (temp_ring + (unsigned long )i)->count = new_rx_count; err = igb_setup_rx_resources(temp_ring + (unsigned long )i); if (err != 0) { goto ldv_48254; ldv_48253: i = i - 1; igb_free_rx_resources(temp_ring + (unsigned long )i); ldv_48254: ; if (i != 0) { goto ldv_48253; } else { } goto err_setup; } else { } i = i + 1; ldv_48257: ; if (adapter->num_rx_queues > i) { goto ldv_48256; } else { } i = 0; goto ldv_48260; ldv_48259: igb_free_rx_resources(adapter->rx_ring[i]); memcpy((void *)adapter->rx_ring[i], (void const *)temp_ring + (unsigned long )i, 4096UL); i = i + 1; ldv_48260: ; if (adapter->num_rx_queues > i) { goto ldv_48259; } else { } adapter->rx_ring_count = new_rx_count; } else { } err_setup: igb_up(adapter); vfree((void const *)temp_ring); clear_reset: clear_bit(1L, (unsigned long volatile *)(& adapter->state)); return (err); } } static struct igb_reg_test reg_test_i210[22U] = { {40U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {44U, 256U, 1U, 1U, 65535U, 4294967295U}, {48U, 256U, 1U, 1U, 65535U, 4294967295U}, {10240U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {10244U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {10248U, 256U, 4U, 1U, 1048448U, 1048575U}, {10264U, 256U, 4U, 1U, 65535U, 65535U}, {8552U, 256U, 1U, 1U, 65520U, 65520U}, {368U, 256U, 1U, 1U, 65535U, 65535U}, {1040U, 256U, 1U, 1U, 1073741823U, 1073741823U}, {14336U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {14340U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {14344U, 256U, 4U, 1U, 1048448U, 1048575U}, {14360U, 256U, 4U, 1U, 65535U, 
65535U}, {256U, 256U, 1U, 2U, 4294967295U, 0U}, {256U, 256U, 1U, 2U, 80720126U, 4194299U}, {256U, 256U, 1U, 2U, 80720126U, 4294967295U}, {1024U, 256U, 1U, 2U, 4294967295U, 0U}, {21504U, 0U, 16U, 5U, 4294967295U, 4294967295U}, {21504U, 0U, 16U, 6U, 2416967679U, 4294967295U}, {20992U, 0U, 128U, 4U, 4294967295U, 4294967295U}, {0U, 0U, 0U, 0U, 0U, 0U}}; static struct igb_reg_test reg_test_i350[33U] = { {40U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {44U, 256U, 1U, 1U, 65535U, 4294967295U}, {48U, 256U, 1U, 1U, 65535U, 4294967295U}, {56U, 256U, 1U, 1U, 4294901760U, 4294901760U}, {10240U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {10244U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {10248U, 256U, 4U, 1U, 1048448U, 1048575U}, {49408U, 64U, 4U, 1U, 4294967168U, 4294967295U}, {49412U, 64U, 4U, 1U, 4294967295U, 4294967295U}, {49416U, 64U, 4U, 1U, 1048448U, 1048575U}, {10264U, 256U, 4U, 1U, 65535U, 65535U}, {49432U, 64U, 4U, 1U, 65535U, 65535U}, {8552U, 256U, 1U, 1U, 65520U, 65520U}, {368U, 256U, 1U, 1U, 65535U, 65535U}, {1040U, 256U, 1U, 1U, 1073741823U, 1073741823U}, {14336U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {14340U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {14344U, 256U, 4U, 1U, 1048448U, 1048575U}, {57600U, 64U, 4U, 1U, 4294967168U, 4294967295U}, {57604U, 64U, 4U, 1U, 4294967295U, 4294967295U}, {57608U, 64U, 4U, 1U, 1048448U, 1048575U}, {14360U, 256U, 4U, 1U, 65535U, 65535U}, {57624U, 64U, 4U, 1U, 65535U, 65535U}, {256U, 256U, 1U, 2U, 4294967295U, 0U}, {256U, 256U, 1U, 2U, 80720126U, 4194299U}, {256U, 256U, 1U, 2U, 80720126U, 4294967295U}, {1024U, 256U, 1U, 2U, 4294967295U, 0U}, {21504U, 0U, 16U, 5U, 4294967295U, 4294967295U}, {21504U, 0U, 16U, 6U, 3288334335U, 4294967295U}, {21728U, 0U, 16U, 5U, 4294967295U, 4294967295U}, {21728U, 0U, 16U, 6U, 3288334335U, 4294967295U}, {20992U, 0U, 128U, 4U, 4294967295U, 4294967295U}, {0U, 0U, 0U, 0U, 0U, 0U}}; static struct igb_reg_test reg_test_82580[33U] = { {40U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {44U, 256U, 1U, 1U, 
65535U, 4294967295U}, {48U, 256U, 1U, 1U, 65535U, 4294967295U}, {56U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {10240U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {10244U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {10248U, 256U, 4U, 1U, 1048560U, 1048575U}, {49408U, 64U, 4U, 1U, 4294967168U, 4294967295U}, {49412U, 64U, 4U, 1U, 4294967295U, 4294967295U}, {49416U, 64U, 4U, 1U, 1048560U, 1048575U}, {10264U, 256U, 4U, 1U, 65535U, 65535U}, {49432U, 64U, 4U, 1U, 65535U, 65535U}, {8552U, 256U, 1U, 1U, 65520U, 65520U}, {368U, 256U, 1U, 1U, 65535U, 65535U}, {1040U, 256U, 1U, 1U, 1073741823U, 1073741823U}, {14336U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {14340U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {14344U, 256U, 4U, 1U, 1048560U, 1048575U}, {57600U, 64U, 4U, 1U, 4294967168U, 4294967295U}, {57604U, 64U, 4U, 1U, 4294967295U, 4294967295U}, {57608U, 64U, 4U, 1U, 1048560U, 1048575U}, {14360U, 256U, 4U, 1U, 65535U, 65535U}, {57624U, 64U, 4U, 1U, 65535U, 65535U}, {256U, 256U, 1U, 2U, 4294967295U, 0U}, {256U, 256U, 1U, 2U, 80720126U, 4194299U}, {256U, 256U, 1U, 2U, 80720126U, 4294967295U}, {1024U, 256U, 1U, 2U, 4294967295U, 0U}, {21504U, 0U, 16U, 5U, 4294967295U, 4294967295U}, {21504U, 0U, 16U, 6U, 2214592511U, 4294967295U}, {21728U, 0U, 8U, 5U, 4294967295U, 4294967295U}, {21728U, 0U, 8U, 6U, 2214592511U, 4294967295U}, {20992U, 0U, 128U, 4U, 4294967295U, 4294967295U}, {0U, 0U, 0U, 0U, 0U, 0U}}; static struct igb_reg_test reg_test_82576[35U] = { {40U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {44U, 256U, 1U, 1U, 65535U, 4294967295U}, {48U, 256U, 1U, 1U, 65535U, 4294967295U}, {56U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {10240U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {10244U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {10248U, 256U, 4U, 1U, 1048560U, 1048575U}, {49408U, 64U, 12U, 1U, 4294967168U, 4294967295U}, {49412U, 64U, 12U, 1U, 4294967295U, 4294967295U}, {49416U, 64U, 12U, 1U, 1048560U, 1048575U}, {10280U, 256U, 4U, 3U, 0U, 33554432U}, {49448U, 64U, 12U, 3U, 0U, 33554432U}, 
{10264U, 256U, 4U, 1U, 65535U, 65535U}, {49432U, 64U, 12U, 1U, 65535U, 65535U}, {10280U, 256U, 4U, 3U, 0U, 0U}, {49448U, 64U, 12U, 3U, 0U, 0U}, {8552U, 256U, 1U, 1U, 65520U, 65520U}, {368U, 256U, 1U, 1U, 65535U, 65535U}, {1040U, 256U, 1U, 1U, 1073741823U, 1073741823U}, {14336U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {14340U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {14344U, 256U, 4U, 1U, 1048560U, 1048575U}, {57600U, 64U, 12U, 1U, 4294967168U, 4294967295U}, {57604U, 64U, 12U, 1U, 4294967295U, 4294967295U}, {57608U, 64U, 12U, 1U, 1048560U, 1048575U}, {256U, 256U, 1U, 2U, 4294967295U, 0U}, {256U, 256U, 1U, 2U, 80720126U, 4194299U}, {256U, 256U, 1U, 2U, 80720126U, 4294967295U}, {1024U, 256U, 1U, 2U, 4294967295U, 0U}, {21504U, 0U, 16U, 5U, 4294967295U, 4294967295U}, {21504U, 0U, 16U, 6U, 2214592511U, 4294967295U}, {21728U, 0U, 8U, 5U, 4294967295U, 4294967295U}, {21728U, 0U, 8U, 6U, 2214592511U, 4294967295U}, {20992U, 0U, 128U, 4U, 4294967295U, 4294967295U}, {0U, 0U, 0U, 0U, 0U, 0U}}; static struct igb_reg_test reg_test_82575[25U] = { {40U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {44U, 256U, 1U, 1U, 65535U, 4294967295U}, {48U, 256U, 1U, 1U, 65535U, 4294967295U}, {56U, 256U, 1U, 1U, 4294967295U, 4294967295U}, {10240U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {10244U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {10248U, 256U, 4U, 1U, 1048448U, 1048575U}, {10280U, 256U, 4U, 3U, 0U, 33554432U}, {10264U, 256U, 4U, 1U, 65535U, 65535U}, {10280U, 256U, 4U, 3U, 0U, 0U}, {8552U, 256U, 1U, 1U, 65520U, 65520U}, {368U, 256U, 1U, 1U, 65535U, 65535U}, {1040U, 256U, 1U, 1U, 1073741823U, 1073741823U}, {14336U, 256U, 4U, 1U, 4294967168U, 4294967295U}, {14340U, 256U, 4U, 1U, 4294967295U, 4294967295U}, {14344U, 256U, 4U, 1U, 1048448U, 1048575U}, {256U, 256U, 1U, 2U, 4294967295U, 0U}, {256U, 256U, 1U, 2U, 80720894U, 4194299U}, {256U, 256U, 1U, 2U, 80720894U, 4294967295U}, {1024U, 256U, 1U, 2U, 4294967295U, 0U}, {376U, 256U, 1U, 1U, 3221291007U, 65535U}, {21504U, 0U, 16U, 5U, 4294967295U, 
4294967295U}, {21504U, 0U, 16U, 6U, 2148532223U, 4294967295U}, {20992U, 0U, 128U, 4U, 4294967295U, 4294967295U}, {0U, 0U, 0U, 0U, 0U, 0U}}; static bool reg_pattern_test(struct igb_adapter *adapter , u64 *data , int reg , u32 mask , u32 write ) { struct e1000_hw *hw ; u32 pat ; u32 val ; u32 _test[4U] ; u8 *hw_addr ; u8 *__var ; long tmp ; u32 tmp___0 ; { hw = & adapter->hw; _test[0] = 1515870810U; _test[1] = 2779096485U; _test[2] = 0U; _test[3] = 4294967295U; pat = 0U; goto ldv_48291; ldv_48290: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(_test[pat] & write, (void volatile *)hw_addr + (unsigned long )reg); } else { } tmp___0 = igb_rd32(hw, (u32 )reg); val = tmp___0 & mask; if (((_test[pat] & write) & mask) != val) { dev_err((struct device const *)(& (adapter->pdev)->dev), "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (_test[pat] & write) & mask); *data = (u64 )reg; return (1); } else { } pat = pat + 1U; ldv_48291: ; if (pat <= 3U) { goto ldv_48290; } else { } return (0); } } static bool reg_set_and_check(struct igb_adapter *adapter , u64 *data , int reg , u32 mask , u32 write ) { struct e1000_hw *hw ; u32 val ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(write & mask, (void volatile *)hw_addr + (unsigned long )reg); } else { } val = igb_rd32(hw, (u32 )reg); if (((write ^ val) & mask) != 0U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, val & mask, write & mask); *data = (u64 )reg; return (1); } else { } return (0); } } static int igb_reg_test(struct igb_adapter *adapter , u64 *data ) { struct e1000_hw *hw ; struct igb_reg_test *test ; u32 value 
; u32 before ; u32 after ; u32 i ; u32 toggle ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u32 tmp___1 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___2 ; bool tmp___3 ; bool tmp___4 ; bool tmp___5 ; bool tmp___6 ; bool tmp___7 ; { hw = & adapter->hw; switch ((unsigned int )adapter->hw.mac.type) { case 4U: ; case 5U: test = (struct igb_reg_test *)(& reg_test_i350); toggle = 2146431999U; goto ldv_48318; case 6U: ; case 7U: test = (struct igb_reg_test *)(& reg_test_i210); toggle = 2146431999U; goto ldv_48318; case 3U: test = (struct igb_reg_test *)(& reg_test_82580); toggle = 2146431999U; goto ldv_48318; case 2U: test = (struct igb_reg_test *)(& reg_test_82576); toggle = 2147480575U; goto ldv_48318; default: test = (struct igb_reg_test *)(& reg_test_82575); toggle = 2147480575U; goto ldv_48318; } ldv_48318: before = igb_rd32(hw, 8U); tmp = igb_rd32(hw, 8U); value = tmp & toggle; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(toggle, (void volatile *)hw_addr + 8U); } else { } tmp___1 = igb_rd32(hw, 8U); after = tmp___1 & toggle; if (value != after) { dev_err((struct device const *)(& (adapter->pdev)->dev), "failed STATUS register test got: 0x%08X expected: 0x%08X\n", after, value); *data = 1ULL; return (1); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(before, (void volatile *)hw_addr___0 + 8U); } else { } goto ldv_48341; ldv_48340: i = 0U; goto ldv_48338; ldv_48337: ; switch ((int )test->test_type) { case 1: tmp___3 = reg_pattern_test(adapter, data, (int )((u32 )test->reg + (u32 )test->reg_offset * i), test->mask, test->write); if ((int )tmp___3) { return (1); } else { } goto ldv_48331; case 2: tmp___4 = reg_set_and_check(adapter, data, (int )((u32 )test->reg + (u32 
)test->reg_offset * i), test->mask, test->write); if ((int )tmp___4) { return (1); } else { } goto ldv_48331; case 3: writel(test->write, (void volatile *)(adapter->hw.hw_addr + ((unsigned long )test->reg + (unsigned long )((u32 )test->reg_offset * i)))); goto ldv_48331; case 4: tmp___5 = reg_pattern_test(adapter, data, (int )((u32 )test->reg + i * 4U), test->mask, test->write); if ((int )tmp___5) { return (1); } else { } goto ldv_48331; case 5: tmp___6 = reg_pattern_test(adapter, data, (int )((u32 )test->reg + i * 8U), test->mask, test->write); if ((int )tmp___6) { return (1); } else { } goto ldv_48331; case 6: tmp___7 = reg_pattern_test(adapter, data, (int )(((u32 )test->reg + i * 8U) + 4U), test->mask, test->write); if ((int )tmp___7) { return (1); } else { } goto ldv_48331; } ldv_48331: i = i + 1U; ldv_48338: ; if ((u32 )test->array_len > i) { goto ldv_48337; } else { } test = test + 1; ldv_48341: ; if ((unsigned int )test->reg != 0U) { goto ldv_48340; } else { } *data = 0ULL; return (0); } } static int igb_eeprom_test(struct igb_adapter *adapter , u64 *data ) { struct e1000_hw *hw ; s32 tmp ; bool tmp___0 ; s32 tmp___1 ; { hw = & adapter->hw; *data = 0ULL; switch ((unsigned int )hw->mac.type) { case 6U: ; case 7U: tmp___0 = igb_get_flash_presence_i210(hw); if ((int )tmp___0) { tmp = (*(adapter->hw.nvm.ops.validate))(& adapter->hw); if (tmp < 0) { *data = 2ULL; } else { } } else { } goto ldv_48350; default: tmp___1 = (*(adapter->hw.nvm.ops.validate))(& adapter->hw); if (tmp___1 < 0) { *data = 2ULL; } else { } goto ldv_48350; } ldv_48350: ; return ((int )*data); } } static irqreturn_t igb_test_intr(int irq , void *data ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; u32 tmp ; { adapter = (struct igb_adapter *)data; hw = & adapter->hw; tmp = igb_rd32(hw, 192U); adapter->test_icr = adapter->test_icr | tmp; return (1); } } static int igb_intr_test(struct igb_adapter *adapter , u64 *data ) { struct e1000_hw *hw ; struct net_device *netdev ; u32 mask ; u32 
ics_mask ; u32 i ; u32 shared_int ; u32 irq ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; u8 *hw_addr ; u8 *__var ; long tmp___3 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___4 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___5 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___6 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___7 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___8 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___9 ; u8 *hw_addr___6 ; u8 *__var___6 ; long tmp___10 ; u8 *hw_addr___7 ; u8 *__var___7 ; long tmp___11 ; u8 *hw_addr___8 ; u8 *__var___8 ; long tmp___12 ; u8 *hw_addr___9 ; u8 *__var___9 ; long tmp___13 ; { hw = & adapter->hw; netdev = adapter->netdev; i = 0U; shared_int = 1U; irq = (adapter->pdev)->irq; *data = 0ULL; if ((adapter->flags & 8192U) != 0U) { tmp = ldv_request_irq_101(adapter->msix_entries[0].vector, & igb_test_intr, 0UL, (char const *)(& netdev->name), (void *)adapter); if (tmp != 0) { *data = 1ULL; return (-1); } else { } } else if ((int )adapter->flags & 1) { shared_int = 0U; tmp___0 = ldv_request_irq_19(irq, & igb_test_intr, 0UL, (char const *)(& netdev->name), (void *)adapter); if (tmp___0 != 0) { *data = 1ULL; return (-1); } else { } } else { tmp___2 = ldv_request_irq_20(irq, & igb_test_intr, 256UL, (char const *)(& netdev->name), (void *)adapter); if (tmp___2 == 0) { shared_int = 0U; } else { tmp___1 = ldv_request_irq_23(irq, & igb_test_intr, 128UL, (char const *)(& netdev->name), (void *)adapter); if (tmp___1 != 0) { *data = 1ULL; return (-1); } else { } } } _dev_info((struct device const *)(& (adapter->pdev)->dev), "testing %s interrupt\n", shared_int != 0U ? 
(char *)"shared" : (char *)"unshared"); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(4294967295U, (void volatile *)hw_addr + 216U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 11000UL); switch ((unsigned int )hw->mac.type) { case 1U: ics_mask = 938770141U; goto ldv_48373; case 2U: ics_mask = 2010446845U; goto ldv_48373; case 3U: ics_mask = 2010971861U; goto ldv_48373; case 4U: ; case 5U: ; case 6U: ; case 7U: ics_mask = 2010971861U; goto ldv_48373; default: ics_mask = 2147483647U; goto ldv_48373; } ldv_48373: ; goto ldv_48411; ldv_48410: mask = (u32 )(1 << (int )i); if ((mask & ics_mask) == 0U) { goto ldv_48381; } else { } if (shared_int == 0U) { adapter->test_icr = 0U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(4294967295U, (void volatile *)hw_addr___0 + 192U); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(mask, (void volatile *)hw_addr___1 + 216U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(mask, (void volatile *)hw_addr___2 + 200U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 11000UL); if ((adapter->test_icr & mask) != 0U) { *data = 3ULL; goto ldv_48391; } else { } } else { } adapter->test_icr = 0U; __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(4294967295U, (void volatile *)hw_addr___3 + 192U); } else 
{ } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___8 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___8 == 0L) { writel(mask, (void volatile *)hw_addr___4 + 208U); } else { } __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___9 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___9 == 0L) { writel(mask, (void volatile *)hw_addr___5 + 200U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 11000UL); if ((adapter->test_icr & mask) == 0U) { *data = 4ULL; goto ldv_48391; } else { } if (shared_int == 0U) { adapter->test_icr = 0U; __var___6 = (u8 *)0U; hw_addr___6 = *((u8 * volatile *)(& hw->hw_addr)); tmp___10 = ldv__builtin_expect((unsigned long )hw_addr___6 == (unsigned long )((u8 *)0U), 0L); if (tmp___10 == 0L) { writel(4294967295U, (void volatile *)hw_addr___6 + 192U); } else { } __var___7 = (u8 *)0U; hw_addr___7 = *((u8 * volatile *)(& hw->hw_addr)); tmp___11 = ldv__builtin_expect((unsigned long )hw_addr___7 == (unsigned long )((u8 *)0U), 0L); if (tmp___11 == 0L) { writel(~ mask, (void volatile *)hw_addr___7 + 216U); } else { } __var___8 = (u8 *)0U; hw_addr___8 = *((u8 * volatile *)(& hw->hw_addr)); tmp___12 = ldv__builtin_expect((unsigned long )hw_addr___8 == (unsigned long )((u8 *)0U), 0L); if (tmp___12 == 0L) { writel(~ mask, (void volatile *)hw_addr___8 + 200U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 11000UL); if ((adapter->test_icr & mask) != 0U) { *data = 5ULL; goto ldv_48391; } else { } } else { } ldv_48381: i = i + 1U; ldv_48411: ; if (i <= 30U) { goto ldv_48410; } else { } ldv_48391: __var___9 = (u8 *)0U; hw_addr___9 = *((u8 * volatile *)(& hw->hw_addr)); tmp___13 = ldv__builtin_expect((unsigned long )hw_addr___9 == (unsigned long )((u8 *)0U), 0L); if (tmp___13 == 0L) { writel(4294967295U, (void volatile *)hw_addr___9 + 216U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 11000UL); if 
((adapter->flags & 8192U) != 0U) { ldv_free_irq_105(adapter->msix_entries[0].vector, (void *)adapter); } else { ldv_free_irq_106(irq, (void *)adapter); } return ((int )*data); } } static void igb_free_desc_rings(struct igb_adapter *adapter ) { { igb_free_tx_resources(& adapter->test_tx_ring); igb_free_rx_resources(& adapter->test_rx_ring); return; } } static int igb_setup_desc_rings(struct igb_adapter *adapter ) { struct igb_ring *tx_ring ; struct igb_ring *rx_ring ; struct e1000_hw *hw ; int ret_val ; int tmp ; int tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; int tmp___2 ; { tx_ring = & adapter->test_tx_ring; rx_ring = & adapter->test_rx_ring; hw = & adapter->hw; tx_ring->count = 256U; tx_ring->dev = & (adapter->pdev)->dev; tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = (u8 )adapter->vfs_allocated_count; tmp = igb_setup_tx_resources(tx_ring); if (tmp != 0) { ret_val = 1; goto err_nomem; } else { } igb_setup_tctl(adapter); igb_configure_tx_ring(adapter, tx_ring); rx_ring->count = 256U; rx_ring->dev = & (adapter->pdev)->dev; rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = (u8 )adapter->vfs_allocated_count; tmp___0 = igb_setup_rx_resources(rx_ring); if (tmp___0 != 0) { ret_val = 3; goto err_nomem; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(adapter->vfs_allocated_count << 3, (void volatile *)hw_addr + 22552U); } else { } igb_setup_rctl(adapter); igb_configure_rx_ring(adapter, rx_ring); tmp___2 = igb_desc_unused(rx_ring); igb_alloc_rx_buffers(rx_ring, (int )((u16 )tmp___2)); return (0); err_nomem: igb_free_desc_rings(adapter); return (ret_val); } } static void igb_phy_disable_receiver(struct igb_adapter *adapter ) { struct e1000_hw *hw ; { hw = & adapter->hw; igb_write_phy_reg(hw, 29U, 31); igb_write_phy_reg(hw, 30U, 36860); igb_write_phy_reg(hw, 29U, 26); igb_write_phy_reg(hw, 30U, 36848); return; } } 
static int igb_integrated_phy_loopback(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 ctrl_reg ; u8 *hw_addr ; u8 *__var ; long tmp ; unsigned long __ms ; unsigned long tmp___0 ; { hw = & adapter->hw; ctrl_reg = 0U; hw->mac.autoneg = 0; if ((unsigned int )hw->phy.type == 2U) { if (hw->phy.id != 21040128U) { igb_write_phy_reg(hw, 16U, 2056); igb_write_phy_reg(hw, 0U, 37184); igb_write_phy_reg(hw, 0U, 33088); } else { igb_write_phy_reg(hw, 22U, 0); igb_write_phy_reg(hw, 0U, 16704); } } else if ((unsigned int )hw->phy.type == 8U) { igb_write_phy_reg(hw, 19U, 32833); } else { } msleep(50U); igb_write_phy_reg(hw, 0U, 16704); ctrl_reg = igb_rd32(hw, 0U); ctrl_reg = ctrl_reg & 4294966527U; ctrl_reg = ctrl_reg | 6721U; if ((unsigned int )hw->phy.type == 2U) { ctrl_reg = ctrl_reg | 128U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(ctrl_reg, (void volatile *)hw_addr); } else { } if ((unsigned int )hw->phy.type == 2U) { igb_phy_disable_receiver(adapter); } else { } __ms = 500UL; goto ldv_48443; ldv_48442: __const_udelay(4295000UL); ldv_48443: tmp___0 = __ms; __ms = __ms - 1UL; if (tmp___0 != 0UL) { goto ldv_48442; } else { } return (0); } } static int igb_set_phy_loopback(struct igb_adapter *adapter ) { int tmp ; { tmp = igb_integrated_phy_loopback(adapter); return (tmp); } } static int igb_setup_loopback_test(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 reg ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___2 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___3 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___4 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___5 ; u8 *hw_addr___6 ; u8 *__var___6 ; long tmp___6 ; int tmp___7 ; { hw = & adapter->hw; reg = igb_rd32(hw, 24U); if ((reg & 
12582912U) != 0U) { if ((((((unsigned int )hw->device_id == 1080U || (unsigned int )hw->device_id == 1082U) || (unsigned int )hw->device_id == 1084U) || (unsigned int )hw->device_id == 1088U) || (unsigned int )hw->device_id == 8001U) || (unsigned int )hw->device_id == 8005U) { reg = igb_rd32(hw, 36U); reg = (reg & 4294901760U) | 4U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(reg, (void volatile *)hw_addr + 36U); } else { } reg = igb_rd32(hw, 3600U); reg = reg | 16U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg, (void volatile *)hw_addr___0 + 3600U); } else { } } else { } reg = igb_rd32(hw, 256U); reg = reg | 192U; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(reg, (void volatile *)hw_addr___1 + 256U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(1040U, (void volatile *)hw_addr___2 + 36U); } else { } reg = igb_rd32(hw, 0U); reg = reg & 3892314103U; reg = reg | 65U; __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(reg, (void volatile *)hw_addr___3); } else { } reg = igb_rd32(hw, 52U); reg = reg & 4294967291U; __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(reg, (void volatile *)hw_addr___4 + 
52U); } else { } if ((unsigned int )hw->mac.type > 2U) { reg = igb_rd32(hw, 16896U); reg = reg | 1U; __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(reg, (void volatile *)hw_addr___5 + 16896U); } else { } } else { } reg = igb_rd32(hw, 16904U); reg = reg & 4294901759U; reg = reg | 61U; __var___6 = (u8 *)0U; hw_addr___6 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___6 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(reg, (void volatile *)hw_addr___6 + 16904U); } else { } return (0); } else { } tmp___7 = igb_set_phy_loopback(adapter); return (tmp___7); } } static void igb_loopback_cleanup(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 rctl ; u16 phy_reg ; u32 reg ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; { hw = & adapter->hw; if (((((unsigned int )hw->device_id == 1080U || (unsigned int )hw->device_id == 1082U) || (unsigned int )hw->device_id == 1084U) || (unsigned int )hw->device_id == 1088U) || (unsigned int )hw->device_id == 8001U) { reg = igb_rd32(hw, 36U); reg = (reg & 4294901760U) | 4U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(reg, (void volatile *)hw_addr + 36U); } else { } reg = igb_rd32(hw, 3600U); reg = reg & 4294967279U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg, (void volatile *)hw_addr___0 + 3600U); } else { } } else { } rctl = igb_rd32(hw, 256U); rctl = rctl & 4294967103U; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = 
ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(rctl, (void volatile *)hw_addr___1 + 256U); } else { } hw->mac.autoneg = 1; igb_read_phy_reg(hw, 0U, & phy_reg); if (((int )phy_reg & 16384) != 0) { phy_reg = (unsigned int )phy_reg & 49151U; igb_write_phy_reg(hw, 0U, (int )phy_reg); igb_phy_sw_reset(hw); } else { } return; } } static void igb_create_lbtest_frame(struct sk_buff *skb , unsigned int frame_size ) { { memset((void *)skb->data, 255, (size_t )frame_size); frame_size = frame_size / 2U; memset((void *)skb->data + (unsigned long )frame_size, 170, (size_t )(frame_size - 1U)); memset((void *)skb->data + (unsigned long )(frame_size + 10U), 190, 1UL); memset((void *)skb->data + (unsigned long )(frame_size + 12U), 175, 1UL); return; } } static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer , unsigned int frame_size ) { unsigned char *data ; bool match ; void *tmp ; { match = 1; frame_size = frame_size >> 1; tmp = kmap(rx_buffer->page); data = (unsigned char *)tmp; if (((unsigned int )*(data + 3UL) != 255U || (unsigned int )*(data + (unsigned long )(frame_size + 10U)) != 190U) || (unsigned int )*(data + (unsigned long )(frame_size + 12U)) != 175U) { match = 0; } else { } kunmap(rx_buffer->page); return ((int )match); } } static int igb_clean_test_rings(struct igb_ring *rx_ring , struct igb_ring *tx_ring , unsigned int size ) { union e1000_adv_rx_desc *rx_desc ; struct igb_rx_buffer *rx_buffer_info ; struct igb_tx_buffer *tx_buffer_info ; u16 rx_ntc ; u16 tx_ntc ; u16 count ; int tmp ; __le32 tmp___0 ; struct netdev_queue *tmp___1 ; { count = 0U; rx_ntc = rx_ring->next_to_clean; tx_ntc = tx_ring->next_to_clean; rx_desc = (union e1000_adv_rx_desc *)rx_ring->desc + (unsigned long )rx_ntc; goto ldv_48515; ldv_48514: rx_buffer_info = rx_ring->__annonCompField117.rx_buffer_info + (unsigned long )rx_ntc; dma_sync_single_for_cpu(rx_ring->dev, rx_buffer_info->dma, 2048UL, 2); tmp = 
igb_check_lbtest_frame(rx_buffer_info, size); if (tmp != 0) { count = (u16 )((int )count + 1); } else { } dma_sync_single_for_device(rx_ring->dev, rx_buffer_info->dma, 2048UL, 2); tx_buffer_info = tx_ring->__annonCompField117.tx_buffer_info + (unsigned long )tx_ntc; igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); rx_ntc = (u16 )((int )rx_ntc + 1); if ((int )rx_ring->count == (int )rx_ntc) { rx_ntc = 0U; } else { } tx_ntc = (u16 )((int )tx_ntc + 1); if ((int )tx_ring->count == (int )tx_ntc) { tx_ntc = 0U; } else { } rx_desc = (union e1000_adv_rx_desc *)rx_ring->desc + (unsigned long )rx_ntc; ldv_48515: tmp___0 = igb_test_staterr(rx_desc, 1U); if (tmp___0 != 0U) { goto ldv_48514; } else { } tmp___1 = txring_txq((struct igb_ring const *)tx_ring); netdev_tx_reset_queue(tmp___1); igb_alloc_rx_buffers(rx_ring, (int )count); rx_ring->next_to_clean = rx_ntc; tx_ring->next_to_clean = tx_ntc; return ((int )count); } } static int igb_run_loopback_test(struct igb_adapter *adapter ) { struct igb_ring *tx_ring ; struct igb_ring *rx_ring ; u16 i ; u16 j ; u16 lc ; u16 good_cnt ; int ret_val ; unsigned int size ; netdev_tx_t tx_ret_val ; struct sk_buff *skb ; int tmp ; { tx_ring = & adapter->test_tx_ring; rx_ring = & adapter->test_rx_ring; ret_val = 0; size = 256U; skb = alloc_skb(size, 208U); if ((unsigned long )skb == (unsigned long )((struct sk_buff *)0)) { return (11); } else { } igb_create_lbtest_frame(skb, size); skb_put(skb, size); if ((int )rx_ring->count <= (int )tx_ring->count) { lc = (unsigned int )((u16 )((unsigned int )tx_ring->count / 64U)) * 2U + 1U; } else { lc = (unsigned int )((u16 )((unsigned int )rx_ring->count / 64U)) * 2U + 1U; } j = 0U; goto ldv_48535; ldv_48534: good_cnt = 0U; i = 0U; goto ldv_48531; ldv_48530: skb_get(skb); tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); if ((int )tx_ret_val == 0) { good_cnt = (u16 )((int )good_cnt + 1); } else { } i = (u16 )((int )i + 1); ldv_48531: ; if ((unsigned int )i <= 63U) { goto ldv_48530; } else { } if 
((unsigned int )good_cnt != 64U) { ret_val = 12; goto ldv_48533; } else { } msleep(200U); tmp = igb_clean_test_rings(rx_ring, tx_ring, size); good_cnt = (u16 )tmp; if ((unsigned int )good_cnt != 64U) { ret_val = 13; goto ldv_48533; } else { } j = (u16 )((int )j + 1); ldv_48535: ; if ((int )j <= (int )lc) { goto ldv_48534; } else { } ldv_48533: kfree_skb(skb); return (ret_val); } } static int igb_loopback_test(struct igb_adapter *adapter , u64 *data ) { s32 tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = igb_check_reset_block(& adapter->hw); if (tmp != 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0ULL; goto out; } else { } if ((unsigned int )adapter->hw.mac.type == 5U) { _dev_info((struct device const *)(& (adapter->pdev)->dev), "Loopback test not supported on i354.\n"); *data = 0ULL; goto out; } else { } tmp___0 = igb_setup_desc_rings(adapter); *data = (u64 )tmp___0; if (*data != 0ULL) { goto out; } else { } tmp___1 = igb_setup_loopback_test(adapter); *data = (u64 )tmp___1; if (*data != 0ULL) { goto err_loopback; } else { } tmp___2 = igb_run_loopback_test(adapter); *data = (u64 )tmp___2; igb_loopback_cleanup(adapter); err_loopback: igb_free_desc_rings(adapter); out: ; return ((int )*data); } } static int igb_link_test(struct igb_adapter *adapter , u64 *data ) { struct e1000_hw *hw ; int i ; int tmp ; u32 tmp___0 ; { hw = & adapter->hw; *data = 0ULL; if ((unsigned int )hw->phy.media_type == 3U) { i = 0; hw->mac.serdes_has_link = 0; ldv_48548: (*(hw->mac.ops.check_for_link))(& adapter->hw); if ((int )hw->mac.serdes_has_link) { return ((int )*data); } else { } msleep(20U); tmp = i; i = i + 1; if (tmp <= 3749) { goto ldv_48548; } else { } *data = 1ULL; } else { (*(hw->mac.ops.check_for_link))(& adapter->hw); if ((int )hw->mac.autoneg) { msleep(5000U); } else { } tmp___0 = igb_rd32(hw, 8U); if ((tmp___0 & 2U) == 0U) { *data = 1ULL; } else { } } return ((int )*data); } } static 
/*
 * igb_diag_test - ethtool self-test entry point.
 *
 * Runs either the full offline sequence (link, register, eeprom,
 * interrupt and loopback tests, when eth_test->flags == 1, i.e.
 * ETH_TEST_FL_OFFLINE) or only the quick online link test.  Results land
 * in data[0..4]; any failure sets bit 1 (ETH_TEST_FL_FAILED) in
 * eth_test->flags.
 */
static void igb_diag_test(struct net_device *netdev , struct ethtool_test *eth_test , u64 *data )
{
  struct igb_adapter *adapter ;
  u16 autoneg_advertised ;
  u8 forced_speed_duplex ;
  u8 autoneg ;
  bool if_running ;

  adapter = (struct igb_adapter *)netdev_priv((struct net_device const *)netdev);
  if_running = netif_running((struct net_device const *)netdev);

  /* Mark the adapter as testing so other paths stay out of the way. */
  set_bit(0L, (unsigned long volatile *)(& adapter->state));

  /* Media-Auto-Sense capable parts cannot run the offline tests:
   * clear the offline request bit. */
  if ((int )adapter->hw.dev_spec._82575.mas_capable)
    eth_test->flags = eth_test->flags & 4294967294U;

  if (eth_test->flags == 1U) {
    /* Offline tests: save link settings so they can be restored after. */
    autoneg_advertised = adapter->hw.phy.autoneg_advertised;
    forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
    autoneg = (u8 )adapter->hw.mac.autoneg;

    _dev_info((struct device const *)(& (adapter->pdev)->dev), "offline testing starting\n");

    /* Link test runs first, while the link is still powered up. */
    igb_power_up_link(adapter);
    if (igb_link_test(adapter, data + 4UL) != 0)
      eth_test->flags = eth_test->flags | 2U;

    if ((int )if_running)
      dev_close(netdev);
    else
      igb_reset(adapter);

    if (igb_reg_test(adapter, data) != 0)
      eth_test->flags = eth_test->flags | 2U;
    igb_reset(adapter);

    if (igb_eeprom_test(adapter, data + 1UL) != 0)
      eth_test->flags = eth_test->flags | 2U;
    igb_reset(adapter);

    if (igb_intr_test(adapter, data + 2UL) != 0)
      eth_test->flags = eth_test->flags | 2U;
    igb_reset(adapter);

    igb_power_up_link(adapter);
    if (igb_loopback_test(adapter, data + 3UL) != 0)
      eth_test->flags = eth_test->flags | 2U;

    /* Restore the saved speed/duplex/autoneg configuration. */
    adapter->hw.phy.autoneg_advertised = autoneg_advertised;
    adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
    adapter->hw.mac.autoneg = (unsigned int )autoneg != 0U;

    adapter->hw.phy.autoneg_wait_to_complete = 1;
    igb_reset(adapter);
    adapter->hw.phy.autoneg_wait_to_complete = 0;

    clear_bit(0L, (unsigned long volatile *)(& adapter->state));
    if ((int )if_running)
      dev_open(netdev);
  } else {
    /* Online: only the link test runs; all other slots report pass. */
    _dev_info((struct device const *)(& (adapter->pdev)->dev), "online testing starting\n");
    if ((int )if_running) {
      if (igb_link_test(adapter, data + 4UL) != 0)
        eth_test->flags = eth_test->flags | 2U;
      else
        *(data + 4UL) = 0ULL;
    } else {
      *(data + 4UL) = 0ULL;
    }
    *data = 0ULL;
    *(data + 1UL) = 0ULL;
    *(data + 2UL) = 0ULL;
    *(data + 3UL) = 0ULL;
    clear_bit(0L, (unsigned long volatile *)(& adapter->state));
  }
  msleep_interruptible(4000U);
  return;
}
-95 : 0); } else { } adapter->wol = 0U; if ((wol->wolopts & 2U) != 0U) { adapter->wol = adapter->wol | 4U; } else { } if ((wol->wolopts & 4U) != 0U) { adapter->wol = adapter->wol | 8U; } else { } if ((wol->wolopts & 8U) != 0U) { adapter->wol = adapter->wol | 16U; } else { } if ((wol->wolopts & 32U) != 0U) { adapter->wol = adapter->wol | 2U; } else { } if ((int )wol->wolopts & 1) { adapter->wol = adapter->wol | 1U; } else { } device_set_wakeup_enable(& (adapter->pdev)->dev, adapter->wol != 0U); return (0); } } static int igb_set_phys_id(struct net_device *netdev , enum ethtool_phys_id_state state ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; switch ((unsigned int )state) { case 1U: igb_blink_led(hw); return (2); case 2U: igb_blink_led(hw); goto ldv_48580; case 3U: igb_led_off(hw); goto ldv_48580; case 0U: igb_led_off(hw); clear_bit(0L, (unsigned long volatile *)(& adapter->led_status)); igb_cleanup_led(hw); goto ldv_48580; } ldv_48580: ; return (0); } } static int igb_set_coalesce(struct net_device *netdev , struct ethtool_coalesce *ec ) { struct igb_adapter *adapter ; void *tmp ; int i ; struct igb_q_vector *q_vector ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; if ((ec->rx_coalesce_usecs > 10000U || (ec->rx_coalesce_usecs > 3U && ec->rx_coalesce_usecs <= 9U)) || ec->rx_coalesce_usecs == 2U) { return (-22); } else { } if ((ec->tx_coalesce_usecs > 10000U || (ec->tx_coalesce_usecs > 3U && ec->tx_coalesce_usecs <= 9U)) || ec->tx_coalesce_usecs == 2U) { return (-22); } else { } if ((adapter->flags & 8U) != 0U && ec->tx_coalesce_usecs != 0U) { return (-22); } else { } if (ec->rx_coalesce_usecs == 0U) { if ((adapter->flags & 16U) != 0U) { adapter->flags = adapter->flags & 4294967279U; } else { } } else { } if (ec->rx_coalesce_usecs != 0U && ec->rx_coalesce_usecs <= 3U) { 
adapter->rx_itr_setting = ec->rx_coalesce_usecs; } else { adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; } if ((adapter->flags & 8U) != 0U) { adapter->tx_itr_setting = adapter->rx_itr_setting; } else if (ec->tx_coalesce_usecs != 0U && ec->tx_coalesce_usecs <= 3U) { adapter->tx_itr_setting = ec->tx_coalesce_usecs; } else { adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; } i = 0; goto ldv_48591; ldv_48590: q_vector = adapter->q_vector[i]; q_vector->tx.work_limit = adapter->tx_work_limit; if ((unsigned long )q_vector->rx.ring != (unsigned long )((struct igb_ring *)0)) { q_vector->itr_val = (u16 )adapter->rx_itr_setting; } else { q_vector->itr_val = (u16 )adapter->tx_itr_setting; } if ((unsigned int )q_vector->itr_val != 0U && (unsigned int )q_vector->itr_val <= 3U) { q_vector->itr_val = 648U; } else { } q_vector->set_itr = 1U; i = i + 1; ldv_48591: ; if ((unsigned int )i < adapter->num_q_vectors) { goto ldv_48590; } else { } return (0); } } static int igb_get_coalesce(struct net_device *netdev , struct ethtool_coalesce *ec ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; if (adapter->rx_itr_setting <= 3U) { ec->rx_coalesce_usecs = adapter->rx_itr_setting; } else { ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; } if ((adapter->flags & 8U) == 0U) { if (adapter->tx_itr_setting <= 3U) { ec->tx_coalesce_usecs = adapter->tx_itr_setting; } else { ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; } } else { } return (0); } } static int igb_nway_reset(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; bool tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { igb_reinit_locked(adapter); } else { } return (0); } } static int igb_get_sset_count(struct net_device *netdev , int sset ) { void *tmp ; void *tmp___0 ; { 
switch (sset) { case 1: tmp = netdev_priv((struct net_device const *)netdev); tmp___0 = netdev_priv((struct net_device const *)netdev); return ((int )(((unsigned int )((unsigned long )((struct igb_adapter *)tmp)->num_rx_queues) * 5U + (unsigned int )(((struct igb_adapter *)tmp___0)->num_tx_queues * 3)) + 50U)); case 0: ; return (5); default: ; return (-524); } } } static void igb_get_ethtool_stats(struct net_device *netdev , struct ethtool_stats *stats , u64 *data ) { struct igb_adapter *adapter ; void *tmp ; struct rtnl_link_stats64 *net_stats ; unsigned int start ; struct igb_ring *ring ; int i ; int j ; char *p ; u64 restart2 ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; net_stats = & adapter->stats64; spin_lock(& adapter->stats64_lock); igb_update_stats(adapter, net_stats); i = 0; goto ldv_48622; ldv_48621: p = (char *)adapter + (unsigned long )igb_gstrings_stats[i].stat_offset; *(data + (unsigned long )i) = igb_gstrings_stats[i].sizeof_stat == 8 ? *((u64 *)p) : (u64 )*((u32 *)p); i = i + 1; ldv_48622: ; if ((unsigned int )i <= 40U) { goto ldv_48621; } else { } j = 0; goto ldv_48625; ldv_48624: p = (char *)net_stats + (unsigned long )igb_gstrings_net_stats[j].stat_offset; *(data + (unsigned long )i) = igb_gstrings_net_stats[j].sizeof_stat == 8 ? 
*((u64 *)p) : (u64 )*((u32 *)p); j = j + 1; i = i + 1; ldv_48625: ; if ((unsigned int )j <= 8U) { goto ldv_48624; } else { } j = 0; goto ldv_48633; ldv_48632: ring = adapter->tx_ring[j]; ldv_48628: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField118.tx_syncp)); *(data + (unsigned long )i) = ring->__annonCompField120.__annonCompField118.tx_stats.packets; *(data + ((unsigned long )i + 1UL)) = ring->__annonCompField120.__annonCompField118.tx_stats.bytes; *(data + ((unsigned long )i + 2UL)) = ring->__annonCompField120.__annonCompField118.tx_stats.restart_queue; tmp___0 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField118.tx_syncp), start); if ((int )tmp___0) { goto ldv_48628; } else { } ldv_48630: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField118.tx_syncp2)); restart2 = ring->__annonCompField120.__annonCompField118.tx_stats.restart_queue2; tmp___1 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField118.tx_syncp2), start); if ((int )tmp___1) { goto ldv_48630; } else { } *(data + ((unsigned long )i + 2UL)) = *(data + ((unsigned long )i + 2UL)) + restart2; i = i + 3; j = j + 1; ldv_48633: ; if (adapter->num_tx_queues > j) { goto ldv_48632; } else { } j = 0; goto ldv_48638; ldv_48637: ring = adapter->rx_ring[j]; ldv_48635: start = u64_stats_fetch_begin_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField119.rx_syncp)); *(data + (unsigned long )i) = ring->__annonCompField120.__annonCompField119.rx_stats.packets; *(data + ((unsigned long )i + 1UL)) = ring->__annonCompField120.__annonCompField119.rx_stats.bytes; *(data + ((unsigned long )i + 2UL)) = ring->__annonCompField120.__annonCompField119.rx_stats.drops; *(data + ((unsigned long )i + 3UL)) = ring->__annonCompField120.__annonCompField119.rx_stats.csum_err; *(data + 
((unsigned long )i + 4UL)) = ring->__annonCompField120.__annonCompField119.rx_stats.alloc_failed; tmp___2 = u64_stats_fetch_retry_irq((struct u64_stats_sync const *)(& ring->__annonCompField120.__annonCompField119.rx_syncp), start); if ((int )tmp___2) { goto ldv_48635; } else { } i = (int )((unsigned int )i + 5U); j = j + 1; ldv_48638: ; if (adapter->num_rx_queues > j) { goto ldv_48637; } else { } spin_unlock(& adapter->stats64_lock); return; } } static void igb_get_strings(struct net_device *netdev , u32 stringset , u8 *data ) { struct igb_adapter *adapter ; void *tmp ; u8 *p ; int i ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; p = data; switch (stringset) { case 0U: memcpy((void *)data, (void const *)(& igb_gstrings_test), 160UL); goto ldv_48649; case 1U: i = 0; goto ldv_48652; ldv_48651: memcpy((void *)p, (void const *)(& igb_gstrings_stats[i].stat_string), 32UL); p = p + 32UL; i = i + 1; ldv_48652: ; if ((unsigned int )i <= 40U) { goto ldv_48651; } else { } i = 0; goto ldv_48655; ldv_48654: memcpy((void *)p, (void const *)(& igb_gstrings_net_stats[i].stat_string), 32UL); p = p + 32UL; i = i + 1; ldv_48655: ; if ((unsigned int )i <= 8U) { goto ldv_48654; } else { } i = 0; goto ldv_48658; ldv_48657: sprintf((char *)p, "tx_queue_%u_packets", i); p = p + 32UL; sprintf((char *)p, "tx_queue_%u_bytes", i); p = p + 32UL; sprintf((char *)p, "tx_queue_%u_restart", i); p = p + 32UL; i = i + 1; ldv_48658: ; if (adapter->num_tx_queues > i) { goto ldv_48657; } else { } i = 0; goto ldv_48661; ldv_48660: sprintf((char *)p, "rx_queue_%u_packets", i); p = p + 32UL; sprintf((char *)p, "rx_queue_%u_bytes", i); p = p + 32UL; sprintf((char *)p, "rx_queue_%u_drops", i); p = p + 32UL; sprintf((char *)p, "rx_queue_%u_csum_err", i); p = p + 32UL; sprintf((char *)p, "rx_queue_%u_alloc_failed", i); p = p + 32UL; i = i + 1; ldv_48661: ; if (adapter->num_rx_queues > i) { goto ldv_48660; } else { } goto ldv_48649; } ldv_48649: ; return; } } 
static int igb_get_ts_info(struct net_device *dev , struct ethtool_ts_info *info ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct igb_adapter *)tmp; if ((unsigned long )adapter->ptp_clock != (unsigned long )((struct ptp_clock *)0)) { info->phc_index = ptp_clock_index(adapter->ptp_clock); } else { info->phc_index = -1; } switch ((unsigned int )adapter->hw.mac.type) { case 1U: info->so_timestamping = 26U; return (0); case 2U: ; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: info->so_timestamping = 95U; info->tx_types = 3U; info->rx_filters = 1U; if ((unsigned int )adapter->hw.mac.type > 2U) { info->rx_filters = info->rx_filters | 2U; } else { info->rx_filters = info->rx_filters | 7600U; } return (0); default: ; return (-95); } } } static int igb_get_rss_hash_opts(struct igb_adapter *adapter , struct ethtool_rxnfc *cmd ) { { cmd->data = 0ULL; switch (cmd->flow_type) { case 1U: cmd->data = cmd->data | 192ULL; case 2U: ; if ((adapter->flags & 64U) != 0U) { cmd->data = cmd->data | 192ULL; } else { } case 3U: ; case 4U: ; case 9U: ; case 10U: ; case 16U: cmd->data = cmd->data | 48ULL; goto ldv_48687; case 5U: cmd->data = cmd->data | 192ULL; case 6U: ; if ((adapter->flags & 128U) != 0U) { cmd->data = cmd->data | 192ULL; } else { } case 7U: ; case 8U: ; case 11U: ; case 12U: ; case 17U: cmd->data = cmd->data | 48ULL; goto ldv_48687; default: ; return (-22); } ldv_48687: ; return (0); } } static int igb_get_rxnfc(struct net_device *dev , struct ethtool_rxnfc *cmd , u32 *rule_locs ) { struct igb_adapter *adapter ; void *tmp ; int ret ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct igb_adapter *)tmp; ret = -95; switch (cmd->cmd) { case 45U: cmd->data = (__u64 )adapter->num_rx_queues; ret = 0; goto ldv_48704; case 41U: ret = igb_get_rss_hash_opts(adapter, cmd); goto ldv_48704; default: ; goto ldv_48704; } ldv_48704: ; return (ret); } } static int igb_set_rss_hash_opt(struct 
igb_adapter *adapter , struct ethtool_rxnfc *nfc ) { u32 flags ; struct e1000_hw *hw ; u32 mrqc ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { flags = adapter->flags; if ((nfc->data & 0xffffffffffffff0fULL) != 0ULL) { return (-22); } else { } switch (nfc->flow_type) { case 1U: ; case 5U: ; if ((((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) || (nfc->data & 64ULL) == 0ULL) || (nfc->data & 128ULL) == 0ULL) { return (-22); } else { } goto ldv_48714; case 2U: ; if ((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) { return (-22); } else { } switch (nfc->data & 192ULL) { case 0ULL: flags = flags & 4294967231U; goto ldv_48717; case 192ULL: flags = flags | 64U; goto ldv_48717; default: ; return (-22); } ldv_48717: ; goto ldv_48714; case 6U: ; if ((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) { return (-22); } else { } switch (nfc->data & 192ULL) { case 0ULL: flags = flags & 4294967167U; goto ldv_48722; case 192ULL: flags = flags | 128U; goto ldv_48722; default: ; return (-22); } ldv_48722: ; goto ldv_48714; case 4U: ; case 9U: ; case 10U: ; case 3U: ; case 8U: ; case 11U: ; case 12U: ; case 7U: ; if ((((nfc->data & 16ULL) == 0ULL || (nfc->data & 32ULL) == 0ULL) || (nfc->data & 64ULL) != 0ULL) || (nfc->data & 128ULL) != 0ULL) { return (-22); } else { } goto ldv_48714; default: ; return (-22); } ldv_48714: ; if (adapter->flags != flags) { hw = & adapter->hw; tmp = igb_rd32(hw, 22552U); mrqc = tmp; if ((flags & 192U) != 0U && (adapter->flags & 192U) == 0U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); } else { } adapter->flags = flags; mrqc = mrqc | 3342336U; mrqc = mrqc & 4282384383U; if ((flags & 64U) != 0U) { mrqc = mrqc | 4194304U; } else { } if ((flags & 128U) != 0U) { mrqc = mrqc | 8388608U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == 
(unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(mrqc, (void volatile *)hw_addr + 22552U); } else { } } else { } return (0); } } static int igb_set_rxnfc(struct net_device *dev , struct ethtool_rxnfc *cmd ) { struct igb_adapter *adapter ; void *tmp ; int ret ; { tmp = netdev_priv((struct net_device const *)dev); adapter = (struct igb_adapter *)tmp; ret = -95; switch (cmd->cmd) { case 42U: ret = igb_set_rss_hash_opt(adapter, cmd); goto ldv_48746; default: ; goto ldv_48746; } ldv_48746: ; return (ret); } } static int igb_get_eee(struct net_device *netdev , struct ethtool_eee *edata ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u32 ret_val ; u16 phy_data ; u32 eeer ; s32 tmp___0 ; s32 tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; if ((unsigned int )hw->mac.type <= 3U || (unsigned int )hw->phy.media_type != 1U) { return (-95); } else { } edata->supported = 40U; if (! hw->dev_spec._82575.eee_disable) { edata->advertised = mmd_eee_adv_to_ethtool_adv_t((int )adapter->eee_advert); } else { } if ((unsigned int )hw->mac.type == 5U) { igb_get_eee_status_i354(hw, (bool *)(& edata->eee_active)); } else { eeer = igb_rd32(hw, 3632U); if ((eeer & 536870912U) != 0U) { edata->eee_active = 1U; } else { } if ((eeer & 65536U) != 0U) { edata->tx_lpi_enabled = 1U; } else { } } switch ((unsigned int )hw->mac.type) { case 4U: tmp___0 = igb_read_emi_reg(hw, 1039, & phy_data); ret_val = (u32 )tmp___0; if (ret_val != 0U) { return (-61); } else { } edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t((int )phy_data); goto ldv_48758; case 5U: ; case 6U: ; case 7U: tmp___1 = igb_read_xmdio_reg(hw, 61, 7, & phy_data); ret_val = (u32 )tmp___1; if (ret_val != 0U) { return (-61); } else { } edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t((int )phy_data); goto ldv_48758; default: ; goto ldv_48758; } ldv_48758: edata->eee_enabled = (__u32 )(! 
hw->dev_spec._82575.eee_disable); if ((unsigned int )hw->mac.type == 5U && edata->eee_enabled != 0U) { edata->tx_lpi_enabled = 1U; } else { } if ((unsigned int )adapter->link_duplex == 1U) { edata->eee_enabled = 0U; edata->eee_active = 0U; edata->tx_lpi_enabled = 0U; edata->advertised = 0U; } else { } return (0); } } static int igb_set_eee(struct net_device *netdev , struct ethtool_eee *edata ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; struct ethtool_eee eee_curr ; bool adv1g_eee ; bool adv100m_eee ; s32 ret_val ; bool tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; adv1g_eee = 1; adv100m_eee = 1; if ((unsigned int )hw->mac.type <= 3U || (unsigned int )hw->phy.media_type != 1U) { return (-95); } else { } memset((void *)(& eee_curr), 0, 40UL); ret_val = igb_get_eee(netdev, & eee_curr); if (ret_val != 0) { return (ret_val); } else { } if (eee_curr.eee_enabled != 0U) { if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Setting EEE tx-lpi is not supported\n"); return (-22); } else { } if (edata->tx_lpi_timer != 0U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Setting EEE Tx LPI timer is not supported\n"); return (-22); } else { } if (edata->advertised == 0U || (edata->advertised & 4294967255U) != 0U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); return (-22); } else { } adv100m_eee = (edata->advertised & 8U) != 0U; adv1g_eee = (edata->advertised & 32U) != 0U; } else if (edata->eee_enabled == 0U) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Setting EEE options are not supported with EEE disabled\n"); return (-22); } else { } adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); if ((int )hw->dev_spec._82575.eee_disable != (edata->eee_enabled == 0U)) { hw->dev_spec._82575.eee_disable = 
edata->eee_enabled == 0U; adapter->flags = adapter->flags | 16384U; tmp___0 = netif_running((struct net_device const *)netdev); if ((int )tmp___0) { igb_reinit_locked(adapter); } else { igb_reset(adapter); } } else { } if ((unsigned int )hw->mac.type == 5U) { ret_val = igb_set_eee_i354(hw, (int )adv1g_eee, (int )adv100m_eee); } else { ret_val = igb_set_eee_i350(hw, (int )adv1g_eee, (int )adv100m_eee); } if (ret_val != 0) { dev_err((struct device const *)(& (adapter->pdev)->dev), "Problem setting EEE advertisement options\n"); return (-22); } else { } return (0); } } static int igb_get_module_info(struct net_device *netdev , struct ethtool_modinfo *modinfo ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u32 status ; u16 sff8472_rev ; u16 addr_mode ; bool page_swap ; s32 tmp___0 ; s32 tmp___1 ; struct _ddebug descriptor ; struct net_device *tmp___2 ; long tmp___3 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; status = 0U; page_swap = 0; if ((unsigned int )hw->phy.media_type == 1U || (unsigned int )hw->phy.media_type == 0U) { return (-95); } else { } tmp___0 = igb_read_phy_reg_i2c(hw, 94U, & sff8472_rev); status = (u32 )tmp___0; if (status != 0U) { return (-5); } else { } tmp___1 = igb_read_phy_reg_i2c(hw, 92U, & addr_mode); status = (u32 )tmp___1; if (status != 0U) { return (-5); } else { } if (((int )addr_mode & 4) != 0) { descriptor.modname = "igb"; descriptor.function = "igb_get_module_info"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_ethtool.c"; descriptor.format = "Address change required to access page 0xA2, but not supported. 
Please report the module type to the driver maintainers.\n"; descriptor.lineno = 2771U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___2, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); } else { } page_swap = 1; } else { } if (((int )sff8472_rev & 255) == 0 || (int )page_swap) { modinfo->type = 1U; modinfo->eeprom_len = 256U; } else { modinfo->type = 2U; modinfo->eeprom_len = 512U; } return (0); } } static int igb_get_module_eeprom(struct net_device *netdev , struct ethtool_eeprom *ee , u8 *data ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; u32 status ; u16 *dataword ; u16 first_word ; u16 last_word ; int i ; void *tmp___0 ; s32 tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; status = 0U; i = 0; if (ee->len == 0U) { return (-22); } else { } first_word = (u16 )(ee->offset >> 1); last_word = (u16 )(((ee->offset + ee->len) - 1U) >> 1); tmp___0 = kmalloc((unsigned long )(((int )last_word - (int )first_word) + 1) * 2UL, 208U); dataword = (u16 *)tmp___0; if ((unsigned long )dataword == (unsigned long )((u16 *)0U)) { return (-12); } else { } i = 0; goto ldv_48798; ldv_48797: tmp___1 = igb_read_phy_reg_i2c(hw, (u32 )((int )first_word + i), dataword + (unsigned long )i); status = (u32 )tmp___1; if (status != 0U) { kfree((void const *)dataword); return (-5); } else { } __swab16s(dataword + (unsigned long )i); i = i + 1; ldv_48798: ; if (((int )last_word - (int )first_word) + 1 > i) { goto ldv_48797; } else { } memcpy((void *)data, (void const *)dataword + ((unsigned long )ee->offset & 1UL), (size_t )ee->len); kfree((void const *)dataword); return (0); } } static int igb_ethtool_begin(struct net_device *netdev ) { struct igb_adapter *adapter ; 
void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; pm_runtime_get_sync(& (adapter->pdev)->dev); return (0); } } static void igb_ethtool_complete(struct net_device *netdev ) { struct igb_adapter *adapter ; void *tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; pm_runtime_put(& (adapter->pdev)->dev); return; } } static u32 igb_get_rxfh_indir_size(struct net_device *netdev ) { { return (128U); } } static int igb_get_rxfh(struct net_device *netdev , u32 *indir , u8 *key , u8 *hfunc ) { struct igb_adapter *adapter ; void *tmp ; int i ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; if ((unsigned long )hfunc != (unsigned long )((u8 *)0U)) { *hfunc = 1U; } else { } if ((unsigned long )indir == (unsigned long )((u32 *)0U)) { return (0); } else { } i = 0; goto ldv_48820; ldv_48819: *(indir + (unsigned long )i) = (u32 )adapter->rss_indir_tbl[i]; i = i + 1; ldv_48820: ; if (i <= 127) { goto ldv_48819; } else { } return (0); } } void igb_write_rss_indir_tbl(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 reg ; u32 shift ; int i ; u32 val ; int j ; u8 *hw_addr ; u8 *__var ; long tmp ; { hw = & adapter->hw; reg = 23552U; shift = 0U; i = 0; switch ((unsigned int )hw->mac.type) { case 1U: shift = 6U; goto ldv_48830; case 2U: ; if (adapter->vfs_allocated_count != 0U) { shift = 3U; } else { } goto ldv_48830; default: ; goto ldv_48830; } ldv_48830: ; goto ldv_48842; ldv_48841: val = 0U; j = 3; goto ldv_48836; ldv_48835: val = val << 8; val = (u32 )adapter->rss_indir_tbl[i + j] | val; j = j - 1; ldv_48836: ; if (j >= 0) { goto ldv_48835; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(val << (int )shift, (void volatile *)hw_addr + (unsigned long )reg); } else { } reg = reg + 4U; i = i + 4; 
ldv_48842: ; if (i <= 127) { goto ldv_48841; } else { } return; } } static int igb_set_rxfh(struct net_device *netdev , u32 const *indir , u8 const *key , u8 const hfunc ) { struct igb_adapter *adapter ; void *tmp ; struct e1000_hw *hw ; int i ; u32 num_queues ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; hw = & adapter->hw; if ((unsigned long )key != (unsigned long )((u8 const *)0U) || ((unsigned int )((unsigned char )hfunc) != 0U && (unsigned int )((unsigned char )hfunc) != 1U)) { return (-95); } else { } if ((unsigned long )indir == (unsigned long )((u32 const *)0U)) { return (0); } else { } num_queues = adapter->rss_queues; switch ((unsigned int )hw->mac.type) { case 2U: ; if (adapter->vfs_allocated_count != 0U) { num_queues = 2U; } else { } goto ldv_48855; default: ; goto ldv_48855; } ldv_48855: i = 0; goto ldv_48858; ldv_48857: ; if ((unsigned int )*(indir + (unsigned long )i) >= num_queues) { return (-22); } else { } i = i + 1; ldv_48858: ; if (i <= 127) { goto ldv_48857; } else { } i = 0; goto ldv_48861; ldv_48860: adapter->rss_indir_tbl[i] = (u8 )*(indir + (unsigned long )i); i = i + 1; ldv_48861: ; if (i <= 127) { goto ldv_48860; } else { } igb_write_rss_indir_tbl(adapter); return (0); } } static unsigned int igb_max_channels(struct igb_adapter *adapter ) { struct e1000_hw *hw ; unsigned int max_combined ; { hw = & adapter->hw; max_combined = 0U; switch ((unsigned int )hw->mac.type) { case 7U: max_combined = 2U; goto ldv_48869; case 1U: ; case 6U: max_combined = 4U; goto ldv_48869; case 4U: ; if (adapter->vfs_allocated_count != 0U) { max_combined = 1U; goto ldv_48869; } else { } case 2U: ; if (adapter->vfs_allocated_count != 0U) { max_combined = 2U; goto ldv_48869; } else { } case 3U: ; case 5U: ; default: max_combined = 8U; goto ldv_48869; } ldv_48869: ; return (max_combined); } } static void igb_get_channels(struct net_device *netdev , struct ethtool_channels *ch ) { struct igb_adapter *adapter ; void 
*tmp ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; ch->max_combined = igb_max_channels(adapter); if ((adapter->flags & 8192U) != 0U) { ch->max_other = 1U; ch->other_count = 1U; } else { } ch->combined_count = adapter->rss_queues; return; } } static int igb_set_channels(struct net_device *netdev , struct ethtool_channels *ch ) { struct igb_adapter *adapter ; void *tmp ; unsigned int count ; unsigned int tmp___0 ; int tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; count = ch->combined_count; if ((count == 0U || ch->rx_count != 0U) || ch->tx_count != 0U) { return (-22); } else { } if (ch->other_count != 1U) { return (-22); } else { } tmp___0 = igb_max_channels(adapter); if (tmp___0 < count) { return (-22); } else { } if (adapter->rss_queues != count) { adapter->rss_queues = count; tmp___1 = igb_reinit_queues(adapter); return (tmp___1); } else { } return (0); } } static struct ethtool_ops const igb_ethtool_ops = {& igb_get_settings, & igb_set_settings, & igb_get_drvinfo, & igb_get_regs_len, & igb_get_regs, & igb_get_wol, & igb_set_wol, & igb_get_msglevel, & igb_set_msglevel, & igb_nway_reset, & igb_get_link, & igb_get_eeprom_len, & igb_get_eeprom, & igb_set_eeprom, & igb_get_coalesce, & igb_set_coalesce, & igb_get_ringparam, & igb_set_ringparam, & igb_get_pauseparam, & igb_set_pauseparam, & igb_diag_test, & igb_get_strings, & igb_set_phys_id, & igb_get_ethtool_stats, & igb_ethtool_begin, & igb_ethtool_complete, 0, 0, & igb_get_sset_count, & igb_get_rxnfc, & igb_set_rxnfc, 0, 0, 0, & igb_get_rxfh_indir_size, & igb_get_rxfh, & igb_set_rxfh, & igb_get_channels, & igb_set_channels, 0, 0, 0, & igb_get_ts_info, & igb_get_module_info, & igb_get_module_eeprom, & igb_get_eee, & igb_set_eee, 0, 0}; void igb_set_ethtool_ops(struct net_device *netdev ) { { netdev->ethtool_ops = & igb_ethtool_ops; return; } } void disable_suitable_irq_7(int line , void *data ) { { if 
(ldv_irq_7_0 != 0 && line == ldv_irq_line_7_0) { ldv_irq_7_0 = 0; return; } else { } if (ldv_irq_7_1 != 0 && line == ldv_irq_line_7_1) { ldv_irq_7_1 = 0; return; } else { } if (ldv_irq_7_2 != 0 && line == ldv_irq_line_7_2) { ldv_irq_7_2 = 0; return; } else { } if (ldv_irq_7_3 != 0 && line == ldv_irq_line_7_3) { ldv_irq_7_3 = 0; return; } else { } return; } } int ldv_irq_6(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_test_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_48903; default: ldv_stop(); } ldv_48903: ; } else { } return (state); } } void choose_interrupt_8(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_8_0 = ldv_irq_8(ldv_irq_8_0, ldv_irq_line_8_0, ldv_irq_data_8_0); goto ldv_48909; case 1: ldv_irq_8_0 = ldv_irq_8(ldv_irq_8_1, ldv_irq_line_8_1, ldv_irq_data_8_1); goto ldv_48909; case 2: ldv_irq_8_0 = ldv_irq_8(ldv_irq_8_2, ldv_irq_line_8_2, ldv_irq_data_8_2); goto ldv_48909; case 3: ldv_irq_8_0 = ldv_irq_8(ldv_irq_8_3, ldv_irq_line_8_3, ldv_irq_data_8_3); goto ldv_48909; default: ldv_stop(); } ldv_48909: ; return; } } void activate_suitable_irq_6(int line , void *data ) { { if (ldv_irq_6_0 == 0) { ldv_irq_line_6_0 = line; ldv_irq_data_6_0 = data; ldv_irq_6_0 = 1; return; } else { } if (ldv_irq_6_1 == 0) { ldv_irq_line_6_1 = line; ldv_irq_data_6_1 = data; ldv_irq_6_1 = 1; return; } else { } if (ldv_irq_6_2 == 0) { ldv_irq_line_6_2 = line; ldv_irq_data_6_2 = data; ldv_irq_6_2 = 1; return; } else { } if (ldv_irq_6_3 == 0) { ldv_irq_line_6_3 = line; ldv_irq_data_6_3 = data; ldv_irq_6_3 = 1; return; } else { } return; } } int reg_check_8(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_test_intr)) { return (1); 
} else { } return (0); } } void activate_suitable_irq_8(int line , void *data ) { { if (ldv_irq_8_0 == 0) { ldv_irq_line_8_0 = line; ldv_irq_data_8_0 = data; ldv_irq_8_0 = 1; return; } else { } if (ldv_irq_8_1 == 0) { ldv_irq_line_8_1 = line; ldv_irq_data_8_1 = data; ldv_irq_8_1 = 1; return; } else { } if (ldv_irq_8_2 == 0) { ldv_irq_line_8_2 = line; ldv_irq_data_8_2 = data; ldv_irq_8_2 = 1; return; } else { } if (ldv_irq_8_3 == 0) { ldv_irq_line_8_3 = line; ldv_irq_data_8_3 = data; ldv_irq_8_3 = 1; return; } else { } return; } } void choose_interrupt_5(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_0, ldv_irq_line_5_0, ldv_irq_data_5_0); goto ldv_48931; case 1: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_1, ldv_irq_line_5_1, ldv_irq_data_5_1); goto ldv_48931; case 2: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_2, ldv_irq_line_5_2, ldv_irq_data_5_2); goto ldv_48931; case 3: ldv_irq_5_0 = ldv_irq_5(ldv_irq_5_3, ldv_irq_line_5_3, ldv_irq_data_5_3); goto ldv_48931; default: ldv_stop(); } ldv_48931: ; return; } } void disable_suitable_irq_5(int line , void *data ) { { if (ldv_irq_5_0 != 0 && line == ldv_irq_line_5_0) { ldv_irq_5_0 = 0; return; } else { } if (ldv_irq_5_1 != 0 && line == ldv_irq_line_5_1) { ldv_irq_5_1 = 0; return; } else { } if (ldv_irq_5_2 != 0 && line == ldv_irq_line_5_2) { ldv_irq_5_2 = 0; return; } else { } if (ldv_irq_5_3 != 0 && line == ldv_irq_line_5_3) { ldv_irq_5_3 = 0; return; } else { } return; } } int ldv_irq_5(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_test_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_48947; default: ldv_stop(); } ldv_48947: ; } else { } return (state); } } void activate_suitable_irq_7(int line , void *data ) 
{ { if (ldv_irq_7_0 == 0) { ldv_irq_line_7_0 = line; ldv_irq_data_7_0 = data; ldv_irq_7_0 = 1; return; } else { } if (ldv_irq_7_1 == 0) { ldv_irq_line_7_1 = line; ldv_irq_data_7_1 = data; ldv_irq_7_1 = 1; return; } else { } if (ldv_irq_7_2 == 0) { ldv_irq_line_7_2 = line; ldv_irq_data_7_2 = data; ldv_irq_7_2 = 1; return; } else { } if (ldv_irq_7_3 == 0) { ldv_irq_line_7_3 = line; ldv_irq_data_7_3 = data; ldv_irq_7_3 = 1; return; } else { } return; } } int reg_check_6(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_test_intr)) { return (1); } else { } return (0); } } int reg_check_7(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_test_intr)) { return (1); } else { } return (0); } } void choose_interrupt_6(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_0, ldv_irq_line_6_0, ldv_irq_data_6_0); goto ldv_48967; case 1: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_1, ldv_irq_line_6_1, ldv_irq_data_6_1); goto ldv_48967; case 2: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_2, ldv_irq_line_6_2, ldv_irq_data_6_2); goto ldv_48967; case 3: ldv_irq_6_0 = ldv_irq_6(ldv_irq_6_3, ldv_irq_line_6_3, ldv_irq_data_6_3); goto ldv_48967; default: ldv_stop(); } ldv_48967: ; return; } } void disable_suitable_irq_6(int line , void *data ) { { if (ldv_irq_6_0 != 0 && line == ldv_irq_line_6_0) { ldv_irq_6_0 = 0; return; } else { } if (ldv_irq_6_1 != 0 && line == ldv_irq_line_6_1) { ldv_irq_6_1 = 0; return; } else { } if (ldv_irq_6_2 != 0 && line == ldv_irq_line_6_2) { ldv_irq_6_2 = 0; return; } else { } if (ldv_irq_6_3 != 0 && line == ldv_irq_line_6_3) { ldv_irq_6_3 = 0; return; } else { } return; } } void activate_suitable_irq_5(int line , void *data ) { { if (ldv_irq_5_0 == 0) { ldv_irq_line_5_0 = line; ldv_irq_data_5_0 = data; ldv_irq_5_0 = 1; return; } else { } if (ldv_irq_5_1 == 0) { ldv_irq_line_5_1 = line; ldv_irq_data_5_1 = data; ldv_irq_5_1 = 1; 
/* Tail of activate_suitable_irq_5(): remaining free-slot checks. */
return; } else { } if (ldv_irq_5_2 == 0) { ldv_irq_line_5_2 = line; ldv_irq_data_5_2 = data; ldv_irq_5_2 = 1; return; } else { } if (ldv_irq_5_3 == 0) { ldv_irq_line_5_3 = line; ldv_irq_data_5_3 = data; ldv_irq_5_3 = 1; return; } else { } return; } }
/* Allocates zeroed argument objects for every igb ethtool_ops callback group
 * used by ldv_main_exported_19(). The byte sizes passed to ldv_init_zalloc()
 * presumably match sizeof() of each struct on the generation target --
 * NOTE(review): confirm against the struct layouts if they are ever changed. */
void ldv_initialize_ethtool_ops_19(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; void *tmp___5 ; void *tmp___6 ; void *tmp___7 ; void *tmp___8 ; { tmp = ldv_init_zalloc(40UL); igb_ethtool_ops_group0 = (struct ethtool_eee *)tmp; tmp___0 = ldv_init_zalloc(36UL); igb_ethtool_ops_group1 = (struct ethtool_ringparam *)tmp___0; tmp___1 = ldv_init_zalloc(16UL); igb_ethtool_ops_group3 = (struct ethtool_eeprom *)tmp___1; tmp___2 = ldv_init_zalloc(44UL); igb_ethtool_ops_group2 = (struct ethtool_cmd *)tmp___2; tmp___3 = ldv_init_zalloc(16UL); igb_ethtool_ops_group4 = (struct ethtool_pauseparam *)tmp___3; tmp___4 = ldv_init_zalloc(36UL); igb_ethtool_ops_group5 = (struct ethtool_channels *)tmp___4; tmp___5 = ldv_init_zalloc(92UL); igb_ethtool_ops_group6 = (struct ethtool_coalesce *)tmp___5; tmp___6 = ldv_init_zalloc(3008UL); igb_ethtool_ops_group9 = (struct net_device *)tmp___6; tmp___7 = ldv_init_zalloc(192UL); igb_ethtool_ops_group7 = (struct ethtool_rxnfc *)tmp___7; tmp___8 = ldv_init_zalloc(20UL); igb_ethtool_ops_group8 = (struct ethtool_wolinfo *)tmp___8; return; } }
/* Environment model for an IRQ-8 slot: if active, nondeterministically fire
 * igb_test_intr with LDV_IN_INTERRUPT raised around the call; returns the
 * (unchanged) slot state. */
int ldv_irq_8(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_test_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_48990; default: ldv_stop(); } ldv_48990: ; } else { } return (state); } }
/* Head of disable_suitable_irq_8(): clear the matching slot's state flag;
 * the remaining slot checks continue on the next line. */
void disable_suitable_irq_8(int line , void *data ) { { if (ldv_irq_8_0 != 0 && line == ldv_irq_line_8_0) { ldv_irq_8_0 = 0; return; } else { } if (ldv_irq_8_1 
!= 0 && line == ldv_irq_line_8_1) { ldv_irq_8_1 = 0; return; } else { } if (ldv_irq_8_2 != 0 && line == ldv_irq_line_8_2) { ldv_irq_8_2 = 0; return; } else { } if (ldv_irq_8_3 != 0 && line == ldv_irq_line_8_3) { ldv_irq_8_3 = 0; return; } else { } return; } } int ldv_irq_7(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = igb_test_intr(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_49003; default: ldv_stop(); } ldv_49003: ; } else { } return (state); } } void choose_interrupt_7(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_0, ldv_irq_line_7_0, ldv_irq_data_7_0); goto ldv_49009; case 1: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_1, ldv_irq_line_7_1, ldv_irq_data_7_1); goto ldv_49009; case 2: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_2, ldv_irq_line_7_2, ldv_irq_data_7_2); goto ldv_49009; case 3: ldv_irq_7_0 = ldv_irq_7(ldv_irq_7_3, ldv_irq_line_7_3, ldv_irq_data_7_3); goto ldv_49009; default: ldv_stop(); } ldv_49009: ; return; } } int reg_check_5(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& igb_test_intr)) { return (1); } else { } return (0); } } void ldv_main_exported_19(void) { u8 ldvarg75 ; enum ethtool_phys_id_state ldvarg74 ; u8 *ldvarg76 ; void *tmp ; struct ethtool_drvinfo *ldvarg61 ; void *tmp___0 ; u8 *ldvarg69 ; void *tmp___1 ; u32 ldvarg68 ; u32 *ldvarg78 ; void *tmp___2 ; int ldvarg70 ; u32 *ldvarg73 ; void *tmp___3 ; u8 *ldvarg63 ; void *tmp___4 ; u8 *ldvarg55 ; void *tmp___5 ; u32 ldvarg64 ; struct ethtool_test *ldvarg66 ; void *tmp___6 ; struct ethtool_regs *ldvarg58 ; void *tmp___7 ; void *ldvarg57 ; void *tmp___8 ; u64 *ldvarg65 ; void *tmp___9 ; struct ethtool_modinfo *ldvarg62 ; void *tmp___10 ; u8 
/* Interior of ldv_main_exported_19(): allocate/zero every callback argument,
 * then nondeterministically invoke one igb ethtool_ops callback while the
 * ops state machine (ldv_state_variable_19) is in state 1. */
*ldvarg56 ; void *tmp___11 ; u8 *ldvarg72 ; void *tmp___12 ; u64 *ldvarg59 ; void *tmp___13 ; u8 *ldvarg71 ; void *tmp___14 ; struct ethtool_stats *ldvarg60 ; void *tmp___15 ; struct ethtool_ts_info *ldvarg67 ; void *tmp___16 ; u32 *ldvarg77 ; void *tmp___17 ; int tmp___18 ; { tmp = ldv_init_zalloc(1UL); ldvarg76 = (u8 *)tmp; tmp___0 = ldv_init_zalloc(196UL); ldvarg61 = (struct ethtool_drvinfo *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg69 = (u8 *)tmp___1; tmp___2 = ldv_init_zalloc(4UL); ldvarg78 = (u32 *)tmp___2; tmp___3 = ldv_init_zalloc(4UL); ldvarg73 = (u32 *)tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg63 = (u8 *)tmp___4; tmp___5 = ldv_init_zalloc(1UL); ldvarg55 = (u8 *)tmp___5; tmp___6 = ldv_init_zalloc(16UL); ldvarg66 = (struct ethtool_test *)tmp___6; tmp___7 = ldv_init_zalloc(12UL); ldvarg58 = (struct ethtool_regs *)tmp___7; tmp___8 = ldv_init_zalloc(1UL); ldvarg57 = tmp___8; tmp___9 = ldv_init_zalloc(8UL); ldvarg65 = (u64 *)tmp___9; tmp___10 = ldv_init_zalloc(44UL); ldvarg62 = (struct ethtool_modinfo *)tmp___10; tmp___11 = ldv_init_zalloc(1UL); ldvarg56 = (u8 *)tmp___11; tmp___12 = ldv_init_zalloc(1UL); ldvarg72 = (u8 *)tmp___12; tmp___13 = ldv_init_zalloc(8UL); ldvarg59 = (u64 *)tmp___13; tmp___14 = ldv_init_zalloc(1UL); ldvarg71 = (u8 *)tmp___14; tmp___15 = ldv_init_zalloc(8UL); ldvarg60 = (struct ethtool_stats *)tmp___15; tmp___16 = ldv_init_zalloc(44UL); ldvarg67 = (struct ethtool_ts_info *)tmp___16; tmp___17 = ldv_init_zalloc(4UL); ldvarg77 = (u32 *)tmp___17; ldv_memset((void *)(& ldvarg75), 0, 1UL); ldv_memset((void *)(& ldvarg74), 0, 4UL); ldv_memset((void *)(& ldvarg68), 0, 4UL); ldv_memset((void *)(& ldvarg70), 0, 4UL); ldv_memset((void *)(& ldvarg64), 0, 4UL); tmp___18 = __VERIFIER_nondet_int(); switch (tmp___18) { case 0: ; if (ldv_state_variable_19 == 1) { igb_ethtool_complete(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 1: ; if (ldv_state_variable_19 == 1) { igb_set_rxnfc(igb_ethtool_ops_group9, 
/* cases 1-13: rxnfc/ringparam/rxfh/pauseparam/phys-id/sset-count/settings/
 * channels/coalesce/begin/module-eeprom callbacks */
igb_ethtool_ops_group7); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 2: ; if (ldv_state_variable_19 == 1) { igb_get_rxnfc(igb_ethtool_ops_group9, igb_ethtool_ops_group7, ldvarg78); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 3: ; if (ldv_state_variable_19 == 1) { igb_get_ringparam(igb_ethtool_ops_group9, igb_ethtool_ops_group1); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 4: ; if (ldv_state_variable_19 == 1) { igb_set_rxfh(igb_ethtool_ops_group9, (u32 const *)ldvarg77, (u8 const *)ldvarg76, (int )ldvarg75); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 5: ; if (ldv_state_variable_19 == 1) { igb_get_pauseparam(igb_ethtool_ops_group9, igb_ethtool_ops_group4); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 6: ; if (ldv_state_variable_19 == 1) { igb_set_phys_id(igb_ethtool_ops_group9, ldvarg74); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 7: ; if (ldv_state_variable_19 == 1) { igb_get_rxfh(igb_ethtool_ops_group9, ldvarg73, ldvarg72, ldvarg71); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 8: ; if (ldv_state_variable_19 == 1) { igb_get_sset_count(igb_ethtool_ops_group9, ldvarg70); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 9: ; if (ldv_state_variable_19 == 1) { igb_get_settings(igb_ethtool_ops_group9, igb_ethtool_ops_group2); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 10: ; if (ldv_state_variable_19 == 1) { igb_set_channels(igb_ethtool_ops_group9, igb_ethtool_ops_group5); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 11: ; if (ldv_state_variable_19 == 1) { igb_set_coalesce(igb_ethtool_ops_group9, igb_ethtool_ops_group6); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 12: ; if (ldv_state_variable_19 == 1) { igb_ethtool_begin(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 13: ; if (ldv_state_variable_19 == 1) { igb_get_module_eeprom(igb_ethtool_ops_group9, igb_ethtool_ops_group3, 
/* cases 14-26: msglevel/ts-info/eeprom-len/self-test/strings/nway-reset/eee/
 * module-info/link/channels/drvinfo/pauseparam callbacks */
ldvarg69); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 14: ; if (ldv_state_variable_19 == 1) { igb_set_msglevel(igb_ethtool_ops_group9, ldvarg68); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 15: ; if (ldv_state_variable_19 == 1) { igb_get_ts_info(igb_ethtool_ops_group9, ldvarg67); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 16: ; if (ldv_state_variable_19 == 1) { igb_get_eeprom_len(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 17: ; if (ldv_state_variable_19 == 1) { igb_diag_test(igb_ethtool_ops_group9, ldvarg66, ldvarg65); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 18: ; if (ldv_state_variable_19 == 1) { igb_get_strings(igb_ethtool_ops_group9, ldvarg64, ldvarg63); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 19: ; if (ldv_state_variable_19 == 1) { igb_nway_reset(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 20: ; if (ldv_state_variable_19 == 1) { igb_set_eee(igb_ethtool_ops_group9, igb_ethtool_ops_group0); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 21: ; if (ldv_state_variable_19 == 1) { igb_get_eee(igb_ethtool_ops_group9, igb_ethtool_ops_group0); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 22: ; if (ldv_state_variable_19 == 1) { igb_get_module_info(igb_ethtool_ops_group9, ldvarg62); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 23: ; if (ldv_state_variable_19 == 1) { igb_get_link(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 24: ; if (ldv_state_variable_19 == 1) { igb_get_channels(igb_ethtool_ops_group9, igb_ethtool_ops_group5); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 25: ; if (ldv_state_variable_19 == 1) { igb_get_drvinfo(igb_ethtool_ops_group9, ldvarg61); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 26: ; if (ldv_state_variable_19 == 1) { igb_set_pauseparam(igb_ethtool_ops_group9, 
/* cases 27-38: stats/coalesce/regs/rxfh-size/wol/settings/eeprom/msglevel/
 * regs-len/ringparam callbacks */
igb_ethtool_ops_group4); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 27: ; if (ldv_state_variable_19 == 1) { igb_get_ethtool_stats(igb_ethtool_ops_group9, ldvarg60, ldvarg59); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 28: ; if (ldv_state_variable_19 == 1) { igb_get_coalesce(igb_ethtool_ops_group9, igb_ethtool_ops_group6); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 29: ; if (ldv_state_variable_19 == 1) { igb_get_regs(igb_ethtool_ops_group9, ldvarg58, ldvarg57); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 30: ; if (ldv_state_variable_19 == 1) { igb_get_rxfh_indir_size(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 31: ; if (ldv_state_variable_19 == 1) { igb_set_wol(igb_ethtool_ops_group9, igb_ethtool_ops_group8); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 32: ; if (ldv_state_variable_19 == 1) { igb_set_settings(igb_ethtool_ops_group9, igb_ethtool_ops_group2); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 33: ; if (ldv_state_variable_19 == 1) { igb_get_eeprom(igb_ethtool_ops_group9, igb_ethtool_ops_group3, ldvarg56); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 34: ; if (ldv_state_variable_19 == 1) { igb_get_wol(igb_ethtool_ops_group9, igb_ethtool_ops_group8); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 35: ; if (ldv_state_variable_19 == 1) { igb_set_eeprom(igb_ethtool_ops_group9, igb_ethtool_ops_group3, ldvarg55); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 36: ; if (ldv_state_variable_19 == 1) { igb_get_msglevel(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 37: ; if (ldv_state_variable_19 == 1) { igb_get_regs_len(igb_ethtool_ops_group9); ldv_state_variable_19 = 1; } else { } goto ldv_49047; case 38: ; if (ldv_state_variable_19 == 1) { igb_set_ringparam(igb_ethtool_ops_group9, igb_ethtool_ops_group1); ldv_state_variable_19 = 1; } else { } goto ldv_49047; 
/* Tail of ldv_main_exported_19(), followed by LDV interception wrappers:
 * each wrapper forwards to the real kernel API and additionally updates the
 * corresponding verification model (work items, lock sets, IRQ slots). */
default: ldv_stop(); } ldv_49047: ; return; } } bool ldv_queue_work_on_89(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_90(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_91(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_92(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_93(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_94(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_95(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_96(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); 
/* NOTE(review): ldv_mutex_trylock_97 below returns the model's trylock result
 * and never reaches its second `return (ldv_func_res);` -- generator artifact
 * (dead code), intentionally kept so the model result drives verification. */
mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_97(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_98(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_99(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_100(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static int ldv_request_irq_101(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = request_irq(irq, handler, flags, name, dev); ldv_func_res = tmp; tmp___0 = reg_check_7(handler); if (tmp___0 != 0 && ldv_func_res == 0) { activate_suitable_irq_7((int )irq, dev); } else { } return (ldv_func_res); } } void ldv_free_irq_105(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2); return; } } void ldv_free_irq_106(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_7((int )ldv_func_arg1, ldv_func_arg2); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_137(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_135(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_138(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_139(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_134(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_136(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_lock_140(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_131(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_130(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_133(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_132(struct workqueue_struct *ldv_func_arg1 ) ; s32 igb_copper_link_setup_igp(struct e1000_hw *hw ) ; s32 igb_copper_link_setup_m88(struct e1000_hw *hw ) ; s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw ) ; s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw ) ; s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw ) ; s32 igb_get_cable_length_m88(struct e1000_hw *hw ) ; s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw ) ; s32 igb_get_cable_length_igp_2(struct e1000_hw *hw ) ; s32 igb_get_phy_id(struct e1000_hw *hw ) ; s32 igb_get_phy_info_igp(struct e1000_hw *hw ) ; s32 igb_get_phy_info_m88(struct e1000_hw *hw ) ; s32 igb_phy_hw_reset(struct e1000_hw *hw ) ; s32 igb_read_phy_reg_igp(struct e1000_hw *hw , u32 offset , u16 *data ) ; s32 igb_set_d3_lplu_state(struct e1000_hw *hw , bool active ) ; s32 igb_setup_copper_link(struct e1000_hw *hw ) ; s32 igb_write_phy_reg_igp(struct e1000_hw *hw , u32 offset , u16 data ) ; void igb_power_down_phy_copper(struct e1000_hw *hw ) ; s32 igb_phy_init_script_igp3(struct e1000_hw *hw ) ; s32 igb_read_phy_reg_mdic(struct e1000_hw *hw , u32 offset , u16 *data ) ; s32 igb_write_phy_reg_mdic(struct e1000_hw *hw , u32 offset , u16 data ) ; s32 igb_write_phy_reg_i2c(struct e1000_hw *hw , u32 offset , u16 data ) ; s32 igb_read_sfp_data_byte(struct e1000_hw *hw , u16 offset , 
u8 *data ) ; s32 igb_copper_link_setup_82580(struct e1000_hw *hw ) ; s32 igb_get_phy_info_82580(struct e1000_hw *hw ) ; s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw ) ; s32 igb_get_cable_length_82580(struct e1000_hw *hw ) ; s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw , u32 offset , u16 *data ) ; s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw , u32 offset , u16 data ) ; s32 igb_check_polarity_m88(struct e1000_hw *hw ) ; s32 igb_acquire_nvm(struct e1000_hw *hw ) ; void igb_release_nvm(struct e1000_hw *hw ) ; s32 igb_read_mac_addr(struct e1000_hw *hw ) ; s32 igb_read_nvm_eerd(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) ; s32 igb_read_nvm_spi(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) ; s32 igb_write_nvm_spi(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) ; s32 igb_validate_nvm_checksum(struct e1000_hw *hw ) ; s32 igb_update_nvm_checksum(struct e1000_hw *hw ) ; s32 igb_init_mbx_params_pf(struct e1000_hw *hw ) ; s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw , u16 mask ) ; void igb_release_swfw_sync_i210(struct e1000_hw *hw , u16 mask ) ; s32 igb_write_xmdio_reg(struct e1000_hw *hw , u16 addr , u8 dev_addr , u16 data ) ; s32 igb_init_nvm_params_i210(struct e1000_hw *hw ) ; s32 igb_pll_workaround_i210(struct e1000_hw *hw ) ; s32 igb_check_for_copper_link(struct e1000_hw *hw ) ; s32 igb_config_fc_after_link_up(struct e1000_hw *hw ) ; s32 igb_get_auto_rd_done(struct e1000_hw *hw ) ; s32 igb_get_hw_semaphore(struct e1000_hw *hw ) ; s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw , u16 *speed , u16 *duplex ) ; s32 igb_id_led_init(struct e1000_hw *hw ) ; s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw , u32 reg , u32 offset , u8 data ) ; void igb_clear_hw_cntrs_base(struct e1000_hw *hw ) ; void igb_clear_vfta(struct e1000_hw *hw ) ; void igb_clear_vfta_i350(struct e1000_hw *hw ) ; void igb_init_rx_addrs(struct e1000_hw *hw , u16 rar_count ) ; void igb_put_hw_semaphore(struct e1000_hw *hw ) ; void 
igb_rar_set(struct e1000_hw *hw , u8 *addr , u32 index ) ; s32 igb_check_alt_mac_addr(struct e1000_hw *hw ) ; static s32 igb_get_invariants_82575(struct e1000_hw *hw ) ; static s32 igb_acquire_phy_82575(struct e1000_hw *hw ) ; static void igb_release_phy_82575(struct e1000_hw *hw ) ; static s32 igb_acquire_nvm_82575(struct e1000_hw *hw ) ; static void igb_release_nvm_82575(struct e1000_hw *hw ) ; static s32 igb_check_for_link_82575(struct e1000_hw *hw ) ; static s32 igb_get_cfg_done_82575(struct e1000_hw *hw ) ; static s32 igb_init_hw_82575(struct e1000_hw *hw ) ; static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw ) ; static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw , u32 offset , u16 *data ) ; static s32 igb_read_phy_reg_82580(struct e1000_hw *hw , u32 offset , u16 *data ) ; static s32 igb_write_phy_reg_82580(struct e1000_hw *hw , u32 offset , u16 data ) ; static s32 igb_reset_hw_82575(struct e1000_hw *hw ) ; static s32 igb_reset_hw_82580(struct e1000_hw *hw ) ; static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw , bool active ) ; static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw , bool active ) ; static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw , bool active ) ; static s32 igb_setup_copper_link_82575(struct e1000_hw *hw ) ; static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw ) ; static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw , u32 offset , u16 data ) ; static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw ) ; static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw , u16 mask ) ; static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw , u16 *speed , u16 *duplex ) ; static s32 igb_get_phy_id_82575(struct e1000_hw *hw ) ; static void igb_release_swfw_sync_82575(struct e1000_hw *hw , u16 mask ) ; static bool igb_sgmii_active_82575(struct e1000_hw *hw ) ; static s32 igb_reset_init_script_82575(struct e1000_hw *hw ) ; static s32 igb_read_mac_addr_82575(struct e1000_hw *hw ) ; static s32 
/* Remaining static forward declarations for the 82575 family, the 82580
 * RXPBS lookup table, and the first PHY helpers of e1000_82575.c. */
igb_set_pcie_completion_timeout(struct e1000_hw *hw ) ; static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw ) ; static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw ) ; static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw ) ; static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw ) ; static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw ) ; static u16 const e1000_82580_rxpbs_table[11U] = { 36U, 72U, 144U, 1U, 2U, 4U, 8U, 16U, 35U, 70U, 140U}; /* igb_sgmii_uses_mdio_82575: reads a per-MAC-type register (offset 32 or
 * 3588) and reports whether its sign bit (presumably an "external MDIO"
 * flag -- NOTE(review): confirm against register layout) is set. */ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw ) { u32 reg ; bool ext_mdio ; { reg = 0U; ext_mdio = 0; switch ((unsigned int )hw->mac.type) { case 1U: ; case 2U: reg = igb_rd32(hw, 32U); ext_mdio = (int )reg < 0; goto ldv_44316; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: reg = igb_rd32(hw, 3588U); ext_mdio = (int )reg < 0; goto ldv_44316; default: ; goto ldv_44316; } ldv_44316: ; return (ext_mdio); } } /* igb_check_for_link_media_swap: probes PHY register 1 on pages 0 and 1 to
 * detect which media port has link; on a change it records the new port and
 * sets media_changed, otherwise it falls through to the normal copper link
 * check. Always returns 0 once the probing reads/writes succeed. */ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; u8 port ; { phy = & hw->phy; port = 0U; ret_val = (*(phy->ops.write_reg))(hw, 22U, 0); if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(phy->ops.read_reg))(hw, 1U, & data); if (ret_val != 0) { return (ret_val); } else { } if (((int )data & 4) != 0) { port = 1U; } else { } ret_val = (*(phy->ops.write_reg))(hw, 22U, 1); if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(phy->ops.read_reg))(hw, 1U, & data); if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(phy->ops.write_reg))(hw, 22U, 0); if (ret_val != 0) { return (ret_val); } else { } if (((int )data & 4) != 0) { port = 2U; } else { } if ((unsigned int )port != 0U && (int )hw->dev_spec._82575.media_port != (int )port) { hw->dev_spec._82575.media_port = port; hw->dev_spec._82575.media_changed = 1; } else { ret_val = igb_check_for_link_82575(hw); } return (0); } } /* igb_init_phy_params_82575: selects the PHY access/reset/cable-length ops
 * for this MAC/PHY combination; continues across the next two lines. */ static s32 igb_init_phy_params_82575(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u32 
ctrl_ext ; bool tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; bool tmp___1 ; bool tmp___2 ; int tmp___3 ; u32 tmp___4 ; u16 data ; { phy = & hw->phy; ret_val = 0; if ((unsigned int )hw->phy.media_type != 1U) { phy->type = 1; goto out; } else { } phy->autoneg_mask = 47U; phy->reset_delay_us = 100U; ctrl_ext = igb_rd32(hw, 24U); tmp = igb_sgmii_active_82575(hw); if ((int )tmp) { phy->ops.reset = & igb_phy_hw_reset_sgmii_82575; ctrl_ext = ctrl_ext | 33554432U; } else { phy->ops.reset = & igb_phy_hw_reset; ctrl_ext = ctrl_ext & 4261412863U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr + 24U); } else { } igb_reset_mdicnfg_82580(hw); tmp___1 = igb_sgmii_active_82575(hw); if ((int )tmp___1) { tmp___2 = igb_sgmii_uses_mdio_82575(hw); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { phy->ops.read_reg = & igb_read_phy_reg_sgmii_82575; phy->ops.write_reg = & igb_write_phy_reg_sgmii_82575; } else { goto _L; } } else { _L: /* CIL Label */ switch ((unsigned int )hw->mac.type) { case 3U: ; case 4U: ; case 5U: phy->ops.read_reg = & igb_read_phy_reg_82580; phy->ops.write_reg = & igb_write_phy_reg_82580; goto ldv_44343; case 6U: ; case 7U: phy->ops.read_reg = & igb_read_phy_reg_gs40g; phy->ops.write_reg = & igb_write_phy_reg_gs40g; goto ldv_44343; default: phy->ops.read_reg = & igb_read_phy_reg_igp; phy->ops.write_reg = & igb_write_phy_reg_igp; } ldv_44343: ; } tmp___4 = igb_rd32(hw, 8U); hw->bus.func = (u16 )((tmp___4 & 12U) >> 2); ret_val = igb_get_phy_id_82575(hw); if (ret_val != 0) { return (ret_val); } else { } switch (phy->id) { case 21040800U: ; case 21040576U: ; case 21040272U: ; case 21040320U: phy->type = 2; phy->ops.check_polarity = & igb_check_polarity_m88; phy->ops.get_phy_info = & igb_get_phy_info_m88; if (phy->id != 21040320U) { phy->ops.get_cable_length = & 
/* per-PHY-id ops dispatch continues: M88 variants, IGP, 82580, I210 family */
igb_get_cable_length_m88_gen2; } else { phy->ops.get_cable_length = & igb_get_cable_length_m88; } phy->ops.force_speed_duplex = & igb_phy_force_speed_duplex_m88; if (phy->id == 21040272U) { ret_val = (*(phy->ops.write_reg))(hw, 22U, 2); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); if (ret_val != 0) { goto out; } else { } data = (u16 )(((int )data & 896) >> 7); if ((unsigned int )data == 2U || (unsigned int )data == 3U) { hw->mac.ops.check_for_link = & igb_check_for_link_media_swap; } else { } } else { } goto ldv_44352; case 44565392U: phy->type = 6; phy->ops.get_phy_info = & igb_get_phy_info_igp; phy->ops.get_cable_length = & igb_get_cable_length_igp_2; phy->ops.force_speed_duplex = & igb_phy_force_speed_duplex_igp; phy->ops.set_d0_lplu_state = & igb_set_d0_lplu_state_82575; phy->ops.set_d3_lplu_state = & igb_set_d3_lplu_state; goto ldv_44352; case 22283168U: ; case 22283184U: phy->type = 8; phy->ops.force_speed_duplex = & igb_phy_force_speed_duplex_82580; phy->ops.get_cable_length = & igb_get_cable_length_82580; phy->ops.get_phy_info = & igb_get_phy_info_82580; phy->ops.set_d0_lplu_state = & igb_set_d0_lplu_state_82580; phy->ops.set_d3_lplu_state = & igb_set_d3_lplu_state_82580; goto ldv_44352; case 21040128U: phy->type = 9; phy->ops.check_polarity = & igb_check_polarity_m88; phy->ops.get_phy_info = & igb_get_phy_info_m88; phy->ops.get_cable_length = & igb_get_cable_length_m88_gen2; phy->ops.set_d0_lplu_state = & igb_set_d0_lplu_state_82580; phy->ops.set_d3_lplu_state = & igb_set_d3_lplu_state_82580; phy->ops.force_speed_duplex = & igb_phy_force_speed_duplex_m88; goto ldv_44352; default: ret_val = -2; goto out; } ldv_44352: ; out: ; return (ret_val); } } /* igb_init_nvm_params_82575: derives EEPROM word size from EECD and fills in
 * the NVM ops table; continues on the next line. */ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw ) { struct e1000_nvm_info *nvm ; u32 eecd ; u32 tmp ; u16 size ; { nvm = & hw->nvm; tmp = igb_rd32(hw, 16U); eecd = tmp; size = (unsigned short )((eecd & 30720U) >> 11); size = (unsigned int )size + 6U; if 
/* Tail of igb_init_nvm_params_82575: clamp word size, select page size and
 * read/write/validate/update ops per override and MAC type. */
((unsigned int )size > 15U) { size = 15U; } else { } nvm->word_size = (u16 )(1 << (int )size); nvm->opcode_bits = 8U; nvm->delay_usec = 1U; switch ((unsigned int )nvm->override) { case 2U: nvm->page_size = 32U; nvm->address_bits = 16U; goto ldv_44365; case 1U: nvm->page_size = 8U; nvm->address_bits = 8U; goto ldv_44365; default: nvm->page_size = (eecd & 1024U) != 0U ? 32U : 8U; nvm->address_bits = (eecd & 1024U) != 0U ? 16U : 8U; goto ldv_44365; } ldv_44365: ; if ((unsigned int )nvm->word_size == 32768U) { nvm->page_size = 128U; } else { } nvm->type = 2; nvm->ops.acquire = & igb_acquire_nvm_82575; nvm->ops.release = & igb_release_nvm_82575; nvm->ops.write = & igb_write_nvm_spi; nvm->ops.validate = & igb_validate_nvm_checksum; nvm->ops.update = & igb_update_nvm_checksum; if ((int )((short )nvm->word_size) >= 0) { nvm->ops.read = & igb_read_nvm_eerd; } else { nvm->ops.read = & igb_read_nvm_spi; } switch ((unsigned int )hw->mac.type) { case 3U: nvm->ops.validate = & igb_validate_nvm_checksum_82580; nvm->ops.update = & igb_update_nvm_checksum_82580; goto ldv_44369; case 5U: ; case 4U: nvm->ops.validate = & igb_validate_nvm_checksum_i350; nvm->ops.update = & igb_update_nvm_checksum_i350; goto ldv_44369; default: ; goto ldv_44369; } ldv_44369: ; return (0); } } /* igb_init_mac_params_82575: fills in RAR entry count, reset/semaphore/link
 * ops and 82575-specific flags keyed off hw->mac.type; continues below. */ static s32 igb_init_mac_params_82575(struct e1000_hw *hw ) { struct e1000_mac_info *mac ; struct e1000_dev_spec_82575 *dev_spec ; u32 tmp ; { mac = & hw->mac; dev_spec = & hw->dev_spec._82575; mac->mta_reg_count = 128U; switch ((unsigned int )mac->type) { case 2U: mac->rar_entry_count = 24U; goto ldv_44379; case 3U: mac->rar_entry_count = 24U; goto ldv_44379; case 4U: ; case 5U: mac->rar_entry_count = 32U; goto ldv_44379; default: mac->rar_entry_count = 16U; goto ldv_44379; } ldv_44379: ; if ((unsigned int )mac->type > 2U) { mac->ops.reset_hw = & igb_reset_hw_82580; } else { mac->ops.reset_hw = & igb_reset_hw_82575; } if ((unsigned int )mac->type > 5U) { mac->ops.acquire_swfw_sync = & igb_acquire_swfw_sync_i210; 
/* i210 family uses dedicated SW/FW sync routines; older parts the 82575 ones */
mac->ops.release_swfw_sync = & igb_release_swfw_sync_i210; } else { mac->ops.acquire_swfw_sync = & igb_acquire_swfw_sync_82575; mac->ops.release_swfw_sync = & igb_release_swfw_sync_82575; } mac->asf_firmware_present = 1; tmp = igb_rd32(hw, 23380U); mac->arc_subsystem_valid = (tmp & 14U) != 0U; if ((unsigned int )mac->type > 3U) { dev_spec->eee_disable = 0; } else { dev_spec->eee_disable = 1; } if ((unsigned int )mac->type > 5U) { dev_spec->clear_semaphore_once = 1; } else { } mac->ops.setup_physical_interface = (unsigned int )hw->phy.media_type == 1U ? & igb_setup_copper_link_82575 : & igb_setup_serdes_link_82575; if ((unsigned int )mac->type == 3U) { switch ((int )hw->device_id) { case 1080: ; case 1082: ; case 1084: ; case 1088: ; goto ldv_44388; default: hw->dev_spec._82575.mas_capable = 1; goto ldv_44388; } ldv_44388: ; } else { } return (0); } } /* igb_set_sfp_media_type_82575: enables I2C access, polls the SFP module's
 * identifier byte (up to 3 x 100ms), then classifies the media type from the
 * module's ethernet-flags byte; restores CTRL_EXT on every exit path. */ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw ) { s32 ret_val ; u32 ctrl_ext ; struct e1000_dev_spec_82575 *dev_spec ; struct e1000_sfp_flags *eth_flags ; u8 tranceiver_type ; s32 timeout ; u8 *hw_addr ; u8 *__var ; long tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___2 ; { ret_val = 3; ctrl_ext = 0U; dev_spec = & hw->dev_spec._82575; eth_flags = & dev_spec->eth_flags; tranceiver_type = 0U; timeout = 3; ctrl_ext = igb_rd32(hw, 24U); ctrl_ext = ctrl_ext & 4294967167U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(ctrl_ext | 33554432U, (void volatile *)hw_addr + 24U); } else { } igb_rd32(hw, 8U); goto ldv_44404; ldv_44403: ret_val = igb_read_sfp_data_byte(hw, 0, & tranceiver_type); if (ret_val == 0) { goto ldv_44402; } else { } msleep(100U); timeout = timeout - 1; ldv_44404: ; if (timeout != 0) { goto ldv_44403; } else { } ldv_44402: ; if (ret_val != 0) { goto out; } else { } ret_val = 
/* NOTE(review): the bitfield tests below all read byte 0 of eth_flags; CIL
 * appears to have dropped the per-bit masks, so the original per-flag
 * (1000BASE-SX/LX/CX/T) distinctions are not recoverable from this text. */
igb_read_sfp_data_byte(hw, 6, (u8 *)eth_flags); if (ret_val != 0) { goto out; } else { } if ((unsigned int )tranceiver_type == 3U || (unsigned int )tranceiver_type == 2U) { dev_spec->module_plugged = 1; if ((unsigned int )*((unsigned char *)eth_flags + 0UL) != 0U || (unsigned int )*((unsigned char *)eth_flags + 0UL) != 0U) { hw->phy.media_type = 3; } else if ((unsigned int )*((unsigned char *)eth_flags + 0UL) != 0U) { dev_spec->sgmii_active = 1; hw->phy.media_type = 3; } else if ((unsigned int )*((unsigned char *)eth_flags + 0UL) != 0U) { dev_spec->sgmii_active = 1; hw->phy.media_type = 1; } else { hw->phy.media_type = 0; descriptor.modname = "igb"; descriptor.function = "igb_set_sfp_media_type_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "PHY module has not been recognized\n"; descriptor.lineno = 509U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "PHY module has not been recognized\n"); } else { } goto out; } } else { hw->phy.media_type = 0; } ret_val = 0; out: __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr___0 + 24U); } else { } return (ret_val); } } /* Head of igb_get_invariants_82575(); the rest lies beyond this chunk. */ static s32 igb_get_invariants_82575(struct e1000_hw *hw ) { struct e1000_mac_info *mac ; struct e1000_dev_spec_82575 *dev_spec ; s32 ret_val ; u32 ctrl_ext ; u32 link_mode ; bool tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { mac = & hw->mac; dev_spec = & hw->dev_spec._82575; ctrl_ext = 0U; link_mode = 0U; switch ((int )hw->device_id) { case 
4263: ; case 4265: ; case 4310: mac->type = 1; goto ldv_44422; case 4297: ; case 5386: ; case 5400: ; case 4326: ; case 4327: ; case 4328: ; case 5414: ; case 5389: mac->type = 2; goto ldv_44422; case 5390: ; case 5391: ; case 5415: ; case 5392: ; case 5393: ; case 5398: ; case 1080: ; case 1082: ; case 1084: ; case 1088: mac->type = 3; goto ldv_44422; case 5409: ; case 5410: ; case 5411: ; case 5412: mac->type = 4; goto ldv_44422; case 5427: ; case 5430: ; case 5431: ; case 5432: ; case 5499: ; case 5500: mac->type = 6; goto ldv_44422; case 5433: mac->type = 7; goto ldv_44422; case 8000: ; case 8001: ; case 8005: mac->type = 5; goto ldv_44422; default: ; return (-5); } ldv_44422: hw->phy.media_type = 1; dev_spec->sgmii_active = 0; dev_spec->module_plugged = 0; ctrl_ext = igb_rd32(hw, 24U); link_mode = ctrl_ext & 12582912U; switch (link_mode) { case 4194304U: hw->phy.media_type = 3; goto ldv_44457; case 8388608U: tmp = igb_sgmii_uses_mdio_82575(hw); if ((int )tmp) { hw->phy.media_type = 1; dev_spec->sgmii_active = 1; goto ldv_44457; } else { } case 12582912U: ret_val = igb_set_sfp_media_type_82575(hw); if (ret_val != 0 || (unsigned int )hw->phy.media_type == 0U) { hw->phy.media_type = 3; if (link_mode == 8388608U) { hw->phy.media_type = 1; dev_spec->sgmii_active = 1; } else { } goto ldv_44457; } else { } if ((unsigned int )*((unsigned char *)dev_spec + 4UL) != 0U) { goto ldv_44457; } else { } ctrl_ext = ctrl_ext & 4282384383U; if ((unsigned int )hw->phy.media_type == 1U) { ctrl_ext = ctrl_ext | 8388608U; } else { ctrl_ext = ctrl_ext | 12582912U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr + 24U); } else { } goto ldv_44457; default: ; goto ldv_44457; } ldv_44457: ret_val = igb_init_mac_params_82575(hw); if (ret_val != 0) { goto out; } else { } ret_val = igb_init_nvm_params_82575(hw); switch 
((unsigned int )hw->mac.type) { case 6U: ; case 7U: ret_val = igb_init_nvm_params_i210(hw); goto ldv_44467; default: ; goto ldv_44467; } ldv_44467: ; if (ret_val != 0) { goto out; } else { } switch ((unsigned int )mac->type) { case 2U: ; case 4U: igb_init_mbx_params_pf(hw); goto ldv_44471; default: ; goto ldv_44471; } ldv_44471: ret_val = igb_init_phy_params_82575(hw); out: ; return (ret_val); } } static s32 igb_acquire_phy_82575(struct e1000_hw *hw ) { u16 mask ; s32 tmp ; { mask = 2U; if ((unsigned int )hw->bus.func == 1U) { mask = 4U; } else if ((unsigned int )hw->bus.func == 2U) { mask = 32U; } else if ((unsigned int )hw->bus.func == 3U) { mask = 64U; } else { } tmp = (*(hw->mac.ops.acquire_swfw_sync))(hw, (int )mask); return (tmp); } } static void igb_release_phy_82575(struct e1000_hw *hw ) { u16 mask ; { mask = 2U; if ((unsigned int )hw->bus.func == 1U) { mask = 4U; } else if ((unsigned int )hw->bus.func == 2U) { mask = 32U; } else if ((unsigned int )hw->bus.func == 3U) { mask = 64U; } else { } (*(hw->mac.ops.release_swfw_sync))(hw, (int )mask); return; } } static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw , u32 offset , u16 *data ) { s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { ret_val = -4; if (offset > 255U) { descriptor.modname = "igb"; descriptor.function = "igb_read_phy_reg_sgmii_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "PHY Address %u is out of range\n"; descriptor.lineno = 740U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PHY Address %u is out of range\n", offset); } else { } goto out; } else { } ret_val = 
(*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } ret_val = igb_read_phy_reg_i2c(hw, offset, data); (*(hw->phy.ops.release))(hw); out: ; return (ret_val); } } static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw , u32 offset , u16 data ) { s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { ret_val = -4; if (offset > 255U) { descriptor.modname = "igb"; descriptor.function = "igb_write_phy_reg_sgmii_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "PHY Address %d is out of range\n"; descriptor.lineno = 772U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PHY Address %d is out of range\n", offset); } else { } goto out; } else { } ret_val = (*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } ret_val = igb_write_phy_reg_i2c(hw, offset, (int )data); (*(hw->phy.ops.release))(hw); out: ; return (ret_val); } } static s32 igb_get_phy_id_82575(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_id ; u32 ctrl_ext ; u32 mdic ; bool tmp ; int tmp___0 ; bool tmp___1 ; u8 *hw_addr ; u8 *__var ; long tmp___2 ; struct _ddebug descriptor ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___0 ; struct net_device *tmp___5 ; long tmp___6 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___7 ; { phy = & hw->phy; ret_val = 0; if ((unsigned int )hw->mac.type == 5U) { igb_get_phy_id(hw); } else { } tmp = igb_sgmii_active_82575(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { phy->addr = 1U; ret_val = igb_get_phy_id(hw); goto out; } else { } tmp___1 = 
igb_sgmii_uses_mdio_82575(hw); if ((int )tmp___1) { switch ((unsigned int )hw->mac.type) { case 1U: ; case 2U: mdic = igb_rd32(hw, 32U); mdic = mdic & 65011712U; phy->addr = mdic >> 21; goto ldv_44510; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: mdic = igb_rd32(hw, 3588U); mdic = mdic & 65011712U; phy->addr = mdic >> 21; goto ldv_44510; default: ret_val = -2; goto out; } ldv_44510: ret_val = igb_get_phy_id(hw); goto out; } else { } ctrl_ext = igb_rd32(hw, 24U); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(ctrl_ext & 4294967167U, (void volatile *)hw_addr + 24U); } else { } igb_rd32(hw, 8U); msleep(300U); phy->addr = 1U; goto ldv_44525; ldv_44524: ret_val = igb_read_phy_reg_sgmii_82575(hw, 2U, & phy_id); if (ret_val == 0) { descriptor.modname = "igb"; descriptor.function = "igb_get_phy_id_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Vendor ID 0x%08X read at address %u\n"; descriptor.lineno = 857U; descriptor.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___3, "Vendor ID 0x%08X read at address %u\n", (int )phy_id, phy->addr); } else { } if ((unsigned int )phy_id == 321U) { goto ldv_44522; } else { } } else { descriptor___0.modname = "igb"; descriptor___0.function = "igb_get_phy_id_82575"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; 
descriptor___0.format = "PHY address %u was unreadable\n"; descriptor___0.lineno = 864U; descriptor___0.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___5, "PHY address %u was unreadable\n", phy->addr); } else { } } phy->addr = phy->addr + 1U; ldv_44525: ; if (phy->addr <= 7U) { goto ldv_44524; } else { } ldv_44522: ; if (phy->addr == 8U) { phy->addr = 0U; ret_val = -2; goto out; } else { ret_val = igb_get_phy_id(hw); } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr___0 + 24U); } else { } out: ; return (ret_val); } } static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw ) { s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { descriptor.modname = "igb"; descriptor.function = "igb_phy_hw_reset_sgmii_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Soft resetting SGMII attached PHY...\n"; descriptor.lineno = 898U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Soft resetting SGMII attached PHY...\n"); } else { } ret_val = (*(hw->phy.ops.write_reg))(hw, 27U, 32900); if (ret_val != 0) { goto out; } else { } ret_val = igb_phy_sw_reset(hw); out: ; return (ret_val); } } static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw , bool active ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; { phy = & hw->phy; ret_val = 
(*(phy->ops.read_reg))(hw, 25U, & data); if (ret_val != 0) { goto out; } else { } if ((int )active) { data = (u16 )((unsigned int )data | 2U); ret_val = (*(phy->ops.write_reg))(hw, 25U, (int )data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); data = (unsigned int )data & 65407U; ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )data); if (ret_val != 0) { goto out; } else { } } else { data = (unsigned int )data & 65533U; ret_val = (*(phy->ops.write_reg))(hw, 25U, (int )data); if ((unsigned int )phy->smart_speed == 1U) { ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); if (ret_val != 0) { goto out; } else { } data = (u16 )((unsigned int )data | 128U); ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )data); if (ret_val != 0) { goto out; } else { } } else if ((unsigned int )phy->smart_speed == 2U) { ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); if (ret_val != 0) { goto out; } else { } data = (unsigned int )data & 65407U; ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )data); if (ret_val != 0) { goto out; } else { } } else { } } out: ; return (ret_val); } } static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw , bool active ) { struct e1000_phy_info *phy ; u16 data ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { phy = & hw->phy; tmp = igb_rd32(hw, 3604U); data = (u16 )tmp; if ((int )active) { data = (u16 )((unsigned int )data | 2U); data = (unsigned int )data & 65534U; } else { data = (unsigned int )data & 65533U; if ((unsigned int )phy->smart_speed == 1U) { data = (u16 )((unsigned int )data | 1U); } else if ((unsigned int )phy->smart_speed == 2U) { data = (unsigned int )data & 65534U; } else { } } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )data, (void volatile *)hw_addr + 3604U); } else { } return (0); } } static s32 igb_set_d3_lplu_state_82580(struct 
e1000_hw *hw , bool active ) { struct e1000_phy_info *phy ; u16 data ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { phy = & hw->phy; tmp = igb_rd32(hw, 3604U); data = (u16 )tmp; if (! active) { data = (unsigned int )data & 65531U; if ((unsigned int )phy->smart_speed == 1U) { data = (u16 )((unsigned int )data | 1U); } else if ((unsigned int )phy->smart_speed == 2U) { data = (unsigned int )data & 65534U; } else { } } else if (((unsigned int )phy->autoneg_advertised == 47U || (unsigned int )phy->autoneg_advertised == 15U) || (unsigned int )phy->autoneg_advertised == 3U) { data = (u16 )((unsigned int )data | 4U); data = (unsigned int )data & 65534U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )data, (void volatile *)hw_addr + 3604U); } else { } return (0); } } static s32 igb_acquire_nvm_82575(struct e1000_hw *hw ) { s32 ret_val ; { ret_val = (*(hw->mac.ops.acquire_swfw_sync))(hw, 1); if (ret_val != 0) { goto out; } else { } ret_val = igb_acquire_nvm(hw); if (ret_val != 0) { (*(hw->mac.ops.release_swfw_sync))(hw, 1); } else { } out: ; return (ret_val); } } static void igb_release_nvm_82575(struct e1000_hw *hw ) { { igb_release_nvm(hw); (*(hw->mac.ops.release_swfw_sync))(hw, 1); return; } } static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw , u16 mask ) { u32 swfw_sync ; u32 swmask ; u32 fwmask ; s32 ret_val ; s32 i ; s32 timeout ; s32 tmp ; unsigned long __ms ; unsigned long tmp___0 ; struct _ddebug descriptor ; struct net_device *tmp___1 ; long tmp___2 ; u8 *hw_addr ; u8 *__var ; long tmp___3 ; { swmask = (u32 )mask; fwmask = (u32 )((int )mask << 16); ret_val = 0; i = 0; timeout = 200; goto ldv_44587; ldv_44586: tmp = igb_get_hw_semaphore(hw); if (tmp != 0) { ret_val = -13; goto out; } else { } swfw_sync = igb_rd32(hw, 23388U); if (((fwmask | swmask) & swfw_sync) == 0U) { goto ldv_44581; } 
else { } igb_put_hw_semaphore(hw); if (1) { __const_udelay(21475000UL); } else { __ms = 5UL; goto ldv_44584; ldv_44583: __const_udelay(4295000UL); ldv_44584: tmp___0 = __ms; __ms = __ms - 1UL; if (tmp___0 != 0UL) { goto ldv_44583; } else { } } i = i + 1; ldv_44587: ; if (i < timeout) { goto ldv_44586; } else { } ldv_44581: ; if (i == timeout) { descriptor.modname = "igb"; descriptor.function = "igb_acquire_swfw_sync_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Driver can\'t access resource, SW_FW_SYNC timeout.\n"; descriptor.lineno = 1149U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___1, "Driver can\'t access resource, SW_FW_SYNC timeout.\n"); } else { } ret_val = -13; goto out; } else { } swfw_sync = swfw_sync | swmask; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(swfw_sync, (void volatile *)hw_addr + 23388U); } else { } igb_put_hw_semaphore(hw); out: ; return (ret_val); } } static void igb_release_swfw_sync_82575(struct e1000_hw *hw , u16 mask ) { u32 swfw_sync ; s32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { goto ldv_44599; ldv_44598: ; ldv_44599: tmp = igb_get_hw_semaphore(hw); if (tmp != 0) { goto ldv_44598; } else { } swfw_sync = igb_rd32(hw, 23388U); swfw_sync = (u32 )(~ ((int )mask)) & swfw_sync; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(swfw_sync, (void volatile *)hw_addr + 23388U); } else 
{ } igb_put_hw_semaphore(hw); return; } } static s32 igb_get_cfg_done_82575(struct e1000_hw *hw ) { s32 timeout ; u32 mask ; u32 tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; u32 tmp___2 ; { timeout = 100; mask = 262144U; if ((unsigned int )hw->bus.func == 1U) { mask = 524288U; } else if ((unsigned int )hw->bus.func == 2U) { mask = 1048576U; } else if ((unsigned int )hw->bus.func == 3U) { mask = 2097152U; } else { } goto ldv_44611; ldv_44610: tmp = igb_rd32(hw, 4112U); if ((tmp & mask) != 0U) { goto ldv_44609; } else { } usleep_range(1000UL, 2000UL); timeout = timeout - 1; ldv_44611: ; if (timeout != 0) { goto ldv_44610; } else { } ldv_44609: ; if (timeout == 0) { descriptor.modname = "igb"; descriptor.function = "igb_get_cfg_done_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "MNG configuration cycle has not completed.\n"; descriptor.lineno = 1214U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "MNG configuration cycle has not completed.\n"); } else { } } else { } tmp___2 = igb_rd32(hw, 16U); if ((tmp___2 & 256U) == 0U && (unsigned int )hw->phy.type == 6U) { igb_phy_init_script_igp3(hw); } else { } return (0); } } static s32 igb_get_link_up_info_82575(struct e1000_hw *hw , u16 *speed , u16 *duplex ) { s32 ret_val ; { if ((unsigned int )hw->phy.media_type != 1U) { ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, duplex); } else { ret_val = igb_get_speed_and_duplex_copper(hw, speed, duplex); } return (ret_val); } } static s32 igb_check_for_link_82575(struct e1000_hw *hw ) { s32 ret_val ; u16 speed ; u16 duplex ; struct _ddebug descriptor ; 
struct net_device *tmp ; long tmp___0 ; { if ((unsigned int )hw->phy.media_type != 1U) { ret_val = igb_get_pcs_speed_and_duplex_82575(hw, & speed, & duplex); hw->mac.get_link_status = (bool )(! ((int )hw->mac.serdes_has_link != 0)); ret_val = igb_config_fc_after_link_up(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_check_for_link_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Error configuring flow control\n"; descriptor.lineno = 1277U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Error configuring flow control\n"); } else { } } else { } } else { ret_val = igb_check_for_copper_link(hw); } return (ret_val); } } void igb_power_up_serdes_link_82575(struct e1000_hw *hw ) { u32 reg ; bool tmp ; int tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___2 ; { if ((unsigned int )hw->phy.media_type != 3U) { tmp = igb_sgmii_active_82575(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } } else { } reg = igb_rd32(hw, 16896U); reg = reg | 8U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(reg, (void volatile *)hw_addr + 16896U); } else { } reg = igb_rd32(hw, 24U); reg = reg & 4294967167U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(reg, (void volatile *)hw_addr___0 + 24U); } else { } igb_rd32(hw, 8U); 
usleep_range(1000UL, 2000UL); return; } } static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw , u16 *speed , u16 *duplex ) { struct e1000_mac_info *mac ; u32 pcs ; u32 status ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { mac = & hw->mac; mac->serdes_has_link = 0; *speed = 0U; *duplex = 0U; pcs = igb_rd32(hw, 16908U); if ((int )pcs & 1 && (pcs & 16U) != 0U) { mac->serdes_has_link = 1; if ((pcs & 4U) != 0U) { *speed = 1000U; } else if ((pcs & 2U) != 0U) { *speed = 100U; } else { *speed = 10U; } if ((pcs & 8U) != 0U) { *duplex = 2U; } else { *duplex = 1U; } if ((unsigned int )mac->type == 5U) { status = igb_rd32(hw, 8U); if ((status & 4096U) != 0U && (status & 8192U) == 0U) { *speed = 2500U; *duplex = 2U; descriptor.modname = "igb"; descriptor.function = "igb_get_pcs_speed_and_duplex_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "2500 Mbs, "; descriptor.lineno = 1367U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "2500 Mbs, "); } else { } descriptor___0.modname = "igb"; descriptor___0.function = "igb_get_pcs_speed_and_duplex_82575"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = "Full Duplex\n"; descriptor___0.lineno = 1368U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 
= igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Full Duplex\n"); } else { } } else { } } else { } } else { } return (0); } } void igb_shutdown_serdes_link_82575(struct e1000_hw *hw ) { u32 reg ; bool tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; bool tmp___2 ; int tmp___3 ; { if ((unsigned int )hw->phy.media_type != 3U) { tmp = igb_sgmii_active_82575(hw); if ((int )tmp) { return; } else { } } else { } tmp___2 = igb_enable_mng_pass_thru(hw); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { reg = igb_rd32(hw, 16896U); reg = reg & 4294967287U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(reg, (void volatile *)hw_addr + 16896U); } else { } reg = igb_rd32(hw, 24U); reg = reg | 128U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(reg, (void volatile *)hw_addr___0 + 24U); } else { } igb_rd32(hw, 8U); usleep_range(1000UL, 2000UL); } else { } return; } } static s32 igb_reset_hw_82575(struct e1000_hw *hw ) { u32 ctrl ; s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; u8 *hw_addr ; u8 *__var ; long tmp___5 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___6 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___7 ; struct _ddebug descriptor___2 ; struct net_device *tmp___8 ; long tmp___9 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___10 ; struct _ddebug descriptor___3 ; struct net_device *tmp___11 ; long tmp___12 ; u32 tmp___13 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___14 ; { ret_val = 
igb_disable_pcie_master(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_reset_hw_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "PCI-E Master disable polling has failed.\n"; descriptor.lineno = 1426U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PCI-E Master disable polling has failed.\n"); } else { } } else { } ret_val = igb_set_pcie_completion_timeout(hw); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_reset_hw_82575"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = "PCI-E Set completion timeout has failed.\n"; descriptor___0.lineno = 1431U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "PCI-E Set completion timeout has failed.\n"); } else { } } else { } descriptor___1.modname = "igb"; descriptor___1.function = "igb_reset_hw_82575"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___1.format = "Masking off all interrupts\n"; descriptor___1.lineno = 1433U; descriptor___1.flags = 0U; tmp___4 = 
ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Masking off all interrupts\n"); } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(4294967295U, (void volatile *)hw_addr + 216U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(0U, (void volatile *)hw_addr___0 + 256U); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(8U, (void volatile *)hw_addr___1 + 1024U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 20000UL); ctrl = igb_rd32(hw, 0U); descriptor___2.modname = "igb"; descriptor___2.function = "igb_reset_hw_82575"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___2.format = "Issuing a global reset to MAC\n"; descriptor___2.lineno = 1444U; descriptor___2.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___9 != 0L) { tmp___8 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___8, "Issuing a global reset to MAC\n"); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___10 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___10 == 0L) { writel(ctrl | 67108864U, (void volatile *)hw_addr___2); } else { } ret_val 
= igb_get_auto_rd_done(hw); if (ret_val != 0) { descriptor___3.modname = "igb"; descriptor___3.function = "igb_reset_hw_82575"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___3.format = "Auto Read Done did not complete\n"; descriptor___3.lineno = 1453U; descriptor___3.flags = 0U; tmp___12 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___12 != 0L) { tmp___11 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)tmp___11, "Auto Read Done did not complete\n"); } else { } } else { } tmp___13 = igb_rd32(hw, 16U); if ((tmp___13 & 256U) == 0U) { igb_reset_init_script_82575(hw); } else { } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___14 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___14 == 0L) { writel(4294967295U, (void volatile *)hw_addr___3 + 216U); } else { } igb_rd32(hw, 192U); ret_val = igb_check_alt_mac_addr(hw); return (ret_val); } } static s32 igb_init_hw_82575(struct e1000_hw *hw ) { struct e1000_mac_info *mac ; s32 ret_val ; u16 i ; u16 rar_count ; bool tmp ; int tmp___0 ; struct _ddebug descriptor ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___0 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___1 ; struct net_device *tmp___5 ; long tmp___6 ; u8 *hw_addr ; u8 *__var ; long tmp___7 ; struct _ddebug descriptor___2 ; struct net_device *tmp___8 ; long tmp___9 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___10 ; { mac = & hw->mac; rar_count = mac->rar_entry_count; if ((unsigned int )hw->mac.type > 5U) { tmp = igb_get_flash_presence_i210(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { ret_val = igb_pll_workaround_i210(hw); if (ret_val != 0) { return 
(ret_val); } else { } } else { } } else { } ret_val = igb_id_led_init(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_init_hw_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Error initializing identification LED\n"; descriptor.lineno = 1492U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___1, "Error initializing identification LED\n"); } else { } } else { } descriptor___0.modname = "igb"; descriptor___0.function = "igb_init_hw_82575"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = "Initializing the IEEE VLAN\n"; descriptor___0.lineno = 1497U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___3, "Initializing the IEEE VLAN\n"); } else { } if ((unsigned int )hw->mac.type == 4U || (unsigned int )hw->mac.type == 5U) { igb_clear_vfta_i350(hw); } else { igb_clear_vfta(hw); } igb_init_rx_addrs(hw, (int )rar_count); descriptor___1.modname = "igb"; descriptor___1.function = "igb_init_hw_82575"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___1.format = 
"Zeroing the MTA\n"; descriptor___1.lineno = 1507U; descriptor___1.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___5, "Zeroing the MTA\n"); } else { } i = 0U; goto ldv_44700; ldv_44699: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(0U, (void volatile *)hw_addr + (unsigned long )(((int )i << 2) + 20992)); } else { } i = (u16 )((int )i + 1); ldv_44700: ; if ((int )mac->mta_reg_count > (int )i) { goto ldv_44699; } else { } descriptor___2.modname = "igb"; descriptor___2.function = "igb_init_hw_82575"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___2.format = "Zeroing the UTA\n"; descriptor___2.lineno = 1512U; descriptor___2.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___9 != 0L) { tmp___8 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___8, "Zeroing the UTA\n"); } else { } i = 0U; goto ldv_44707; ldv_44706: __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___10 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___10 == 0L) { writel(0U, (void volatile *)hw_addr___0 + (unsigned long )(((int )i << 2) + 40960)); } else { } i = (u16 )((int )i + 1); ldv_44707: ; if ((int )mac->uta_reg_count > (int )i) { goto ldv_44706; } else { } ret_val = igb_setup_link(hw); igb_clear_hw_cntrs_82575(hw); return (ret_val); } } static s32 igb_setup_copper_link_82575(struct e1000_hw *hw ) { u32 ctrl ; s32 ret_val ; u32 phpm_reg ; u8 
*hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; struct _ddebug descriptor ; struct net_device *tmp___1 ; long tmp___2 ; bool tmp___3 ; { ctrl = igb_rd32(hw, 0U); ctrl = ctrl | 64U; ctrl = ctrl & 4294961151U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(ctrl, (void volatile *)hw_addr); } else { } switch ((unsigned int )hw->mac.type) { case 3U: ; case 4U: ; case 6U: ; case 7U: phpm_reg = igb_rd32(hw, 3604U); phpm_reg = phpm_reg & 4294967263U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(phpm_reg, (void volatile *)hw_addr___0 + 3604U); } else { } goto ldv_44725; default: ; goto ldv_44725; } ldv_44725: ret_val = igb_setup_serdes_link_82575(hw); if (ret_val != 0) { goto out; } else { } tmp___3 = igb_sgmii_active_82575(hw); if ((int )tmp___3 && ! 
hw->phy.reset_disable) { msleep(300U); ret_val = (*(hw->phy.ops.reset))(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_setup_copper_link_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Error resetting the PHY.\n"; descriptor.lineno = 1571U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___1, "Error resetting the PHY.\n"); } else { } goto out; } else { } } else { } switch ((unsigned int )hw->phy.type) { case 9U: ; case 2U: ; switch (hw->phy.id) { case 21040576U: ; case 21040272U: ; case 21040800U: ; case 21040128U: ret_val = igb_copper_link_setup_m88_gen2(hw); goto ldv_44736; default: ret_val = igb_copper_link_setup_m88(hw); goto ldv_44736; } ldv_44736: ; goto ldv_44738; case 6U: ret_val = igb_copper_link_setup_igp(hw); goto ldv_44738; case 8U: ret_val = igb_copper_link_setup_82580(hw); goto ldv_44738; default: ret_val = -2; goto ldv_44738; } ldv_44738: ; if (ret_val != 0) { goto out; } else { } ret_val = igb_setup_copper_link(hw); out: ; return (ret_val); } } static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw ) { u32 ctrl_ext ; u32 ctrl_reg ; u32 reg ; u32 anadv_reg ; bool pcs_autoneg ; s32 ret_val ; u16 data ; bool tmp ; int tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___2 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___3 ; struct _ddebug descriptor ; struct net_device *tmp___4 ; long tmp___5 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___6 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___7 ; struct _ddebug descriptor___0 ; struct net_device *tmp___8 ; long tmp___9 ; struct _ddebug descriptor___1 ; 
struct net_device *tmp___10 ; long tmp___11 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___12 ; bool tmp___13 ; int tmp___14 ; { ret_val = 0; if ((unsigned int )hw->phy.media_type != 3U) { tmp = igb_sgmii_active_82575(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (ret_val); } else { } } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(1024U, (void volatile *)hw_addr + 36U); } else { } ctrl_ext = igb_rd32(hw, 24U); ctrl_ext = ctrl_ext & 4294967167U; ctrl_ext = ctrl_ext | 33554432U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr___0 + 24U); } else { } ctrl_reg = igb_rd32(hw, 0U); ctrl_reg = ctrl_reg | 64U; if ((unsigned int )hw->mac.type == 1U || (unsigned int )hw->mac.type == 2U) { ctrl_reg = ctrl_reg | 786432U; reg = igb_rd32(hw, 52U); reg = reg | 4U; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(reg, (void volatile *)hw_addr___1 + 52U); } else { } } else { } reg = igb_rd32(hw, 16904U); pcs_autoneg = hw->mac.autoneg; switch (ctrl_ext & 12582912U) { case 8388608U: pcs_autoneg = 1; reg = reg & 4294705151U; goto ldv_44762; case 4194304U: pcs_autoneg = 0; default: ; if ((unsigned int )hw->mac.type == 1U || (unsigned int )hw->mac.type == 2U) { ret_val = (*(hw->nvm.ops.read))(hw, 3, 1, & data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_setup_serdes_link_82575"; descriptor.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "\017NVM Read Error\n\n"; descriptor.lineno = 1676U; descriptor.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___4 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___4, "\017NVM Read Error\n\n"); } else { } return (ret_val); } else { } if (((int )data & 16384) != 0) { pcs_autoneg = 0; } else { } } else { } ctrl_reg = ctrl_reg | 6657U; reg = reg | 12U; goto ldv_44762; } ldv_44762: __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(ctrl_reg, (void volatile *)hw_addr___2); } else { } reg = reg & 4294901710U; if ((int )pcs_autoneg) { reg = reg | 196608U; reg = reg & 4294967167U; anadv_reg = igb_rd32(hw, 16920U); anadv_reg = anadv_reg & 4294966911U; switch ((unsigned int )hw->fc.requested_mode) { case 3U: ; case 1U: anadv_reg = anadv_reg | 256U; anadv_reg = anadv_reg | 128U; goto ldv_44772; case 2U: anadv_reg = anadv_reg | 256U; goto ldv_44772; default: ; goto ldv_44772; } ldv_44772: __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(anadv_reg, (void volatile *)hw_addr___3 + 16920U); } else { } descriptor___0.modname = "igb"; descriptor___0.function = "igb_setup_serdes_link_82575"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = 
"Configuring Autoneg:PCS_LCTL=0x%08X\n"; descriptor___0.lineno = 1731U; descriptor___0.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___9 != 0L) { tmp___8 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___8, "Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); } else { } } else { reg = reg | 16U; reg = reg | 128U; descriptor___1.modname = "igb"; descriptor___1.function = "igb_setup_serdes_link_82575"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___1.format = "Configuring Forced Link:PCS_LCTL=0x%08X\n"; descriptor___1.lineno = 1739U; descriptor___1.flags = 0U; tmp___11 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___11 != 0L) { tmp___10 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___10, "Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); } else { } } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___12 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___12 == 0L) { writel(reg, (void volatile *)hw_addr___4 + 16904U); } else { } if (! 
pcs_autoneg) { tmp___13 = igb_sgmii_active_82575(hw); if (tmp___13) { tmp___14 = 0; } else { tmp___14 = 1; } if (tmp___14) { igb_force_mac_fc(hw); } else { } } else { } return (ret_val); } } static bool igb_sgmii_active_82575(struct e1000_hw *hw ) { struct e1000_dev_spec_82575 *dev_spec ; { dev_spec = & hw->dev_spec._82575; return (dev_spec->sgmii_active); } } static s32 igb_reset_init_script_82575(struct e1000_hw *hw ) { struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { if ((unsigned int )hw->mac.type == 1U) { descriptor.modname = "igb"; descriptor.function = "igb_reset_init_script_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Running reset init script for 82575\n"; descriptor.lineno = 1774U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Running reset init script for 82575\n"); } else { } igb_write_8bit_ctrl_reg(hw, 36U, 0U, 12); igb_write_8bit_ctrl_reg(hw, 36U, 1U, 120); igb_write_8bit_ctrl_reg(hw, 36U, 27U, 35); igb_write_8bit_ctrl_reg(hw, 36U, 35U, 21); igb_write_8bit_ctrl_reg(hw, 23368U, 20U, 0); igb_write_8bit_ctrl_reg(hw, 23368U, 16U, 0); igb_write_8bit_ctrl_reg(hw, 23364U, 0U, 236); igb_write_8bit_ctrl_reg(hw, 23364U, 97U, 223); igb_write_8bit_ctrl_reg(hw, 23364U, 52U, 5); igb_write_8bit_ctrl_reg(hw, 23364U, 47U, 129); igb_write_8bit_ctrl_reg(hw, 23372U, 2U, 71); igb_write_8bit_ctrl_reg(hw, 23372U, 20U, 0); igb_write_8bit_ctrl_reg(hw, 23372U, 16U, 0); } else { } return (0); } } static s32 igb_read_mac_addr_82575(struct e1000_hw *hw ) { s32 ret_val ; { ret_val = 0; ret_val = igb_check_alt_mac_addr(hw); if (ret_val != 0) { goto out; } else { } ret_val = 
igb_read_mac_addr(hw); out: ; return (ret_val); } } void igb_power_down_phy_copper_82575(struct e1000_hw *hw ) { bool tmp ; int tmp___0 ; s32 tmp___1 ; { tmp = igb_enable_mng_pass_thru(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { tmp___1 = igb_check_reset_block(hw); if (tmp___1 == 0) { igb_power_down_phy_copper(hw); } else { } } else { } return; } } static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw ) { bool tmp ; { igb_clear_hw_cntrs_base(hw); igb_rd32(hw, 16476U); igb_rd32(hw, 16480U); igb_rd32(hw, 16484U); igb_rd32(hw, 16488U); igb_rd32(hw, 16492U); igb_rd32(hw, 16496U); igb_rd32(hw, 16600U); igb_rd32(hw, 16604U); igb_rd32(hw, 16608U); igb_rd32(hw, 16612U); igb_rd32(hw, 16616U); igb_rd32(hw, 16620U); igb_rd32(hw, 16388U); igb_rd32(hw, 16396U); igb_rd32(hw, 16436U); igb_rd32(hw, 16444U); igb_rd32(hw, 16632U); igb_rd32(hw, 16636U); igb_rd32(hw, 16564U); igb_rd32(hw, 16568U); igb_rd32(hw, 16572U); igb_rd32(hw, 16640U); igb_rd32(hw, 16676U); igb_rd32(hw, 16644U); igb_rd32(hw, 16648U); igb_rd32(hw, 16652U); igb_rd32(hw, 16656U); igb_rd32(hw, 16664U); igb_rd32(hw, 16668U); igb_rd32(hw, 16672U); igb_rd32(hw, 16428U); igb_rd32(hw, 16444U); igb_rd32(hw, 16636U); igb_rd32(hw, 16644U); igb_rd32(hw, 16664U); igb_rd32(hw, 16676U); igb_rd32(hw, 16680U); igb_rd32(hw, 16684U); igb_rd32(hw, 16688U); igb_rd32(hw, 16692U); igb_rd32(hw, 16696U); if ((unsigned int )hw->phy.media_type == 3U) { igb_rd32(hw, 16936U); } else { tmp = igb_sgmii_active_82575(hw); if ((int )tmp) { igb_rd32(hw, 16936U); } else { } } return; } } void igb_rx_fifo_flush_82575(struct e1000_hw *hw ) { u32 rctl ; u32 rlpml ; u32 rxdctl[4U] ; u32 rfctl ; u32 temp_rctl ; u32 rx_enabled ; int i ; int ms_wait ; u8 *hw_addr ; u8 *__var ; long tmp ; u32 tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u32 tmp___2 ; struct _ddebug descriptor ; struct net_device *tmp___3 ; long tmp___4 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___5 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___6 ; 
u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___7 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___8 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___9 ; u8 *hw_addr___6 ; u8 *__var___6 ; long tmp___10 ; u8 *hw_addr___7 ; u8 *__var___7 ; long tmp___11 ; u8 *hw_addr___8 ; u8 *__var___8 ; long tmp___12 ; { rfctl = igb_rd32(hw, 20488U); rfctl = rfctl | 65536U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(rfctl, (void volatile *)hw_addr + 20488U); } else { } if ((unsigned int )hw->mac.type != 1U) { return; } else { tmp___0 = igb_rd32(hw, 22560U); if ((tmp___0 & 131072U) == 0U) { return; } else { } } i = 0; goto ldv_44821; ldv_44820: rxdctl[i] = igb_rd32(hw, (u32 )(i <= 3 ? i * 256 + 10280 : i * 64 + 49192)); __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(rxdctl[i] & 4261412863U, (void volatile *)hw_addr___0 + (unsigned long )(i <= 3 ? i * 256 + 10280 : i * 64 + 49192)); } else { } i = i + 1; ldv_44821: ; if (i <= 3) { goto ldv_44820; } else { } ms_wait = 0; goto ldv_44828; ldv_44827: usleep_range(1000UL, 2000UL); rx_enabled = 0U; i = 0; goto ldv_44824; ldv_44823: tmp___2 = igb_rd32(hw, (u32 )(i <= 3 ? 
i * 256 + 10280 : i * 64 + 49192)); rx_enabled = tmp___2 | rx_enabled; i = i + 1; ldv_44824: ; if (i <= 3) { goto ldv_44823; } else { } if ((rx_enabled & 33554432U) == 0U) { goto ldv_44826; } else { } ms_wait = ms_wait + 1; ldv_44828: ; if (ms_wait <= 9) { goto ldv_44827; } else { } ldv_44826: ; if (ms_wait == 10) { descriptor.modname = "igb"; descriptor.function = "igb_rx_fifo_flush_82575"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "Queue disable timed out after 10ms\n"; descriptor.lineno = 1939U; descriptor.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___3, "Queue disable timed out after 10ms\n"); } else { } } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(rfctl & 4294705151U, (void volatile *)hw_addr___1 + 20488U); } else { } rlpml = igb_rd32(hw, 20484U); __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(0U, (void volatile *)hw_addr___2 + 20484U); } else { } rctl = igb_rd32(hw, 256U); temp_rctl = rctl & 4294967289U; temp_rctl = temp_rctl | 32U; __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(temp_rctl, (void volatile *)hw_addr___3 + 256U); } else { } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___8 = 
ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___8 == 0L) { writel(temp_rctl | 2U, (void volatile *)hw_addr___4 + 256U); } else { } igb_rd32(hw, 8U); usleep_range(2000UL, 3000UL); i = 0; goto ldv_44847; ldv_44846: __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___9 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___9 == 0L) { writel(rxdctl[i], (void volatile *)hw_addr___5 + (unsigned long )(i <= 3 ? i * 256 + 10280 : i * 64 + 49192)); } else { } i = i + 1; ldv_44847: ; if (i <= 3) { goto ldv_44846; } else { } __var___6 = (u8 *)0U; hw_addr___6 = *((u8 * volatile *)(& hw->hw_addr)); tmp___10 = ldv__builtin_expect((unsigned long )hw_addr___6 == (unsigned long )((u8 *)0U), 0L); if (tmp___10 == 0L) { writel(rctl, (void volatile *)hw_addr___6 + 256U); } else { } igb_rd32(hw, 8U); __var___7 = (u8 *)0U; hw_addr___7 = *((u8 * volatile *)(& hw->hw_addr)); tmp___11 = ldv__builtin_expect((unsigned long )hw_addr___7 == (unsigned long )((u8 *)0U), 0L); if (tmp___11 == 0L) { writel(rlpml, (void volatile *)hw_addr___7 + 20484U); } else { } __var___8 = (u8 *)0U; hw_addr___8 = *((u8 * volatile *)(& hw->hw_addr)); tmp___12 = ldv__builtin_expect((unsigned long )hw_addr___8 == (unsigned long )((u8 *)0U), 0L); if (tmp___12 == 0L) { writel(rfctl, (void volatile *)hw_addr___8 + 20488U); } else { } igb_rd32(hw, 16556U); igb_rd32(hw, 16544U); igb_rd32(hw, 16400U); return; } } static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw ) { u32 gcr ; u32 tmp ; s32 ret_val ; u16 pcie_devctl2 ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = igb_rd32(hw, 23296U); gcr = tmp; ret_val = 0; if ((gcr & 61440U) != 0U) { goto out; } else { } if ((gcr & 262144U) == 0U) { gcr = gcr | 4096U; goto out; } else { } ret_val = igb_read_pcie_cap_reg(hw, 40U, & pcie_devctl2); if (ret_val != 0) { goto out; } else { } pcie_devctl2 = (u16 )((unsigned int )pcie_devctl2 | 5U); ret_val = 
igb_write_pcie_cap_reg(hw, 40U, & pcie_devctl2); out: gcr = gcr & 4294901759U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(gcr, (void volatile *)hw_addr + 23296U); } else { } return (ret_val); } } void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw , bool enable , int pf ) { u32 reg_val ; u32 reg_offset ; u8 *hw_addr ; u8 *__var ; long tmp ; { switch ((unsigned int )hw->mac.type) { case 2U: reg_offset = 13568U; goto ldv_44876; case 4U: ; case 5U: reg_offset = 23244U; goto ldv_44876; default: ; return; } ldv_44876: reg_val = igb_rd32(hw, reg_offset); if ((int )enable) { reg_val = reg_val | 65535U; reg_val = (u32 )((1 << pf) | (1 << (pf + 8))) ^ reg_val; } else { reg_val = reg_val & 4294901760U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(reg_val, (void volatile *)hw_addr + (unsigned long )reg_offset); } else { } return; } } void igb_vmdq_set_loopback_pf(struct e1000_hw *hw , bool enable ) { u32 dtxswc ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { switch ((unsigned int )hw->mac.type) { case 2U: dtxswc = igb_rd32(hw, 13568U); if ((int )enable) { dtxswc = dtxswc | 2147483648U; } else { dtxswc = dtxswc & 2147483647U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(dtxswc, (void volatile *)hw_addr + 13568U); } else { } goto ldv_44892; case 5U: ; case 4U: dtxswc = igb_rd32(hw, 23244U); if ((int )enable) { dtxswc = dtxswc | 2147483648U; } else { dtxswc = dtxswc & 2147483647U; } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 
*)0U), 0L); if (tmp___0 == 0L) { writel(dtxswc, (void volatile *)hw_addr___0 + 23244U); } else { } goto ldv_44892; default: ; goto ldv_44892; } ldv_44892: ; return; } } void igb_vmdq_set_replication_pf(struct e1000_hw *hw , bool enable ) { u32 vt_ctl ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = igb_rd32(hw, 22556U); vt_ctl = tmp; if ((int )enable) { vt_ctl = vt_ctl | 1073741824U; } else { vt_ctl = vt_ctl & 3221225471U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(vt_ctl, (void volatile *)hw_addr + 22556U); } else { } return; } } static s32 igb_read_phy_reg_82580(struct e1000_hw *hw , u32 offset , u16 *data ) { s32 ret_val ; { ret_val = (*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } ret_val = igb_read_phy_reg_mdic(hw, offset, data); (*(hw->phy.ops.release))(hw); out: ; return (ret_val); } } static s32 igb_write_phy_reg_82580(struct e1000_hw *hw , u32 offset , u16 data ) { s32 ret_val ; { ret_val = (*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } ret_val = igb_write_phy_reg_mdic(hw, offset, (int )data); (*(hw->phy.ops.release))(hw); out: ; return (ret_val); } } static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw ) { s32 ret_val ; u32 mdicnfg ; u16 nvm_data ; bool tmp ; int tmp___0 ; struct _ddebug descriptor ; struct net_device *tmp___1 ; long tmp___2 ; u8 *hw_addr ; u8 *__var ; long tmp___3 ; { ret_val = 0; nvm_data = 0U; if ((unsigned int )hw->mac.type != 3U) { goto out; } else { } tmp = igb_sgmii_active_82575(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto out; } else { } ret_val = (*(hw->nvm.ops.read))(hw, (unsigned int )hw->bus.func != 0U ? 
(int )((unsigned int )((u16 )((int )hw->bus.func + 1)) * 64U + 36U) : 36, 1, & nvm_data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_reset_mdicnfg_82580"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 2192U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___1, "NVM Read Error\n"); } else { } goto out; } else { } mdicnfg = igb_rd32(hw, 3588U); if (((int )nvm_data & 4) != 0) { mdicnfg = mdicnfg | 2147483648U; } else { } if (((int )nvm_data & 8) != 0) { mdicnfg = mdicnfg | 1073741824U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(mdicnfg, (void volatile *)hw_addr + 3588U); } else { } out: ; return (ret_val); } } static s32 igb_reset_hw_82580(struct e1000_hw *hw ) { s32 ret_val ; u16 swmbsw_mask ; u32 ctrl ; bool global_device_reset ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; u8 *hw_addr ; u8 *__var ; long tmp___3 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___4 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___5 ; s32 tmp___6 ; u32 tmp___7 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___8 ; struct _ddebug descriptor___1 ; struct net_device *tmp___9 ; long tmp___10 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___11 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___12 ; struct _ddebug descriptor___2 ; struct net_device *tmp___13 ; long tmp___14 ; { ret_val = 0; swmbsw_mask = 256U; 
global_device_reset = hw->dev_spec._82575.global_device_reset; hw->dev_spec._82575.global_device_reset = 0; if ((unsigned int )hw->mac.type == 3U) { global_device_reset = 0; } else { } ctrl = igb_rd32(hw, 0U); ret_val = igb_disable_pcie_master(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_reset_hw_82580"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "PCI-E Master disable polling has failed.\n"; descriptor.lineno = 2237U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PCI-E Master disable polling has failed.\n"); } else { } } else { } descriptor___0.modname = "igb"; descriptor___0.function = "igb_reset_hw_82580"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = "Masking off all interrupts\n"; descriptor___0.lineno = 2239U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Masking off all interrupts\n"); } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(4294967295U, (void volatile *)hw_addr + 216U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long 
)hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(0U, (void volatile *)hw_addr___0 + 256U); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(8U, (void volatile *)hw_addr___1 + 1024U); } else { } igb_rd32(hw, 8U); usleep_range(10000UL, 11000UL); if ((int )global_device_reset) { tmp___6 = (*(hw->mac.ops.acquire_swfw_sync))(hw, (int )swmbsw_mask); if (tmp___6 != 0) { global_device_reset = 0; } else { } } else { } if ((int )global_device_reset) { tmp___7 = igb_rd32(hw, 8U); if ((tmp___7 & 1048576U) == 0U) { ctrl = ctrl | 536870912U; } else { ctrl = ctrl | 67108864U; } } else { ctrl = ctrl | 67108864U; } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___8 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___8 == 0L) { writel(ctrl, (void volatile *)hw_addr___2); } else { } igb_rd32(hw, 8U); if ((int )global_device_reset) { usleep_range(5000UL, 6000UL); } else { } ret_val = igb_get_auto_rd_done(hw); if (ret_val != 0) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_reset_hw_82580"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___1.format = "Auto Read Done did not complete\n"; descriptor___1.lineno = 2271U; descriptor___1.flags = 0U; tmp___10 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___10 != 0L) { tmp___9 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___9, "Auto Read Done did not complete\n"); } else { } } else { } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___11 = 
ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___11 == 0L) { writel(1048576U, (void volatile *)hw_addr___3 + 8U); } else { } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___12 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___12 == 0L) { writel(4294967295U, (void volatile *)hw_addr___4 + 216U); } else { } igb_rd32(hw, 192U); ret_val = igb_reset_mdicnfg_82580(hw); if (ret_val != 0) { descriptor___2.modname = "igb"; descriptor___2.function = "igb_reset_hw_82580"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___2.format = "Could not reset MDICNFG based on EEPROM\n"; descriptor___2.lineno = 2283U; descriptor___2.flags = 0U; tmp___14 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___14 != 0L) { tmp___13 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___13, "Could not reset MDICNFG based on EEPROM\n"); } else { } } else { } ret_val = igb_check_alt_mac_addr(hw); if ((int )global_device_reset) { (*(hw->mac.ops.release_swfw_sync))(hw, (int )swmbsw_mask); } else { } return (ret_val); } } u16 igb_rxpbs_adjust_82580(u32 data ) { u16 ret_val ; { ret_val = 0U; if (data <= 10U) { ret_val = e1000_82580_rxpbs_table[data]; } else { } return (ret_val); } } static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw , u16 offset ) { s32 ret_val ; u16 checksum ; u16 i ; u16 nvm_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { ret_val = 0; checksum = 0U; i = offset; goto ldv_44981; ldv_44980: ret_val = (*(hw->nvm.ops.read))(hw, (int )i, 1, & nvm_data); if (ret_val != 0) { 
descriptor.modname = "igb"; descriptor.function = "igb_validate_nvm_checksum_with_offset"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 2334U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error\n"); } else { } goto out; } else { } checksum = (int )checksum + (int )nvm_data; i = (u16 )((int )i + 1); ldv_44981: ; if ((int )i < (int )offset + 64) { goto ldv_44980; } else { } if ((unsigned int )checksum != 47802U) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_validate_nvm_checksum_with_offset"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = "NVM Checksum Invalid\n"; descriptor___0.lineno = 2341U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Checksum Invalid\n"); } else { } ret_val = -1; goto out; } else { } out: ; return (ret_val); } } static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw , u16 offset ) { s32 ret_val ; u16 checksum ; u16 i ; u16 nvm_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { checksum = 0U; i = offset; goto ldv_44996; ldv_44995: ret_val = (*(hw->nvm.ops.read))(hw, (int )i, 1, & 
nvm_data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_update_nvm_checksum_with_offset"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "NVM Read Error while updating checksum.\n"; descriptor.lineno = 2369U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error while updating checksum.\n"); } else { } goto out; } else { } checksum = (int )checksum + (int )nvm_data; i = (u16 )((int )i + 1); ldv_44996: ; if ((int )i < (int )offset + 63) { goto ldv_44995; } else { } checksum = 47802U - (unsigned int )checksum; ret_val = (*(hw->nvm.ops.write))(hw, (int )((unsigned int )offset + 63U), 1, & checksum); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_update_nvm_checksum_with_offset"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = "NVM Write Error while updating checksum.\n"; descriptor___0.lineno = 2378U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Write Error while updating checksum.\n"); } else { } } else { } out: ; return (ret_val); } } static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw ) { s32 ret_val ; u16 eeprom_regions_count ; u16 j ; u16 nvm_data ; u16 nvm_offset ; struct _ddebug descriptor ; struct 
net_device *tmp ; long tmp___0 ; { ret_val = 0; eeprom_regions_count = 1U; ret_val = (*(hw->nvm.ops.read))(hw, 3, 1, & nvm_data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_validate_nvm_checksum_82580"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 2401U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error\n"); } else { } goto out; } else { } if ((int )((short )nvm_data) < 0) { eeprom_regions_count = 4U; } else { } j = 0U; goto ldv_45011; ldv_45010: nvm_offset = (unsigned int )j != 0U ? (unsigned int )((u16 )((int )j + 1)) * 64U : 0U; ret_val = igb_validate_nvm_checksum_with_offset(hw, (int )nvm_offset); if (ret_val != 0) { goto out; } else { } j = (u16 )((int )j + 1); ldv_45011: ; if ((int )j < (int )eeprom_regions_count) { goto ldv_45010; } else { } out: ; return (ret_val); } } static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw ) { s32 ret_val ; u16 j ; u16 nvm_data ; u16 nvm_offset ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { ret_val = (*(hw->nvm.ops.read))(hw, 3, 1, & nvm_data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_update_nvm_checksum_82580"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "NVM Read Error while updating checksum compatibility 
bit.\n"; descriptor.lineno = 2440U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error while updating checksum compatibility bit.\n"); } else { } goto out; } else { } if ((int )((short )nvm_data) >= 0) { nvm_data = (u16 )((unsigned int )nvm_data | 32768U); ret_val = (*(hw->nvm.ops.write))(hw, 3, 1, & nvm_data); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_update_nvm_checksum_82580"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor___0.format = "NVM Write Error while updating checksum compatibility bit.\n"; descriptor___0.lineno = 2450U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Write Error while updating checksum compatibility bit.\n"); } else { } goto out; } else { } } else { } j = 0U; goto ldv_45025; ldv_45024: nvm_offset = (unsigned int )j != 0U ? (unsigned int )((u16 )((int )j + 1)) * 64U : 0U; ret_val = igb_update_nvm_checksum_with_offset(hw, (int )nvm_offset); if (ret_val != 0) { goto out; } else { } j = (u16 )((int )j + 1); ldv_45025: ; if ((unsigned int )j <= 3U) { goto ldv_45024; } else { } out: ; return (ret_val); } } static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw ) { s32 ret_val ; u16 j ; u16 nvm_offset ; { ret_val = 0; j = 0U; goto ldv_45035; ldv_45034: nvm_offset = (unsigned int )j != 0U ? 
(unsigned int )((u16 )((int )j + 1)) * 64U : 0U; ret_val = igb_validate_nvm_checksum_with_offset(hw, (int )nvm_offset); if (ret_val != 0) { goto out; } else { } j = (u16 )((int )j + 1); ldv_45035: ; if ((unsigned int )j <= 3U) { goto ldv_45034; } else { } out: ; return (ret_val); } }
/* igb_update_nvm_checksum_i350 - recompute and write the NVM checksum for all
 * four i350 EEPROM sections.  The section word offsets are 0, 128, 192 and
 * 256, produced by (j != 0 ? (j + 1) * 64 : 0) for j = 0..3.  Stops at the
 * first failing section and returns that error code; 0 on success. */
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw ) { s32 ret_val ; u16 j ; u16 nvm_offset ; { ret_val = 0; j = 0U; goto ldv_45045; ldv_45044: nvm_offset = (unsigned int )j != 0U ? (unsigned int )((u16 )((int )j + 1)) * 64U : 0U; ret_val = igb_update_nvm_checksum_with_offset(hw, (int )nvm_offset); if (ret_val != 0) { goto out; } else { } j = (u16 )((int )j + 1); ldv_45045: ; if ((unsigned int )j <= 3U) { goto ldv_45044; } else { } out: ; return (ret_val); } }
/* __igb_access_emi_reg - one EMI register access: write the target EMI
 * address to PHY register 16, then either read (read != 0) or write PHY
 * register 17, which carries the EMI data.  Returns the first PHY-op error,
 * or 0 on success. */
static s32 __igb_access_emi_reg(struct e1000_hw *hw , u16 address , u16 *data , bool read ) { s32 ret_val ; { ret_val = 0; ret_val = (*(hw->phy.ops.write_reg))(hw, 16U, (int )address); if (ret_val != 0) { return (ret_val); } else { } if ((int )read) { ret_val = (*(hw->phy.ops.read_reg))(hw, 17U, data); } else { ret_val = (*(hw->phy.ops.write_reg))(hw, 17U, (int )*data); } return (ret_val); } }
/* igb_read_emi_reg - thin wrapper: EMI read (read flag = 1) of @addr into @data. */
s32 igb_read_emi_reg(struct e1000_hw *hw , u16 addr , u16 *data ) { s32 tmp ; { tmp = __igb_access_emi_reg(hw, (int )addr, data, 1); return (tmp); } }
/* igb_set_eee_i350 - configure Energy Efficient Ethernet advertisement for
 * 1G (@adv1G) and 100M (@adv100M).  Bails out immediately unless
 * hw->mac.type > 3 and hw->phy.media_type == 1 (presumably the copper media
 * type -- confirm against the driver's e1000_media_type enum). */
s32 igb_set_eee_i350(struct e1000_hw *hw , bool adv1G , bool adv100M ) { u32 ipcnfg ; u32 eeer ; u32 eee_su ; u32 tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; u8 *hw_addr ; u8 *__var ; long tmp___2 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___3 ; { if ((unsigned int )hw->mac.type <= 3U || (unsigned int )hw->phy.media_type != 1U) { goto out; } else { } ipcnfg = igb_rd32(hw, 3640U); eeer = igb_rd32(hw, 3632U); if (! 
hw->dev_spec._82575.eee_disable) { tmp = igb_rd32(hw, 3636U); eee_su = tmp; if ((int )adv100M) { ipcnfg = ipcnfg | 4U; } else { ipcnfg = ipcnfg & 4294967291U; } if ((int )adv1G) { ipcnfg = ipcnfg | 8U; } else { ipcnfg = ipcnfg & 4294967287U; } eeer = eeer | 458752U; if ((eee_su & 8388608U) != 0U) { descriptor.modname = "igb"; descriptor.function = "igb_set_eee_i350"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_82575.c"; descriptor.format = "LPI Clock Stop Bit should not be set!\n"; descriptor.lineno = 2590U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "LPI Clock Stop Bit should not be set!\n"); } else { } } else { } } else { ipcnfg = ipcnfg & 4294967283U; eeer = eeer & 4294508543U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(ipcnfg, (void volatile *)hw_addr + 3640U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(eeer, (void volatile *)hw_addr___0 + 3632U); } else { } igb_rd32(hw, 3640U); igb_rd32(hw, 3632U); out: ; return (0); } } s32 igb_set_eee_i354(struct e1000_hw *hw , bool adv1G , bool adv100M ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; { phy = & hw->phy; ret_val = 0; if ((unsigned int )hw->phy.media_type != 1U || phy->id != 21040800U) { goto out; } else { } if (! 
hw->dev_spec._82575.eee_disable) { ret_val = (*(phy->ops.write_reg))(hw, 22U, 18); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 0U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (u16 )((unsigned int )phy_data | 1U); ret_val = (*(phy->ops.write_reg))(hw, 0U, (int )phy_data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.write_reg))(hw, 22U, 0); if (ret_val != 0) { goto out; } else { } ret_val = igb_read_xmdio_reg(hw, 60, 7, & phy_data); if (ret_val != 0) { goto out; } else { } if ((int )adv100M) { phy_data = (u16 )((unsigned int )phy_data | 2U); } else { phy_data = (unsigned int )phy_data & 65533U; } if ((int )adv1G) { phy_data = (u16 )((unsigned int )phy_data | 4U); } else { phy_data = (unsigned int )phy_data & 65531U; } ret_val = igb_write_xmdio_reg(hw, 60, 7, (int )phy_data); } else { ret_val = igb_read_xmdio_reg(hw, 60, 7, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (unsigned int )phy_data & 65529U; ret_val = igb_write_xmdio_reg(hw, 60, 7, (int )phy_data); } out: ; return (ret_val); } } s32 igb_get_eee_status_i354(struct e1000_hw *hw , bool *status ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; { phy = & hw->phy; ret_val = 0; if ((unsigned int )hw->phy.media_type != 1U || phy->id != 21040800U) { goto out; } else { } ret_val = igb_read_xmdio_reg(hw, 1, 3, & phy_data); if (ret_val != 0) { goto out; } else { } *status = ((int )phy_data & 3072) != 0; out: ; return (ret_val); } } static u8 const e1000_emc_temp_data[4U] = { 0U, 1U, 35U, 42U}; static u8 const e1000_emc_therm_limit[4U] = { 32U, 25U, 26U, 48U}; static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw ) { u16 ets_offset ; u16 ets_cfg ; u16 ets_sensor ; u8 num_sensors ; u8 sensor_index ; u8 sensor_location ; u8 i ; struct e1000_thermal_sensor_data *data ; u32 tmp ; { data = & hw->mac.thermal_sensor_data; if ((unsigned int )hw->mac.type != 4U || (unsigned int )hw->bus.func != 0U) { return (14); 
} else { } tmp = igb_rd32(hw, 33024U); data->sensor[0].temp = (u8 )tmp; (*(hw->nvm.ops.read))(hw, 62, 1, & ets_offset); if ((unsigned int )ets_offset == 0U || (unsigned int )ets_offset == 65535U) { return (0); } else { } (*(hw->nvm.ops.read))(hw, (int )ets_offset, 1, & ets_cfg); if (((int )ets_cfg & 56) >> 3 != 0) { return (14); } else { } num_sensors = (unsigned int )((u8 )ets_cfg) & 7U; if ((unsigned int )num_sensors > 3U) { num_sensors = 3U; } else { } i = 1U; goto ldv_45107; ldv_45106: (*(hw->nvm.ops.read))(hw, (int )((u16 )i) + (int )ets_offset, 1, & ets_sensor); sensor_index = (u8 )(((int )ets_sensor & 768) >> 8); sensor_location = (u8 )(((int )ets_sensor & 15360) >> 10); if ((unsigned int )sensor_location != 0U) { (*(hw->phy.ops.read_i2c_byte))(hw, (int )e1000_emc_temp_data[(int )sensor_index], 248, & data->sensor[(int )i].temp); } else { } i = (u8 )((int )i + 1); ldv_45107: ; if ((int )i < (int )num_sensors) { goto ldv_45106; } else { } return (0); } } static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw ) { u16 ets_offset ; u16 ets_cfg ; u16 ets_sensor ; u8 low_thresh_delta ; u8 num_sensors ; u8 sensor_index ; u8 sensor_location ; u8 therm_limit ; u8 i ; struct e1000_thermal_sensor_data *data ; u32 tmp ; u32 tmp___0 ; { data = & hw->mac.thermal_sensor_data; if ((unsigned int )hw->mac.type != 4U || (unsigned int )hw->bus.func != 0U) { return (14); } else { } memset((void *)data, 0, 12UL); data->sensor[0].location = 1U; tmp = igb_rd32(hw, 33036U); data->sensor[0].caution_thresh = (u8 )tmp; tmp___0 = igb_rd32(hw, 33028U); data->sensor[0].max_op_thresh = (u8 )tmp___0; (*(hw->nvm.ops.read))(hw, 62, 1, & ets_offset); if ((unsigned int )ets_offset == 0U || (unsigned int )ets_offset == 65535U) { return (0); } else { } (*(hw->nvm.ops.read))(hw, (int )ets_offset, 1, & ets_cfg); if (((int )ets_cfg & 56) >> 3 != 0) { return (14); } else { } low_thresh_delta = (u8 )(((int )ets_cfg & 1984) >> 6); num_sensors = (unsigned int )((u8 )ets_cfg) & 7U; i = 1U; 
goto ldv_45123; ldv_45122: (*(hw->nvm.ops.read))(hw, (int )((u16 )i) + (int )ets_offset, 1, & ets_sensor); sensor_index = (u8 )(((int )ets_sensor & 768) >> 8); sensor_location = (u8 )(((int )ets_sensor & 15360) >> 10); therm_limit = (u8 )ets_sensor; (*(hw->phy.ops.write_i2c_byte))(hw, (int )e1000_emc_therm_limit[(int )sensor_index], 248, (int )therm_limit); if ((unsigned int )i <= 2U && (unsigned int )sensor_location != 0U) { data->sensor[(int )i].location = sensor_location; data->sensor[(int )i].caution_thresh = therm_limit; data->sensor[(int )i].max_op_thresh = (int )therm_limit - (int )low_thresh_delta; } else { } i = (u8 )((int )i + 1); ldv_45123: ; if ((int )i <= (int )num_sensors) { goto ldv_45122; } else { } return (0); } } static struct e1000_mac_operations e1000_mac_ops_82575 = {& igb_check_for_link_82575, 0, & igb_init_hw_82575, 0, 0, & igb_rar_set, & igb_read_mac_addr_82575, & igb_get_link_up_info_82575, 0, 0, & igb_get_thermal_sensor_data_generic, & igb_init_thermal_sensor_thresh_generic}; static struct e1000_phy_operations e1000_phy_ops_82575 = {& igb_acquire_phy_82575, 0, 0, 0, & igb_get_cfg_done_82575, 0, 0, 0, & igb_release_phy_82575, 0, 0, 0, 0, & igb_read_i2c_byte, & igb_write_i2c_byte}; static struct e1000_nvm_operations e1000_nvm_ops_82575 = {& igb_acquire_nvm_82575, & igb_read_nvm_eerd, & igb_release_nvm_82575, & igb_write_nvm_spi, 0, 0, 0}; struct e1000_info const e1000_82575_info = {& igb_get_invariants_82575, & e1000_mac_ops_82575, & e1000_phy_ops_82575, & e1000_nvm_ops_82575}; extern int ldv_release_18(void) ; extern int ldv_probe_18(void) ; void ldv_initialize_e1000_nvm_operations_16(void) { void *tmp ; { tmp = ldv_init_zalloc(1176UL); e1000_nvm_ops_82575_group0 = (struct e1000_hw *)tmp; return; } } void ldv_initialize_e1000_mac_operations_18(void) { void *tmp ; { tmp = ldv_init_zalloc(1176UL); e1000_mac_ops_82575_group0 = (struct e1000_hw *)tmp; return; } } void ldv_initialize_e1000_phy_operations_17(void) { void *tmp ; { tmp = 
ldv_init_zalloc(1176UL); e1000_phy_ops_82575_group0 = (struct e1000_hw *)tmp; return; } } void ldv_main_exported_18(void) { u8 *ldvarg15 ; void *tmp ; u16 *ldvarg16 ; void *tmp___0 ; u32 ldvarg14 ; u16 *ldvarg17 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg15 = (u8 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg16 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg17 = (u16 *)tmp___1; ldv_memset((void *)(& ldvarg14), 0, 4UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_18 == 2) { igb_read_mac_addr_82575(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 2; } else { } goto ldv_45150; case 1: ; if (ldv_state_variable_18 == 1) { igb_get_thermal_sensor_data_generic(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { igb_get_thermal_sensor_data_generic(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 2; } else { } goto ldv_45150; case 2: ; if (ldv_state_variable_18 == 1) { igb_check_for_link_82575(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { igb_check_for_link_82575(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 2; } else { } goto ldv_45150; case 3: ; if (ldv_state_variable_18 == 1) { igb_get_link_up_info_82575(e1000_mac_ops_82575_group0, ldvarg17, ldvarg16); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { igb_get_link_up_info_82575(e1000_mac_ops_82575_group0, ldvarg17, ldvarg16); ldv_state_variable_18 = 2; } else { } goto ldv_45150; case 4: ; if (ldv_state_variable_18 == 1) { igb_init_thermal_sensor_thresh_generic(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { igb_init_thermal_sensor_thresh_generic(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 2; } else { } goto ldv_45150; case 5: ; if (ldv_state_variable_18 == 1) { igb_init_hw_82575(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 1; } else { } if 
(ldv_state_variable_18 == 2) { igb_init_hw_82575(e1000_mac_ops_82575_group0); ldv_state_variable_18 = 2; } else { } goto ldv_45150; case 6: ; if (ldv_state_variable_18 == 1) { igb_rar_set(e1000_mac_ops_82575_group0, ldvarg15, ldvarg14); ldv_state_variable_18 = 1; } else { } if (ldv_state_variable_18 == 2) { igb_rar_set(e1000_mac_ops_82575_group0, ldvarg15, ldvarg14); ldv_state_variable_18 = 2; } else { } goto ldv_45150; case 7: ; if (ldv_state_variable_18 == 2) { ldv_release_18(); ldv_state_variable_18 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_45150; case 8: ; if (ldv_state_variable_18 == 1) { ldv_probe_18(); ldv_state_variable_18 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_45150; default: ldv_stop(); } ldv_45150: ; return; } } void ldv_main_exported_16(void) { u16 *ldvarg19 ; void *tmp ; u16 *ldvarg22 ; void *tmp___0 ; u16 ldvarg23 ; u16 ldvarg20 ; u16 ldvarg21 ; u16 ldvarg24 ; int tmp___1 ; { tmp = ldv_init_zalloc(2UL); ldvarg19 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg22 = (u16 *)tmp___0; ldv_memset((void *)(& ldvarg23), 0, 2UL); ldv_memset((void *)(& ldvarg20), 0, 2UL); ldv_memset((void *)(& ldvarg21), 0, 2UL); ldv_memset((void *)(& ldvarg24), 0, 2UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_16 == 2) { igb_release_nvm_82575(e1000_nvm_ops_82575_group0); ldv_state_variable_16 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_45170; case 1: ; if (ldv_state_variable_16 == 1) { igb_acquire_nvm_82575(e1000_nvm_ops_82575_group0); ldv_state_variable_16 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_45170; case 2: ; if (ldv_state_variable_16 == 2) { igb_write_nvm_spi(e1000_nvm_ops_82575_group0, (int )ldvarg24, (int )ldvarg23, ldvarg22); ldv_state_variable_16 = 2; } else { } goto ldv_45170; case 3: ; if (ldv_state_variable_16 == 2) { igb_read_nvm_eerd(e1000_nvm_ops_82575_group0, (int )ldvarg21, (int )ldvarg20, ldvarg19); ldv_state_variable_16 = 2; } else { } goto ldv_45170; default: ldv_stop(); } 
ldv_45170: ; return; } } void ldv_main_exported_17(void) { u8 *ldvarg6 ; void *tmp ; u8 ldvarg9 ; u8 ldvarg10 ; u8 ldvarg7 ; u8 ldvarg11 ; u8 ldvarg8 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg6 = (u8 *)tmp; ldv_memset((void *)(& ldvarg9), 0, 1UL); ldv_memset((void *)(& ldvarg10), 0, 1UL); ldv_memset((void *)(& ldvarg7), 0, 1UL); ldv_memset((void *)(& ldvarg11), 0, 1UL); ldv_memset((void *)(& ldvarg8), 0, 1UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_17 == 2) { igb_release_phy_82575(e1000_phy_ops_82575_group0); ldv_state_variable_17 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_45185; case 1: ; if (ldv_state_variable_17 == 1) { igb_get_cfg_done_82575(e1000_phy_ops_82575_group0); ldv_state_variable_17 = 1; } else { } if (ldv_state_variable_17 == 2) { igb_get_cfg_done_82575(e1000_phy_ops_82575_group0); ldv_state_variable_17 = 2; } else { } goto ldv_45185; case 2: ; if (ldv_state_variable_17 == 2) { igb_write_i2c_byte(e1000_phy_ops_82575_group0, (int )ldvarg11, (int )ldvarg10, (int )ldvarg9); ldv_state_variable_17 = 2; } else { } goto ldv_45185; case 3: ; if (ldv_state_variable_17 == 1) { igb_acquire_phy_82575(e1000_phy_ops_82575_group0); ldv_state_variable_17 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_45185; case 4: ; if (ldv_state_variable_17 == 2) { igb_read_i2c_byte(e1000_phy_ops_82575_group0, (int )ldvarg8, (int )ldvarg7, ldvarg6); ldv_state_variable_17 = 2; } else { } goto ldv_45185; default: ldv_stop(); } ldv_45185: ; return; } } void ldv_main_exported_15(void) { struct e1000_hw *ldvarg51 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1176UL); ldvarg51 = (struct e1000_hw *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_15 == 1) { igb_get_invariants_82575(ldvarg51); ldv_state_variable_15 = 1; } else { } goto ldv_45196; default: ldv_stop(); } ldv_45196: ; return; } } bool ldv_queue_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 
, struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_130(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_131(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_132(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_133(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_134(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_135(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_136(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_137(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 
ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0);
/* NOTE(review): this second return is unreachable dead code -- the wrapper
 * returns the LDV lock-model verdict (tmp___0) and the result of the real
 * mutex_trylock() stored in ldv_func_res is discarded.  This looks like an
 * artifact of the generated instrumentation, not driver logic. */
return (ldv_func_res); } }
/* LDV wrappers: pair each real mutex operation with the matching lock-model
 * call so the verifier can track the mutex_of_device / i_mutex_of_inode
 * lock state alongside the concrete mutex_lock()/mutex_unlock() calls. */
void ldv_mutex_unlock_138(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_139(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_140(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } }
/* Forward declarations for the section that follows (the instrumented
 * e1000_mac.c functions): numbered LDV mutex/workqueue wrappers plus igb
 * PHY/flow-control helpers. */
__inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_165(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_163(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_166(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_167(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_162(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_164(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_168(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_157(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_159(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_158(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_161(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_160(struct workqueue_struct *ldv_func_arg1 ) ; s32 igb_phy_has_link(struct e1000_hw *hw , u32 iterations , u32 usec_interval , bool *success ) ; s32 igb_valid_led_default_i210(struct e1000_hw *hw , u16 *data ) ; static s32 igb_set_default_fc(struct e1000_hw *hw ) ; static s32 
igb_set_fc_watermarks(struct e1000_hw *hw ) ;
/* igb_get_bus_info_pcie - fill hw->bus from the PCIe capability and STATUS.
 * Reads the PCIe link-status capability word (offset 18): bits 3:0 select
 * the speed (1 -> speed code 6, 2 -> speed code 7, otherwise 0 = unknown)
 * and bits 9:4 the link width.  A failed capability read leaves speed and
 * width at the unknown value 0.  The PCI function number is taken from
 * bits 3:2 of the register at offset 8 (STATUS).  Always returns 0. */
s32 igb_get_bus_info_pcie(struct e1000_hw *hw ) { struct e1000_bus_info *bus ; s32 ret_val ; u32 reg ; u16 pcie_link_status ; { bus = & hw->bus; bus->type = 3; ret_val = igb_read_pcie_cap_reg(hw, 18U, & pcie_link_status); if (ret_val != 0) { bus->width = 0; bus->speed = 0; } else { switch ((int )pcie_link_status & 15) { case 1: bus->speed = 6; goto ldv_47333; case 2: bus->speed = 7; goto ldv_47333; default: bus->speed = 0; goto ldv_47333; } ldv_47333: bus->width = (enum e1000_bus_width )(((int )pcie_link_status & 1008) >> 4); } reg = igb_rd32(hw, 8U); bus->func = (u16 )((reg & 12U) >> 2); return (0); } }
/* igb_clear_vfta - zero all 128 VLAN filter table entries.  Each iteration
 * is an inlined wr32(): the MMIO write at byte offset (offset << 2) + 22016
 * (0x5600, the VFTA array) is skipped when hw->hw_addr has been detached
 * (NULL); the igb_rd32(hw, 8U) that follows is a STATUS read-back,
 * presumably to flush the posted write -- confirm against the driver's
 * wrfl() macro. */
void igb_clear_vfta(struct e1000_hw *hw ) { u32 offset ; u8 *hw_addr ; u8 *__var ; long tmp ; { offset = 0U; goto ldv_47344; ldv_47343: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(0U, (void volatile *)hw_addr + (unsigned long )((offset << 2) + 22016U)); } else { } igb_rd32(hw, 8U); offset = offset + 1U; ldv_47344: ; if (offset <= 127U) { goto ldv_47343; } else { } return; } }
/* igb_write_vfta - write a single VFTA entry at @offset (same NULL-hw_addr
 * guard and STATUS read-back pattern as igb_clear_vfta above). */
static void igb_write_vfta(struct e1000_hw *hw , u32 offset , u32 value ) { u8 *hw_addr ; u8 *__var ; long tmp ; { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(value, (void volatile *)hw_addr + (unsigned long )((offset << 2) + 22016U)); } else { } igb_rd32(hw, 8U); return; } }
/* igb_clear_vfta_i350 - i350 variant of the VFTA clear: each of the 128
 * entries is written ten times (inner loop i = 0..9) before the single
 * STATUS read-back per entry. */
void igb_clear_vfta_i350(struct e1000_hw *hw ) { u32 offset ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; { offset = 0U; goto ldv_47366; ldv_47365: i = 0; goto ldv_47363; ldv_47362: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(0U, (void volatile *)hw_addr + (unsigned long )((offset << 2) + 
22016U)); } else { } i = i + 1; ldv_47363: ; if (i <= 9) { goto ldv_47362; } else { } igb_rd32(hw, 8U); offset = offset + 1U; ldv_47366: ; if (offset <= 127U) { goto ldv_47365; } else { } return; } } static void igb_write_vfta_i350(struct e1000_hw *hw , u32 offset , u32 value ) { int i ; u8 *hw_addr ; u8 *__var ; long tmp ; { i = 0; goto ldv_47378; ldv_47377: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(value, (void volatile *)hw_addr + (unsigned long )((offset << 2) + 22016U)); } else { } i = i + 1; ldv_47378: ; if (i <= 9) { goto ldv_47377; } else { } igb_rd32(hw, 8U); return; } } void igb_init_rx_addrs(struct e1000_hw *hw , u16 rar_count ) { u32 i ; u8 mac_addr[6U] ; unsigned int tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; struct _ddebug descriptor___0 ; struct net_device *tmp___2 ; long tmp___3 ; { mac_addr[0] = 0U; tmp = 1U; while (1) { if (tmp >= 6U) { break; } else { } mac_addr[tmp] = (unsigned char)0; tmp = tmp + 1U; } descriptor.modname = "igb"; descriptor.function = "igb_init_rx_addrs"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Programming MAC Address into RAR[0]\n"; descriptor.lineno = 175U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "Programming MAC Address into RAR[0]\n"); } else { } (*(hw->mac.ops.rar_set))(hw, (u8 *)(& hw->mac.addr), 0U); descriptor___0.modname = "igb"; descriptor___0.function = "igb_init_rx_addrs"; descriptor___0.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___0.format = "Clearing RAR[1-%u]\n"; descriptor___0.lineno = 180U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___2, "Clearing RAR[1-%u]\n", (int )rar_count + -1); } else { } i = 1U; goto ldv_47390; ldv_47389: (*(hw->mac.ops.rar_set))(hw, (u8 *)(& mac_addr), i); i = i + 1U; ldv_47390: ; if ((u32 )rar_count > i) { goto ldv_47389; } else { } return; } } s32 igb_vfta_set(struct e1000_hw *hw , u32 vid , bool add ) { u32 index ; u32 mask ; u32 vfta ; struct igb_adapter *adapter ; s32 ret_val ; { index = (vid >> 5) & 127U; mask = (u32 )(1 << ((int )vid & 31)); adapter = (struct igb_adapter *)hw->back; ret_val = 0; vfta = *(adapter->shadow_vfta + (unsigned long )index); if (((vfta & mask) != 0U) == (int )add) { ret_val = -3; } else if ((int )add) { vfta = vfta | mask; } else { vfta = ~ mask & vfta; } if ((unsigned int )hw->mac.type == 4U || (unsigned int )hw->mac.type == 5U) { igb_write_vfta_i350(hw, index, vfta); } else { igb_write_vfta(hw, index, vfta); } *(adapter->shadow_vfta + (unsigned long )index) = vfta; return (ret_val); } } s32 igb_check_alt_mac_addr(struct e1000_hw *hw ) { u32 i ; s32 ret_val ; u16 offset ; u16 nvm_alt_mac_addr_offset ; u16 nvm_data ; u8 alt_mac_addr[6U] ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; bool tmp___5 ; { ret_val = 0; if ((unsigned int )hw->mac.type > 2U) { goto out; } else { } ret_val = (*(hw->nvm.ops.read))(hw, 55, 1, & nvm_alt_mac_addr_offset); if (ret_val 
/* Continuation of igb_check_alt_mac_addr(): a pointer of 0xFFFF or 0 means
 * no alternate address is present.  The per-PCI-function offsets (+3/+6/+9
 * NVM words for bus.func 1/2/3) select that function's slot; the 6-byte
 * address is then read one 16-bit word at a time (low byte first).
 * An address with the multicast bit set is ignored; otherwise it is
 * programmed into RAR[0] via mac.ops.rar_set. */
!= 0) { descriptor.modname = "igb"; descriptor.function = "igb_check_alt_mac_addr"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 249U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error\n"); } else { } goto out; } else { } if ((unsigned int )nvm_alt_mac_addr_offset == 65535U || (unsigned int )nvm_alt_mac_addr_offset == 0U) { goto out; } else { } if ((unsigned int )hw->bus.func == 1U) { nvm_alt_mac_addr_offset = (unsigned int )nvm_alt_mac_addr_offset + 3U; } else { } if ((unsigned int )hw->bus.func == 2U) { nvm_alt_mac_addr_offset = (unsigned int )nvm_alt_mac_addr_offset + 6U; } else { } if ((unsigned int )hw->bus.func == 3U) { nvm_alt_mac_addr_offset = (unsigned int )nvm_alt_mac_addr_offset + 9U; } else { } i = 0U; goto ldv_47416; ldv_47415: offset = (int )((u16 )(i >> 1)) + (int )nvm_alt_mac_addr_offset; ret_val = (*(hw->nvm.ops.read))(hw, (int )offset, 1, & nvm_data); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_check_alt_mac_addr"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___0.format = "NVM Read Error\n"; descriptor___0.lineno = 269U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Read Error\n"); } else { } goto 
/* igb_rar_set() (defined below): packs the 6-byte @addr into RAR low/high
 * words, sets the Address Valid bit (0x80000000) for a non-zero address,
 * and writes both halves with a status-read flush after each; the register
 * offset arithmetic differs for index <= 15 vs. the extended RAR range. */
out; } else { } alt_mac_addr[i] = (unsigned char )nvm_data; alt_mac_addr[i + 1U] = (unsigned char )((int )nvm_data >> 8); i = i + 2U; ldv_47416: ; if (i <= 5U) { goto ldv_47415; } else { } tmp___5 = is_multicast_ether_addr((u8 const *)(& alt_mac_addr)); if ((int )tmp___5) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_check_alt_mac_addr"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___1.format = "Ignoring Alternate Mac Address with MC bit set\n"; descriptor___1.lineno = 279U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Ignoring Alternate Mac Address with MC bit set\n"); } else { } goto out; } else { } (*(hw->mac.ops.rar_set))(hw, (u8 *)(& alt_mac_addr), 0U); out: ; return (ret_val); } } void igb_rar_set(struct e1000_hw *hw , u8 *addr , u32 index ) { u32 rar_low ; u32 rar_high ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { rar_low = (((unsigned int )*addr | ((unsigned int )*(addr + 1UL) << 8)) | ((unsigned int )*(addr + 2UL) << 16)) | ((unsigned int )*(addr + 3UL) << 24); rar_high = (unsigned int )*(addr + 4UL) | ((unsigned int )*(addr + 5UL) << 8); if (rar_low != 0U || rar_high != 0U) { rar_high = rar_high | 2147483648U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(rar_low, (void volatile *)hw_addr + (unsigned long )(index <= 15U ? 
/* End of igb_rar_set(), then: igb_mta_set() sets one bit in the Multicast
 * Table Array (register base 20992 == 0x5200): hash_reg selects the 32-bit
 * MTA word (masked by mta_reg_count-1), hash_bit the bit; read-modify-write
 * followed by a status-read flush.  igb_hash_mc_addr() computes the MTA hash
 * from the top bits of the multicast address: bit_shift is derived from the
 * table size, then biased by mc_filter_type (0/1/2/4 extra shifts), and the
 * hash combines bytes 4 and 5 of the address under hash_mask.
 * igb_update_mc_addr_list() (starting here) rebuilds the mta_shadow from the
 * address list and flushes it to hardware. */
(index + 2688U) * 8U : (index + 2700U) * 8U)); } else { } igb_rd32(hw, 8U); __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(rar_high, (void volatile *)hw_addr___0 + (unsigned long )(index <= 15U ? index * 8U + 21508U : (index + 536870896U) * 8U + 21732U)); } else { } igb_rd32(hw, 8U); return; } } void igb_mta_set(struct e1000_hw *hw , u32 hash_value ) { u32 hash_bit ; u32 hash_reg ; u32 mta ; u8 *hw_addr ; u8 *__var ; long tmp ; { hash_reg = (hash_value >> 5) & (u32 )((int )hw->mac.mta_reg_count + -1); hash_bit = hash_value & 31U; mta = readl((void const volatile *)(hw->hw_addr + ((unsigned long )(hash_reg << 2) + 20992UL))); mta = (u32 )(1 << (int )hash_bit) | mta; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(mta, (void volatile *)hw_addr + (unsigned long )((hash_reg << 2) + 20992U)); } else { } igb_rd32(hw, 8U); return; } } static u32 igb_hash_mc_addr(struct e1000_hw *hw , u8 *mc_addr ) { u32 hash_value ; u32 hash_mask ; u8 bit_shift ; { bit_shift = 0U; hash_mask = (u32 )((int )hw->mac.mta_reg_count * 32 + -1); goto ldv_47450; ldv_47449: bit_shift = (u8 )((int )bit_shift + 1); ldv_47450: ; if (hash_mask >> (int )bit_shift != 255U) { goto ldv_47449; } else { } switch (hw->mac.mc_filter_type) { default: ; case 0U: ; goto ldv_47454; case 1U: bit_shift = (unsigned int )bit_shift + 1U; goto ldv_47454; case 2U: bit_shift = (unsigned int )bit_shift + 2U; goto ldv_47454; case 3U: bit_shift = (unsigned int )bit_shift + 4U; goto ldv_47454; } ldv_47454: hash_value = (u32 )(((int )*(mc_addr + 4UL) >> (8 - (int )bit_shift)) | ((int )*(mc_addr + 5UL) << (int )bit_shift)) & hash_mask; return (hash_value); } } void igb_update_mc_addr_list(struct e1000_hw *hw , u8 *mc_addr_list , u32 mc_addr_count ) { u32 
/* Continuation of igb_update_mc_addr_list(): zero the 512-byte mta_shadow,
 * set one hash bit per 6-byte address in @mc_addr_list, then write the
 * shadow back to the MTA registers highest-index first and flush with a
 * status read.  igb_clear_hw_cntrs_base() clears the read-to-clear base
 * statistics counters simply by reading each register (offsets 0x4000..).
 * igb_check_for_copper_link() (starting here) polls PHY link state and, on
 * a fresh link-up with autoneg, reconfigures collision distance and flow
 * control. */
hash_value ; u32 hash_bit ; u32 hash_reg ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; { memset((void *)(& hw->mac.mta_shadow), 0, 512UL); i = 0; goto ldv_47468; ldv_47467: hash_value = igb_hash_mc_addr(hw, mc_addr_list); hash_reg = (hash_value >> 5) & (u32 )((int )hw->mac.mta_reg_count + -1); hash_bit = hash_value & 31U; hw->mac.mta_shadow[hash_reg] = hw->mac.mta_shadow[hash_reg] | (u32 )(1 << (int )hash_bit); mc_addr_list = mc_addr_list + 6UL; i = i + 1; ldv_47468: ; if ((unsigned int )i < mc_addr_count) { goto ldv_47467; } else { } i = (int )hw->mac.mta_reg_count + -1; goto ldv_47474; ldv_47473: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(hw->mac.mta_shadow[i], (void volatile *)hw_addr + (unsigned long )((i << 2) + 20992)); } else { } i = i - 1; ldv_47474: ; if (i >= 0) { goto ldv_47473; } else { } igb_rd32(hw, 8U); return; } } void igb_clear_hw_cntrs_base(struct e1000_hw *hw ) { { igb_rd32(hw, 16384U); igb_rd32(hw, 16392U); igb_rd32(hw, 16400U); igb_rd32(hw, 16404U); igb_rd32(hw, 16408U); igb_rd32(hw, 16412U); igb_rd32(hw, 16416U); igb_rd32(hw, 16424U); igb_rd32(hw, 16432U); igb_rd32(hw, 16440U); igb_rd32(hw, 16448U); igb_rd32(hw, 16456U); igb_rd32(hw, 16460U); igb_rd32(hw, 16464U); igb_rd32(hw, 16468U); igb_rd32(hw, 16472U); igb_rd32(hw, 16500U); igb_rd32(hw, 16504U); igb_rd32(hw, 16508U); igb_rd32(hw, 16512U); igb_rd32(hw, 16520U); igb_rd32(hw, 16524U); igb_rd32(hw, 16528U); igb_rd32(hw, 16532U); igb_rd32(hw, 16544U); igb_rd32(hw, 16548U); igb_rd32(hw, 16552U); igb_rd32(hw, 16556U); igb_rd32(hw, 16560U); igb_rd32(hw, 16576U); igb_rd32(hw, 16580U); igb_rd32(hw, 16584U); igb_rd32(hw, 16588U); igb_rd32(hw, 16592U); igb_rd32(hw, 16596U); igb_rd32(hw, 16624U); igb_rd32(hw, 16628U); return; } } s32 igb_check_for_copper_link(struct e1000_hw *hw ) { struct e1000_mac_info *mac ; s32 ret_val ; bool link ; struct _ddebug descriptor ; struct 
/* End of igb_check_for_copper_link(): early-outs when link status is already
 * known, link is down, or autoneg is off (then returns -3); otherwise sets
 * collision distance and flow control after link-up.  igb_setup_link()
 * resolves the requested flow-control mode (reading the NVM default when
 * requested_mode == 255, i.e. e1000_fc_default -- TODO confirm constant),
 * calls mac.ops.setup_physical_interface, and programs the flow-control
 * address/type/timer registers (FCAH/FCAL/FCT/FCTTV at offsets
 * 48/44/40/368) plus the watermarks. */
net_device *tmp ; long tmp___0 ; { mac = & hw->mac; if (! mac->get_link_status) { ret_val = 0; goto out; } else { } ret_val = igb_phy_has_link(hw, 1U, 0U, & link); if (ret_val != 0) { goto out; } else { } if (! link) { goto out; } else { } mac->get_link_status = 0; igb_check_downshift(hw); if (! mac->autoneg) { ret_val = -3; goto out; } else { } igb_config_collision_dist(hw); ret_val = igb_config_fc_after_link_up(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_check_for_copper_link"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Error configuring flow control\n"; descriptor.lineno = 577U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Error configuring flow control\n"); } else { } } else { } out: ; return (ret_val); } } s32 igb_setup_link(struct e1000_hw *hw ) { s32 ret_val ; s32 tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; struct _ddebug descriptor___0 ; struct net_device *tmp___2 ; long tmp___3 ; u8 *hw_addr ; u8 *__var ; long tmp___4 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___5 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___6 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___7 ; { ret_val = 0; tmp = igb_check_reset_block(hw); if (tmp != 0) { goto out; } else { } if ((unsigned int )hw->fc.requested_mode == 255U) { ret_val = igb_set_default_fc(hw); if (ret_val != 0) { goto out; } else { } } else { } hw->fc.current_mode = hw->fc.requested_mode; descriptor.modname = "igb"; descriptor.function = "igb_setup_link"; descriptor.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "After fix-ups FlowControl is now = %x\n"; descriptor.lineno = 618U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "After fix-ups FlowControl is now = %x\n", (unsigned int )hw->fc.current_mode); } else { } ret_val = (*(hw->mac.ops.setup_physical_interface))(hw); if (ret_val != 0) { goto out; } else { } descriptor___0.modname = "igb"; descriptor___0.function = "igb_setup_link"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___0.format = "Initializing the Flow Control address, type and timer regs\n"; descriptor___0.lineno = 630U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___2, "Initializing the Flow Control address, type and timer regs\n"); } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(34824U, (void volatile *)hw_addr + 48U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(256U, (void volatile *)hw_addr___0 + 44U); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& 
/* igb_config_collision_dist(): read-modify-write TCTL (offset 1024 == 0x400)
 * to set the collision-distance field (mask 0xFFC00FFF, value 0x3F000).
 * igb_set_fc_watermarks(): program FCRTL/FCRTH (offsets 8544/8552) from
 * hw->fc.low_water/high_water when Rx pause (mode bit 2) is enabled; the
 * XON bit (0x80000000) is ORed into FCRTL when fc.send_xon is set.
 * igb_set_default_fc() (starting here) derives the default flow-control
 * mode from NVM word 15 (per-LAN offset on mac.type 4). */
hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(12746753U, (void volatile *)hw_addr___1 + 40U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel((unsigned int )hw->fc.pause_time, (void volatile *)hw_addr___2 + 368U); } else { } ret_val = igb_set_fc_watermarks(hw); out: ; return (ret_val); } } void igb_config_collision_dist(struct e1000_hw *hw ) { u32 tctl ; u8 *hw_addr ; u8 *__var ; long tmp ; { tctl = igb_rd32(hw, 1024U); tctl = tctl & 4290777087U; tctl = tctl | 258048U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(tctl, (void volatile *)hw_addr + 1024U); } else { } igb_rd32(hw, 8U); return; } } static s32 igb_set_fc_watermarks(struct e1000_hw *hw ) { s32 ret_val ; u32 fcrtl ; u32 fcrth ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { ret_val = 0; fcrtl = 0U; fcrth = 0U; if (((unsigned int )hw->fc.current_mode & 2U) != 0U) { fcrtl = hw->fc.low_water; if ((int )hw->fc.send_xon) { fcrtl = fcrtl | 2147483648U; } else { } fcrth = hw->fc.high_water; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(fcrtl, (void volatile *)hw_addr + 8544U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(fcrth, (void volatile *)hw_addr___0 + 8552U); } else { } return (ret_val); } } static s32 igb_set_default_fc(struct e1000_hw *hw ) { s32 ret_val ; u16 lan_offset ; u16 nvm_data ; struct 
/* End of igb_set_default_fc(): NVM bits 12288 == 0x3000 select the mode --
 * 0 => none, 0x2000 => Tx pause (mode 2), otherwise full (mode 3).
 * igb_force_mac_fc(): force the resolved fc.current_mode into CTRL
 * (register offset 0): mode 0 clears RFCE|TFCE, 1 sets RFCE only, 2 sets
 * TFCE only, 3 sets both (0x18000000); any other value logs an error and
 * returns -3.  igb_config_fc_after_link_up() (starting here) resolves the
 * final flow-control mode from autoneg results. */
_ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { ret_val = 0; if ((unsigned int )hw->mac.type == 4U) { lan_offset = (unsigned int )hw->bus.func != 0U ? (unsigned int )((u16 )((int )hw->bus.func + 1)) * 64U : 0U; ret_val = (*(hw->nvm.ops.read))(hw, (int )((unsigned int )lan_offset + 15U), 1, & nvm_data); } else { ret_val = (*(hw->nvm.ops.read))(hw, 15, 1, & nvm_data); } if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_set_default_fc"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 732U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error\n"); } else { } goto out; } else { } if (((int )nvm_data & 12288) == 0) { hw->fc.requested_mode = 0; } else if (((int )nvm_data & 12288) == 8192) { hw->fc.requested_mode = 2; } else { hw->fc.requested_mode = 3; } out: ; return (ret_val); } } s32 igb_force_mac_fc(struct e1000_hw *hw ) { u32 ctrl ; s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; u8 *hw_addr ; u8 *__var ; long tmp___3 ; { ret_val = 0; ctrl = igb_rd32(hw, 0U); descriptor.modname = "igb"; descriptor.function = "igb_force_mac_fc"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "hw->fc.current_mode = %u\n"; descriptor.lineno = 782U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long 
)descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "hw->fc.current_mode = %u\n", (unsigned int )hw->fc.current_mode); } else { } switch ((unsigned int )hw->fc.current_mode) { case 0U: ctrl = ctrl & 3892314111U; goto ldv_47544; case 1U: ctrl = ctrl & 4026531839U; ctrl = ctrl | 134217728U; goto ldv_47544; case 2U: ctrl = ctrl & 4160749567U; ctrl = ctrl | 268435456U; goto ldv_47544; case 3U: ctrl = ctrl | 402653184U; goto ldv_47544; default: descriptor___0.modname = "igb"; descriptor___0.function = "igb_force_mac_fc"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___0.format = "Flow control param set incorrectly\n"; descriptor___0.lineno = 800U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Flow control param set incorrectly\n"); } else { } ret_val = -3; goto out; } ldv_47544: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(ctrl, (void volatile *)hw_addr); } else { } out: ; return (ret_val); } } s32 igb_config_fc_after_link_up(struct e1000_hw *hw ) { struct e1000_mac_info *mac ; s32 ret_val ; u32 pcs_status_reg ; u32 pcs_adv_reg ; u32 pcs_lp_ability_reg ; u32 pcs_ctrl_reg ; u16 mii_status_reg ; u16 mii_nway_adv_reg ; u16 mii_nway_lp_ability_reg ; u16 speed ; u16 duplex ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device 
/* Body of igb_config_fc_after_link_up(): after link-up with autoneg it
 * resolves fc.current_mode from the negotiated PAUSE bits.  Copper path:
 * MII status register 1 is read twice (link bit is latched-low), then the
 * advertisement (reg 4) and link-partner ability (reg 5) words are matched
 * against the standard PAUSE (0x400) / ASM_DIR (0x800) resolution table:
 * both-PAUSE => full (or Rx-only if only Rx was requested); local ASM_DIR
 * without PAUSE + partner both => Tx-only; local both + partner ASM_DIR
 * only => Rx-only; otherwise none.  Half duplex forces mode to none.
 * SerDes path (media_type 3): same resolution using PCS_LSTAT/PCS_ANADV/
 * PCS_LPAB (offsets 16908/16920/16924, PAUSE bits 128/256), then the
 * forced-flow-control bit (128) is set in PCS_LCTL (16904) and
 * igb_force_mac_fc() applies the result.  All "Flow Control = ..." strings
 * are dynamic-debug output gated on descriptor.flags. */
*tmp___3 ; long tmp___4 ; struct _ddebug descriptor___2 ; struct net_device *tmp___5 ; long tmp___6 ; struct _ddebug descriptor___3 ; struct net_device *tmp___7 ; long tmp___8 ; struct _ddebug descriptor___4 ; struct net_device *tmp___9 ; long tmp___10 ; struct _ddebug descriptor___5 ; struct net_device *tmp___11 ; long tmp___12 ; struct _ddebug descriptor___6 ; struct net_device *tmp___13 ; long tmp___14 ; struct _ddebug descriptor___7 ; struct net_device *tmp___15 ; long tmp___16 ; struct _ddebug descriptor___8 ; struct net_device *tmp___17 ; long tmp___18 ; struct _ddebug descriptor___9 ; struct net_device *tmp___19 ; long tmp___20 ; struct _ddebug descriptor___10 ; struct net_device *tmp___21 ; long tmp___22 ; struct _ddebug descriptor___11 ; struct net_device *tmp___23 ; long tmp___24 ; struct _ddebug descriptor___12 ; struct net_device *tmp___25 ; long tmp___26 ; struct _ddebug descriptor___13 ; struct net_device *tmp___27 ; long tmp___28 ; struct _ddebug descriptor___14 ; struct net_device *tmp___29 ; long tmp___30 ; u8 *hw_addr ; u8 *__var ; long tmp___31 ; struct _ddebug descriptor___15 ; struct net_device *tmp___32 ; long tmp___33 ; { mac = & hw->mac; ret_val = 0; if ((int )mac->autoneg_failed) { if ((unsigned int )hw->phy.media_type == 3U) { ret_val = igb_force_mac_fc(hw); } else { } } else if ((unsigned int )hw->phy.media_type == 1U) { ret_val = igb_force_mac_fc(hw); } else { } if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_config_fc_after_link_up"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Error forcing flow control settings\n"; descriptor.lineno = 842U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); 
__dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Error forcing flow control settings\n"); } else { } goto out; } else { } if ((unsigned int )hw->phy.media_type == 1U && (int )mac->autoneg) { ret_val = (*(hw->phy.ops.read_reg))(hw, 1U, & mii_status_reg); if (ret_val != 0) { goto out; } else { } ret_val = (*(hw->phy.ops.read_reg))(hw, 1U, & mii_status_reg); if (ret_val != 0) { goto out; } else { } if (((int )mii_status_reg & 32) == 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_config_fc_after_link_up"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___0.format = "Copper PHY and Auto Neg has not completed.\n"; descriptor___0.lineno = 866U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Copper PHY and Auto Neg has not completed.\n"); } else { } goto out; } else { } ret_val = (*(hw->phy.ops.read_reg))(hw, 4U, & mii_nway_adv_reg); if (ret_val != 0) { goto out; } else { } ret_val = (*(hw->phy.ops.read_reg))(hw, 5U, & mii_nway_lp_ability_reg); if (ret_val != 0) { goto out; } else { } if (((int )mii_nway_adv_reg & 1024) != 0 && ((int )mii_nway_lp_ability_reg & 1024) != 0) { if ((unsigned int )hw->fc.requested_mode == 3U) { hw->fc.current_mode = 3; descriptor___1.modname = "igb"; descriptor___1.function = "igb_config_fc_after_link_up"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___1.format = "Flow Control = FULL.\n"; descriptor___1.lineno 
= 928U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Flow Control = FULL.\n"); } else { } } else { hw->fc.current_mode = 1; descriptor___2.modname = "igb"; descriptor___2.function = "igb_config_fc_after_link_up"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___2.format = "Flow Control = RX PAUSE frames only.\n"; descriptor___2.lineno = 931U; descriptor___2.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___5, "Flow Control = RX PAUSE frames only.\n"); } else { } } } else if (((((int )mii_nway_adv_reg & 1024) == 0 && ((int )mii_nway_adv_reg & 2048) != 0) && ((int )mii_nway_lp_ability_reg & 1024) != 0) && ((int )mii_nway_lp_ability_reg & 2048) != 0) { hw->fc.current_mode = 2; descriptor___3.modname = "igb"; descriptor___3.function = "igb_config_fc_after_link_up"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___3.format = "Flow Control = TX PAUSE frames only.\n"; descriptor___3.lineno = 946U; descriptor___3.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___7 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)tmp___7, "Flow Control = TX PAUSE frames only.\n"); } else { } } else if (((((int )mii_nway_adv_reg & 1024) != 0 && 
((int )mii_nway_adv_reg & 2048) != 0) && ((int )mii_nway_lp_ability_reg & 1024) == 0) && ((int )mii_nway_lp_ability_reg & 2048) != 0) { hw->fc.current_mode = 1; descriptor___4.modname = "igb"; descriptor___4.function = "igb_config_fc_after_link_up"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___4.format = "Flow Control = RX PAUSE frames only.\n"; descriptor___4.lineno = 960U; descriptor___4.flags = 0U; tmp___10 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___10 != 0L) { tmp___9 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___4, (struct net_device const *)tmp___9, "Flow Control = RX PAUSE frames only.\n"); } else { } } else if (((unsigned int )hw->fc.requested_mode == 0U || (unsigned int )hw->fc.requested_mode == 2U) || (int )hw->fc.strict_ieee) { hw->fc.current_mode = 0; descriptor___5.modname = "igb"; descriptor___5.function = "igb_config_fc_after_link_up"; descriptor___5.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___5.format = "Flow Control = NONE.\n"; descriptor___5.lineno = 986U; descriptor___5.flags = 0U; tmp___12 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L); if (tmp___12 != 0L) { tmp___11 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___5, (struct net_device const *)tmp___11, "Flow Control = NONE.\n"); } else { } } else { hw->fc.current_mode = 1; descriptor___6.modname = "igb"; descriptor___6.function = "igb_config_fc_after_link_up"; descriptor___6.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___6.format = "Flow Control = RX PAUSE frames only.\n"; descriptor___6.lineno = 989U; descriptor___6.flags = 0U; tmp___14 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___14 != 0L) { tmp___13 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___6, (struct net_device const *)tmp___13, "Flow Control = RX PAUSE frames only.\n"); } else { } } ret_val = (*(hw->mac.ops.get_speed_and_duplex))(hw, & speed, & duplex); if (ret_val != 0) { descriptor___7.modname = "igb"; descriptor___7.function = "igb_config_fc_after_link_up"; descriptor___7.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___7.format = "Error getting link speed and duplex\n"; descriptor___7.lineno = 998U; descriptor___7.flags = 0U; tmp___16 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L); if (tmp___16 != 0L) { tmp___15 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___7, (struct net_device const *)tmp___15, "Error getting link speed and duplex\n"); } else { } goto out; } else { } if ((unsigned int )duplex == 1U) { hw->fc.current_mode = 0; } else { } ret_val = igb_force_mac_fc(hw); if (ret_val != 0) { descriptor___8.modname = "igb"; descriptor___8.function = "igb_config_fc_after_link_up"; descriptor___8.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___8.format = "Error forcing flow control settings\n"; descriptor___8.lineno = 1010U; 
descriptor___8.flags = 0U; tmp___18 = ldv__builtin_expect((long )descriptor___8.flags & 1L, 0L); if (tmp___18 != 0L) { tmp___17 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___8, (struct net_device const *)tmp___17, "Error forcing flow control settings\n"); } else { } goto out; } else { } } else { } if ((unsigned int )hw->phy.media_type == 3U && (int )mac->autoneg) { pcs_status_reg = igb_rd32(hw, 16908U); if ((pcs_status_reg & 65536U) == 0U) { descriptor___9.modname = "igb"; descriptor___9.function = "igb_config_fc_after_link_up"; descriptor___9.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___9.format = "PCS Auto Neg has not completed.\n"; descriptor___9.lineno = 1027U; descriptor___9.flags = 0U; tmp___20 = ldv__builtin_expect((long )descriptor___9.flags & 1L, 0L); if (tmp___20 != 0L) { tmp___19 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___9, (struct net_device const *)tmp___19, "PCS Auto Neg has not completed.\n"); } else { } return (ret_val); } else { } pcs_adv_reg = igb_rd32(hw, 16920U); pcs_lp_ability_reg = igb_rd32(hw, 16924U); if ((pcs_adv_reg & 128U) != 0U && (pcs_lp_ability_reg & 128U) != 0U) { if ((unsigned int )hw->fc.requested_mode == 3U) { hw->fc.current_mode = 3; descriptor___10.modname = "igb"; descriptor___10.function = "igb_config_fc_after_link_up"; descriptor___10.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___10.format = "Flow Control = FULL.\n"; descriptor___10.lineno = 1083U; descriptor___10.flags = 0U; tmp___22 = ldv__builtin_expect((long )descriptor___10.flags & 1L, 0L); if (tmp___22 != 0L) { tmp___21 = igb_get_hw_dev(hw); 
__dynamic_netdev_dbg(& descriptor___10, (struct net_device const *)tmp___21, "Flow Control = FULL.\n"); } else { } } else { hw->fc.current_mode = 1; descriptor___11.modname = "igb"; descriptor___11.function = "igb_config_fc_after_link_up"; descriptor___11.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___11.format = "Flow Control = Rx PAUSE frames only.\n"; descriptor___11.lineno = 1086U; descriptor___11.flags = 0U; tmp___24 = ldv__builtin_expect((long )descriptor___11.flags & 1L, 0L); if (tmp___24 != 0L) { tmp___23 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___11, (struct net_device const *)tmp___23, "Flow Control = Rx PAUSE frames only.\n"); } else { } } } else if ((((pcs_adv_reg & 128U) == 0U && (pcs_adv_reg & 256U) != 0U) && (pcs_lp_ability_reg & 128U) != 0U) && (pcs_lp_ability_reg & 256U) != 0U) { hw->fc.current_mode = 2; descriptor___12.modname = "igb"; descriptor___12.function = "igb_config_fc_after_link_up"; descriptor___12.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___12.format = "Flow Control = Tx PAUSE frames only.\n"; descriptor___12.lineno = 1101U; descriptor___12.flags = 0U; tmp___26 = ldv__builtin_expect((long )descriptor___12.flags & 1L, 0L); if (tmp___26 != 0L) { tmp___25 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___12, (struct net_device const *)tmp___25, "Flow Control = Tx PAUSE frames only.\n"); } else { } } else if ((((pcs_adv_reg & 128U) != 0U && (pcs_adv_reg & 256U) != 0U) && (pcs_lp_ability_reg & 128U) == 0U) && (pcs_lp_ability_reg & 256U) != 0U) { hw->fc.current_mode = 1; descriptor___13.modname = "igb"; 
descriptor___13.function = "igb_config_fc_after_link_up"; descriptor___13.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___13.format = "Flow Control = Rx PAUSE frames only.\n"; descriptor___13.lineno = 1115U; descriptor___13.flags = 0U; tmp___28 = ldv__builtin_expect((long )descriptor___13.flags & 1L, 0L); if (tmp___28 != 0L) { tmp___27 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___13, (struct net_device const *)tmp___27, "Flow Control = Rx PAUSE frames only.\n"); } else { } } else { hw->fc.current_mode = 0; descriptor___14.modname = "igb"; descriptor___14.function = "igb_config_fc_after_link_up"; descriptor___14.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___14.format = "Flow Control = NONE.\n"; descriptor___14.lineno = 1121U; descriptor___14.flags = 0U; tmp___30 = ldv__builtin_expect((long )descriptor___14.flags & 1L, 0L); if (tmp___30 != 0L) { tmp___29 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___14, (struct net_device const *)tmp___29, "Flow Control = NONE.\n"); } else { } } pcs_ctrl_reg = igb_rd32(hw, 16904U); pcs_ctrl_reg = pcs_ctrl_reg | 128U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___31 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___31 == 0L) { writel(pcs_ctrl_reg, (void volatile *)hw_addr + 16904U); } else { } ret_val = igb_force_mac_fc(hw); if (ret_val != 0) { descriptor___15.modname = "igb"; descriptor___15.function = "igb_config_fc_after_link_up"; descriptor___15.filename = 
/* NOTE(review): igb_get_speed_and_duplex_copper() starts on the next source
 * segment and continues beyond this view -- left undocumented here. */
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___15.format = "Error forcing flow control settings\n"; descriptor___15.lineno = 1133U; descriptor___15.flags = 0U; tmp___33 = ldv__builtin_expect((long )descriptor___15.flags & 1L, 0L); if (tmp___33 != 0L) { tmp___32 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___15, (struct net_device const *)tmp___32, "Error forcing flow control settings\n"); } else { } return (ret_val); } else { } } else { } out: ; return (ret_val); } } s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw , u16 *speed , u16 *duplex ) { u32 status ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___2 ; struct net_device *tmp___5 ; long tmp___6 ; struct _ddebug descriptor___3 ; struct net_device *tmp___7 ; long tmp___8 ; { status = igb_rd32(hw, 8U); if ((status & 128U) != 0U) { *speed = 1000U; descriptor.modname = "igb"; descriptor.function = "igb_get_speed_and_duplex_copper"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "1000 Mbs, "; descriptor.lineno = 1159U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "1000 Mbs, "); } else { } } else if ((status & 64U) != 0U) { *speed = 100U; descriptor___0.modname = "igb"; descriptor___0.function = "igb_get_speed_and_duplex_copper"; 
descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___0.format = "100 Mbs, "; descriptor___0.lineno = 1162U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "100 Mbs, "); } else { } } else { *speed = 10U; descriptor___1.modname = "igb"; descriptor___1.function = "igb_get_speed_and_duplex_copper"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___1.format = "10 Mbs, "; descriptor___1.lineno = 1165U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "10 Mbs, "); } else { } } if ((int )status & 1) { *duplex = 2U; descriptor___2.modname = "igb"; descriptor___2.function = "igb_get_speed_and_duplex_copper"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___2.format = "Full Duplex\n"; descriptor___2.lineno = 1170U; descriptor___2.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___5, "Full Duplex\n"); } else { } } else { *duplex = 1U; descriptor___3.modname 
= "igb"; descriptor___3.function = "igb_get_speed_and_duplex_copper"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___3.format = "Half Duplex\n"; descriptor___3.lineno = 1173U; descriptor___3.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___7 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)tmp___7, "Half Duplex\n"); } else { } } return (0); } } s32 igb_get_hw_semaphore(struct e1000_hw *hw ) { u32 swsm ; s32 ret_val ; s32 timeout ; s32 i ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; u32 tmp___2 ; struct _ddebug descriptor___0 ; struct net_device *tmp___3 ; long tmp___4 ; { ret_val = 0; timeout = (int )hw->nvm.word_size + 1; i = 0; goto ldv_47611; ldv_47610: swsm = igb_rd32(hw, 23376U); if ((swsm & 1U) == 0U) { goto ldv_47609; } else { } __const_udelay(214750UL); i = i + 1; ldv_47611: ; if (i < timeout) { goto ldv_47610; } else { } ldv_47609: ; if (i == timeout) { descriptor.modname = "igb"; descriptor.function = "igb_get_hw_semaphore"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Driver can\'t access device - SMBI bit is set.\n"; descriptor.lineno = 1203U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Driver can\'t access device - SMBI bit is set.\n"); } else { } ret_val = -1; goto out; } else { } i = 0; goto ldv_47620; 
/* NOTE(review): CIL/LDV auto-generated code. Comments added only; every code
 * token is unchanged. Labels ldv_NNNNN encode the original driver's loops. */
/* Tail of igb_get_hw_semaphore(): second polling loop. Sets bit 1 of the
 * register at offset 23376 (0x5B50 -- SWSM semaphore register in the original
 * driver; name inferred from the function, TODO confirm against e1000_regs.h)
 * and reads it back until the bit sticks or `timeout` iterations elapse. */
ldv_47619: swsm = igb_rd32(hw, 23376U);
__var = (u8 *)0U;
hw_addr = *((u8 * volatile *)(& hw->hw_addr));
/* Only write if the mapped register base is still non-NULL (the harness
 * models a possibly-unmapped BAR via hw->hw_addr). */
tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L);
if (tmp___1 == 0L) { writel(swsm | 2U, (void volatile *)hw_addr + 23376U); } else { }
tmp___2 = igb_rd32(hw, 23376U);
/* Read back: bit 1 observed set means the semaphore was obtained. */
if ((tmp___2 & 2U) != 0U) { goto ldv_47618; } else { }
__const_udelay(214750UL); /* 214750 = 4295 * 50, i.e. ~50 us between polls */
i = i + 1;
ldv_47620: ;
if (i < timeout) { goto ldv_47619; } else { }
ldv_47618: ;
/* Timed out: release whatever was taken, log via dynamic debug, fail (-1). */
if (i == timeout) { igb_put_hw_semaphore(hw); descriptor___0.modname = "igb"; descriptor___0.function = "igb_get_hw_semaphore"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor___0.format = "Driver can\'t access the NVM\n"; descriptor___0.lineno = 1223U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___3, "Driver can\'t access the NVM\n"); } else { } ret_val = -1; goto out; } else { }
out: ;
return (ret_val);
}
}
/* Release the hardware semaphore: clear the low two bits (mask 4294967292U
 * == ~3U) of the register at 23376 (0x5B50), i.e. both ownership bits that
 * igb_get_hw_semaphore() set. */
void igb_put_hw_semaphore(struct e1000_hw *hw ) { u32 swsm ; u8 *hw_addr ; u8 *__var ; long tmp ; { swsm = igb_rd32(hw, 23376U); swsm = swsm & 4294967292U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(swsm, (void volatile *)hw_addr + 23376U); } else { } return; } }
/* Poll for NVM auto-read completion: loop (continues on the next physical
 * line, i <= 9) reading the register at offset 16 (0x10) until bit 512U
 * (0x200) is set; the later debug string says "Auto read by HW from NVM has
 * not completed" on timeout. Sleeps 1-2 ms between polls. */
s32 igb_get_auto_rd_done(struct e1000_hw *hw ) { s32 i ; s32 ret_val ; u32 tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; { i = 0; ret_val = 0; goto ldv_47636; ldv_47635: tmp = igb_rd32(hw, 16U); if ((tmp & 512U) != 0U) { goto ldv_47634; } else { } usleep_range(1000UL, 2000UL); i = i + 1;
ldv_47636: ; if (i <= 9) { goto ldv_47635; } else { } ldv_47634: ; if (i == 10) { descriptor.modname = "igb"; descriptor.function = "igb_get_auto_rd_done"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Auto read by HW from NVM has not completed.\n"; descriptor.lineno = 1269U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "Auto read by HW from NVM has not completed.\n"); } else { } ret_val = -9; goto out; } else { } out: ; return (ret_val); } } static s32 igb_valid_led_default(struct e1000_hw *hw , u16 *data ) { s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { ret_val = (*(hw->nvm.ops.read))(hw, 4, 1, data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_valid_led_default"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 1292U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error\n"); } else { } goto out; } else { } if ((unsigned int )*data == 0U || (unsigned int )*data == 65535U) { switch ((unsigned int )hw->phy.media_type) { case 3U: *data = 4376U; goto ldv_47649; case 1U: ; default: *data = 35089U; goto ldv_47649; } ldv_47649: ; } else { } out: ; return (ret_val); } } s32 igb_id_led_init(struct e1000_hw *hw 
) { /* Body of igb_id_led_init() -- the signature begins on the previous
     * physical line. NOTE(review): CIL/LDV generated code; comments added
     * only, all code tokens unchanged.
     * Reads the ID-LED default word from NVM, then builds ledctl_mode1 and
     * ledctl_mode2 from the LEDCTL default (register offset 3584 == 0xE00;
     * register name inferred -- TODO confirm). Each of the 4 LEDs has a
     * 4-bit code in `data` (nibble i) and an 8-bit field in LEDCTL (byte i);
     * codes map to "on" (14U) or "off" (15U) per the switch tables below. */
struct e1000_mac_info *mac ; s32 ret_val ; u32 ledctl_mask ; u32 ledctl_on ; u32 ledctl_off ; u16 data ; u16 i ; u16 temp ; u16 led_mask ; { mac = & hw->mac; ledctl_mask = 255U; ledctl_on = 14U; ledctl_off = 15U; led_mask = 15U;
/* mac.type 6U/7U use the i210-specific NVM default reader. */
if ((unsigned int )hw->mac.type == 6U || (unsigned int )hw->mac.type == 7U) { ret_val = igb_valid_led_default_i210(hw, & data); } else { ret_val = igb_valid_led_default(hw, & data); }
if (ret_val != 0) { goto out; } else { }
mac->ledctl_default = igb_rd32(hw, 3584U);
mac->ledctl_mode1 = mac->ledctl_default;
mac->ledctl_mode2 = mac->ledctl_default;
/* Loop over the 4 LEDs (i = 0..3; goto ldv_47682/ldv_47681 encode a for). */
i = 0U; goto ldv_47682;
ldv_47681:
/* temp = nibble i of `data`: the 4-bit ID-LED code for this LED. */
temp = (u16 )((int )((short )((int )data >> ((int )i << 2))) & (int )((short )led_mask));
/* mode1: codes 4..6 force this LED's byte to "on", 7..9 to "off". */
switch ((int )temp) { case 4: ; case 5: ; case 6: mac->ledctl_mode1 = mac->ledctl_mode1 & ~ (ledctl_mask << ((int )i << 3)); mac->ledctl_mode1 = mac->ledctl_mode1 | (ledctl_on << ((int )i << 3)); goto ldv_47668; case 7: ; case 8: ; case 9: mac->ledctl_mode1 = mac->ledctl_mode1 & ~ (ledctl_mask << ((int )i << 3)); mac->ledctl_mode1 = mac->ledctl_mode1 | (ledctl_off << ((int )i << 3)); goto ldv_47668; default: ; goto ldv_47668; } ldv_47668: ;
/* mode2: codes 2/5/8 -> "on", 3/6/9 -> "off" for this LED's byte. */
switch ((int )temp) { case 2: ; case 5: ; case 8: mac->ledctl_mode2 = mac->ledctl_mode2 & ~ (ledctl_mask << ((int )i << 3)); mac->ledctl_mode2 = mac->ledctl_mode2 | (ledctl_on << ((int )i << 3)); goto ldv_47676; case 3: ; case 6: ; case 9: mac->ledctl_mode2 = mac->ledctl_mode2 & ~ (ledctl_mask << ((int )i << 3)); mac->ledctl_mode2 = mac->ledctl_mode2 | (ledctl_off << ((int )i << 3)); goto ldv_47676; default: ; goto ldv_47676; } ldv_47676: i = (u16 )((int )i + 1);
ldv_47682: ;
if ((unsigned int )i <= 3U) { goto ldv_47681; } else { }
out: ;
return (ret_val);
}
}
/* Restore the default LEDCTL value saved by igb_id_led_init(); the writel()
 * of hw->mac.ledctl_default continues on the next physical line. */
s32 igb_cleanup_led(struct e1000_hw *hw ) { u8 *hw_addr ; u8 *__var ; long tmp ; { __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) {
writel(hw->mac.ledctl_default, (void volatile *)hw_addr + 3584U); } else { } return (0); } } s32 igb_blink_led(struct e1000_hw *hw ) { u32 ledctl_blink ; u32 i ; u32 mode ; u32 led_default ; u8 *hw_addr ; u8 *__var ; long tmp ; { ledctl_blink = 0U; if ((unsigned int )hw->phy.media_type == 2U) { ledctl_blink = 142U; } else { ledctl_blink = hw->mac.ledctl_mode2; i = 0U; goto ldv_47698; ldv_47697: mode = (hw->mac.ledctl_mode2 >> (int )i) & 15U; led_default = hw->mac.ledctl_default >> (int )i; if (((led_default & 64U) == 0U && mode == 14U) || ((led_default & 64U) != 0U && mode == 15U)) { ledctl_blink = (u32 )(~ (15 << (int )i)) & ledctl_blink; ledctl_blink = (u32 )(142 << (int )i) | ledctl_blink; } else { } i = i + 8U; ldv_47698: ; if (i <= 31U) { goto ldv_47697; } else { } } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(ledctl_blink, (void volatile *)hw_addr + 3584U); } else { } return (0); } } s32 igb_led_off(struct e1000_hw *hw ) { u8 *hw_addr ; u8 *__var ; long tmp ; { switch ((unsigned int )hw->phy.media_type) { case 1U: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(hw->mac.ledctl_mode1, (void volatile *)hw_addr + 3584U); } else { } goto ldv_47710; default: ; goto ldv_47710; } ldv_47710: ; return (0); } } s32 igb_disable_pcie_master(struct e1000_hw *hw ) { u32 ctrl ; s32 timeout ; s32 ret_val ; u8 *hw_addr ; u8 *__var ; long tmp ; u32 tmp___0 ; struct _ddebug descriptor ; struct net_device *tmp___1 ; long tmp___2 ; { timeout = 800; ret_val = 0; if ((unsigned int )hw->bus.type != 3U) { goto out; } else { } ctrl = igb_rd32(hw, 0U); ctrl = ctrl | 4U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) 
{ writel(ctrl, (void volatile *)hw_addr); } else { } goto ldv_47724; ldv_47723: tmp___0 = igb_rd32(hw, 8U); if ((tmp___0 & 524288U) == 0U) { goto ldv_47722; } else { } __const_udelay(429500UL); timeout = timeout - 1; ldv_47724: ; if (timeout != 0) { goto ldv_47723; } else { } ldv_47722: ; if (timeout == 0) { descriptor.modname = "igb"; descriptor.function = "igb_disable_pcie_master"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Master requests are pending.\n"; descriptor.lineno = 1492U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___1, "Master requests are pending.\n"); } else { } ret_val = -10; goto out; } else { } out: ; return (ret_val); } } s32 igb_validate_mdi_setting(struct e1000_hw *hw ) { s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { ret_val = 0; if ((unsigned int )hw->mac.type > 2U) { goto out; } else { } if (! 
hw->mac.autoneg && ((unsigned int )hw->phy.mdix == 0U || (unsigned int )hw->phy.mdix == 3U)) { descriptor.modname = "igb"; descriptor.function = "igb_validate_mdi_setting"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Invalid MDI setting detected\n"; descriptor.lineno = 1517U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Invalid MDI setting detected\n"); } else { } hw->phy.mdix = 1U; ret_val = -3; goto out; } else { } out: ; return (ret_val); } } s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw , u32 reg , u32 offset , u8 data ) { u32 i ; u32 regvalue ; s32 ret_val ; u8 *hw_addr ; u8 *__var ; long tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; { regvalue = 0U; ret_val = 0; regvalue = (unsigned int )data | (offset << 8); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(regvalue, (void volatile *)hw_addr + (unsigned long )reg); } else { } i = 0U; goto ldv_47748; ldv_47747: __const_udelay(21475UL); regvalue = igb_rd32(hw, reg); if ((int )regvalue < 0) { goto ldv_47746; } else { } i = i + 1U; ldv_47748: ; if (i <= 639U) { goto ldv_47747; } else { } ldv_47746: ; if ((int )regvalue >= 0) { descriptor.modname = "igb"; descriptor.function = "igb_write_8bit_ctrl_reg"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_mac.c"; descriptor.format = "Reg %08x did not 
indicate ready\n"; descriptor.lineno = 1556U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "Reg %08x did not indicate ready\n", reg); } else { } ret_val = -2; goto out; } else { } out: ; return (ret_val); } } bool igb_enable_mng_pass_thru(struct e1000_hw *hw ) { u32 manc ; u32 fwsm ; u32 factps ; bool ret_val ; { ret_val = 0; if (! hw->mac.asf_firmware_present) { goto out; } else { } manc = igb_rd32(hw, 22560U); if ((manc & 131072U) == 0U) { goto out; } else { } if ((int )hw->mac.arc_subsystem_valid) { fwsm = igb_rd32(hw, 23380U); factps = igb_rd32(hw, 23344U); if ((factps & 536870912U) == 0U && (fwsm & 14U) == 4U) { ret_val = 1; goto out; } else { } } else if ((int )manc & 1 && (manc & 2U) == 0U) { ret_val = 1; goto out; } else { } out: ; return (ret_val); } } bool ldv_queue_work_on_157(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_158(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_159(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void 
ldv_flush_workqueue_160(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_161(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_162(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_163(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_164(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_165(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_166(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_167(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_168(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_193(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_191(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_194(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_195(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_lock_190(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_192(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_196(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_185(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_187(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_186(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_189(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_188(struct workqueue_struct *ldv_func_arg1 ) ; extern void __udelay(unsigned long ) ; s32 igb_read_invm_version(struct e1000_hw *hw , struct e1000_fw_version *invm_ver ) ; static void igb_raise_eec_clk(struct e1000_hw *hw , u32 *eecd ) { u8 *hw_addr ; u8 *__var ; long tmp ; { *eecd = *eecd | 1U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(*eecd, (void volatile *)hw_addr + 16U); } else { } igb_rd32(hw, 8U); __udelay((unsigned long )hw->nvm.delay_usec); return; } } static void igb_lower_eec_clk(struct e1000_hw *hw , u32 *eecd ) { u8 *hw_addr ; u8 *__var ; long tmp ; { *eecd = *eecd & 4294967294U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(*eecd, (void volatile *)hw_addr + 16U); } else { } igb_rd32(hw, 8U); __udelay((unsigned long )hw->nvm.delay_usec); return; } } static void igb_shift_out_eec_bits(struct e1000_hw *hw , u16 data , u16 count ) { struct e1000_nvm_info *nvm ; u32 eecd ; u32 tmp ; u32 mask ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 
*hw_addr___0 ; u8 *__var___0 ; long tmp___1 ;
/* NOTE(review): CIL/LDV generated code; comments added only, all code tokens
 * unchanged. This is the body of igb_shift_out_eec_bits() whose declarations
 * begin on the previous physical line: bit-bang `count` bits of `data`,
 * MSB first, onto the EEPROM data-in line of the register at offset 16
 * (0x10; EECD in the original driver -- TODO confirm bit names: 4U = DI,
 * 8U = DO/CS-related, 1U = SK handled by the raise/lower clock helpers). */
{ nvm = & hw->nvm; tmp = igb_rd32(hw, 16U); eecd = tmp;
/* mask starts at the top bit of the `count`-bit word and shifts right. */
mask = (u32 )(1 << ((int )count + -1));
if ((unsigned int )nvm->type == 2U) { eecd = eecd | 8U; } else { } /* SPI NVM (type 2U) only */
ldv_43854: eecd = eecd & 4294967291U; /* clear DI (bit 4U == 0x4) */
if (((u32 )data & mask) != 0U) { eecd = eecd | 4U; } else { }
__var = (u8 *)0U;
hw_addr = *((u8 * volatile *)(& hw->hw_addr));
tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L);
if (tmp___0 == 0L) { writel(eecd, (void volatile *)hw_addr + 16U); } else { }
igb_rd32(hw, 8U); /* posted-write flush via status-register read */
__udelay((unsigned long )nvm->delay_usec);
/* One clock pulse latches the bit into the EEPROM. */
igb_raise_eec_clk(hw, & eecd);
igb_lower_eec_clk(hw, & eecd);
mask = mask >> 1;
if (mask != 0U) { goto ldv_43854; } else { }
eecd = eecd & 4294967291U; /* leave DI low when done */
__var___0 = (u8 *)0U;
hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr));
tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L);
if (tmp___1 == 0L) { writel(eecd, (void volatile *)hw_addr___0 + 16U); } else { }
return;
}
}
/* Clock `count` bits in from the EEPROM data-out line (bit 8U == 0x8 of the
 * register at 0x10), MSB first: shift the accumulator left, pulse the clock,
 * sample DO, OR in a 1 if set. */
static u16 igb_shift_in_eec_bits(struct e1000_hw *hw , u16 count ) { u32 eecd ; u32 i ; u16 data ; { eecd = igb_rd32(hw, 16U); eecd = eecd & 4294967283U; data = 0U; i = 0U; goto ldv_43867; ldv_43866: data = (int )data << 1U; igb_raise_eec_clk(hw, & eecd); eecd = igb_rd32(hw, 16U); eecd = eecd & 4294967291U; if ((eecd & 8U) != 0U) { data = (u16 )((unsigned int )data | 1U); } else { } igb_lower_eec_clk(hw, & eecd); i = i + 1U; ldv_43867: ; if ((u32 )count > i) { goto ldv_43866; } else { } return (data); } }
/* Poll the EEPROM read (ee_reg == 0 -> offset 20U == 0x14) or write
 * (offset 4140U == 0x102C) register for its done bit (2U), up to 100000
 * attempts with ~5 us between polls; returns 0 on done, -1 on timeout.
 * The `return` is completed by "(ret_val);" on the next physical line. */
static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw , int ee_reg ) { u32 attempts ; u32 i ; u32 reg ; s32 ret_val ; { attempts = 100000U; reg = 0U; ret_val = -1; i = 0U; goto ldv_43879; ldv_43878: ; if (ee_reg == 0) { reg = igb_rd32(hw, 20U); } else { reg = igb_rd32(hw, 4140U); } if ((reg & 2U) != 0U) { ret_val = 0; goto ldv_43877; } else { } __const_udelay(21475UL); i = i + 1U; ldv_43879: ; if (i < attempts) { goto ldv_43878; } else { } ldv_43877: ; return
(ret_val); } } s32 igb_acquire_nvm(struct e1000_hw *hw ) { u32 eecd ; u32 tmp ; s32 timeout ; s32 ret_val ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; struct _ddebug descriptor ; struct net_device *tmp___2 ; long tmp___3 ; { tmp = igb_rd32(hw, 16U); eecd = tmp; timeout = 1000; ret_val = 0; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(eecd | 64U, (void volatile *)hw_addr + 16U); } else { } eecd = igb_rd32(hw, 16U); goto ldv_43891; ldv_43890: ; if ((eecd & 128U) != 0U) { goto ldv_43889; } else { } __const_udelay(21475UL); eecd = igb_rd32(hw, 16U); timeout = timeout - 1; ldv_43891: ; if (timeout != 0) { goto ldv_43890; } else { } ldv_43889: ; if (timeout == 0) { eecd = eecd & 4294967231U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(eecd, (void volatile *)hw_addr___0 + 16U); } else { } descriptor.modname = "igb"; descriptor.function = "igb_acquire_nvm"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor.format = "Could not acquire NVM grant\n"; descriptor.lineno = 198U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___2, "Could not acquire NVM grant\n"); } else { } ret_val = -1; } else { } return (ret_val); } } static void igb_standby_nvm(struct e1000_hw *hw ) { struct e1000_nvm_info *nvm ; u32 eecd ; u32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 
*__var___0 ; long tmp___1 ; { nvm = & hw->nvm; tmp = igb_rd32(hw, 16U); eecd = tmp; if ((unsigned int )nvm->type == 2U) { eecd = eecd | 2U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(eecd, (void volatile *)hw_addr + 16U); } else { } igb_rd32(hw, 8U); __udelay((unsigned long )nvm->delay_usec); eecd = eecd & 4294967293U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(eecd, (void volatile *)hw_addr___0 + 16U); } else { } igb_rd32(hw, 8U); __udelay((unsigned long )nvm->delay_usec); } else { } return; } } static void e1000_stop_nvm(struct e1000_hw *hw ) { u32 eecd ; { eecd = igb_rd32(hw, 16U); if ((unsigned int )hw->nvm.type == 2U) { eecd = eecd | 2U; igb_lower_eec_clk(hw, & eecd); } else { } return; } } void igb_release_nvm(struct e1000_hw *hw ) { u32 eecd ; u8 *hw_addr ; u8 *__var ; long tmp ; { e1000_stop_nvm(hw); eecd = igb_rd32(hw, 16U); eecd = eecd & 4294967231U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(eecd, (void volatile *)hw_addr + 16U); } else { } return; } } static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw ) { struct e1000_nvm_info *nvm ; u32 eecd ; u32 tmp ; s32 ret_val ; u16 timeout ; u8 spi_stat_reg ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u16 tmp___1 ; struct _ddebug descriptor ; struct net_device *tmp___2 ; long tmp___3 ; { nvm = & hw->nvm; tmp = igb_rd32(hw, 16U); eecd = tmp; ret_val = 0; timeout = 0U; if ((unsigned int )nvm->type == 2U) { eecd = eecd & 4294967292U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 
0L) { writel(eecd, (void volatile *)hw_addr + 16U); } else { } igb_rd32(hw, 8U); __const_udelay(4295UL); timeout = 5000U; goto ldv_43932; ldv_43931: igb_shift_out_eec_bits(hw, 5, (int )hw->nvm.opcode_bits); tmp___1 = igb_shift_in_eec_bits(hw, 8); spi_stat_reg = (unsigned char )tmp___1; if (((int )spi_stat_reg & 1) == 0) { goto ldv_43930; } else { } __const_udelay(21475UL); igb_standby_nvm(hw); timeout = (u16 )((int )timeout - 1); ldv_43932: ; if ((unsigned int )timeout != 0U) { goto ldv_43931; } else { } ldv_43930: ; if ((unsigned int )timeout == 0U) { descriptor.modname = "igb"; descriptor.function = "igb_ready_nvm_eeprom"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor.format = "SPI NVM Status error\n"; descriptor.lineno = 305U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___2, "SPI NVM Status error\n"); } else { } ret_val = -1; goto out; } else { } } else { } out: ; return (ret_val); } } s32 igb_read_nvm_spi(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) { struct e1000_nvm_info *nvm ; u32 i ; s32 ret_val ; u16 word_in ; u8 read_opcode ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { nvm = & hw->nvm; i = 0U; read_opcode = 3U; if (((int )nvm->word_size <= (int )offset || (int )words > (int )nvm->word_size - (int )offset) || (unsigned int )words == 0U) { descriptor.modname = "igb"; descriptor.function = "igb_read_nvm_spi"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; 
descriptor.format = "nvm parameter(s) out of bounds\n"; descriptor.lineno = 337U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "nvm parameter(s) out of bounds\n"); } else { } ret_val = -1; goto out; } else { } ret_val = (*(nvm->ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } ret_val = igb_ready_nvm_eeprom(hw); if (ret_val != 0) { goto release; } else { } igb_standby_nvm(hw); if ((unsigned int )nvm->address_bits == 8U && (unsigned int )offset > 127U) { read_opcode = (u8 )((unsigned int )read_opcode | 8U); } else { } igb_shift_out_eec_bits(hw, (int )read_opcode, (int )nvm->opcode_bits); igb_shift_out_eec_bits(hw, (int )((unsigned int )offset * 2U), (int )nvm->address_bits); i = 0U; goto ldv_43952; ldv_43951: word_in = igb_shift_in_eec_bits(hw, 16); *(data + (unsigned long )i) = (u16 )((int )((short )((int )word_in >> 8)) | (int )((short )((int )word_in << 8))); i = i + 1U; ldv_43952: ; if ((u32 )words > i) { goto ldv_43951; } else { } release: (*(nvm->ops.release))(hw); out: ; return (ret_val); } } s32 igb_read_nvm_eerd(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) { struct e1000_nvm_info *nvm ; u32 i ; u32 eerd ; s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; u32 tmp___2 ; { nvm = & hw->nvm; eerd = 0U; ret_val = 0; if (((int )nvm->word_size <= (int )offset || (int )words > (int )nvm->word_size - (int )offset) || (unsigned int )words == 0U) { descriptor.modname = "igb"; descriptor.function = "igb_read_nvm_eerd"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor.format = "nvm parameter(s) out of bounds\n"; 
descriptor.lineno = 395U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "nvm parameter(s) out of bounds\n"); } else { } ret_val = -1; goto out; } else { } i = 0U; goto ldv_43972; ldv_43971: eerd = (((u32 )offset + i) << 2) + 1U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(eerd, (void volatile *)hw_addr + 20U); } else { } ret_val = igb_poll_eerd_eewr_done(hw, 0); if (ret_val != 0) { goto ldv_43970; } else { } tmp___2 = igb_rd32(hw, 20U); *(data + (unsigned long )i) = (u16 )(tmp___2 >> 16); i = i + 1U; ldv_43972: ; if ((u32 )words > i) { goto ldv_43971; } else { } ldv_43970: ; out: ; return (ret_val); } } s32 igb_write_nvm_spi(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) { struct e1000_nvm_info *nvm ; s32 ret_val ; u16 widx ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 write_opcode ; u16 word_out ; { nvm = & hw->nvm; ret_val = -1; widx = 0U; if (((int )nvm->word_size <= (int )offset || (int )words > (int )nvm->word_size - (int )offset) || (unsigned int )words == 0U) { descriptor.modname = "igb"; descriptor.function = "igb_write_nvm_spi"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor.format = "nvm parameter(s) out of bounds\n"; descriptor.lineno = 440U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "nvm parameter(s) out of bounds\n"); } else { } return (ret_val); } else { } 
goto ldv_43990; ldv_43989: write_opcode = 2U; ret_val = (*(nvm->ops.acquire))(hw); if (ret_val != 0) { return (ret_val); } else { } ret_val = igb_ready_nvm_eeprom(hw); if (ret_val != 0) { (*(nvm->ops.release))(hw); return (ret_val); } else { } igb_standby_nvm(hw); igb_shift_out_eec_bits(hw, 6, (int )nvm->opcode_bits); igb_standby_nvm(hw); if ((unsigned int )nvm->address_bits == 8U && (unsigned int )offset > 127U) { write_opcode = (u8 )((unsigned int )write_opcode | 8U); } else { } igb_shift_out_eec_bits(hw, (int )write_opcode, (int )nvm->opcode_bits); igb_shift_out_eec_bits(hw, (int )((unsigned int )((unsigned short )((int )offset + (int )widx)) * 2U), (int )nvm->address_bits); goto ldv_43988; ldv_43987: word_out = *(data + (unsigned long )widx); word_out = (u16 )((int )((short )((int )word_out >> 8)) | (int )((short )((int )word_out << 8))); igb_shift_out_eec_bits(hw, (int )word_out, 16); widx = (u16 )((int )widx + 1); if ((((int )offset + (int )widx) * 2) % (int )nvm->page_size == 0) { igb_standby_nvm(hw); goto ldv_43986; } else { } ldv_43988: ; if ((int )widx < (int )words) { goto ldv_43987; } else { } ldv_43986: usleep_range(1000UL, 2000UL); (*(nvm->ops.release))(hw); ldv_43990: ; if ((int )widx < (int )words) { goto ldv_43989; } else { } return (ret_val); } } s32 igb_read_part_string(struct e1000_hw *hw , u8 *part_num , u32 part_num_size ) { s32 ret_val ; u16 nvm_data ; u16 pointer ; u16 offset ; u16 length ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___2 ; struct net_device *tmp___5 ; long tmp___6 ; struct _ddebug descriptor___3 ; struct net_device *tmp___7 ; long tmp___8 ; struct _ddebug descriptor___4 ; struct net_device *tmp___9 ; long tmp___10 ; struct _ddebug descriptor___5 ; struct net_device *tmp___11 ; long tmp___12 ; struct _ddebug descriptor___6 ; 
struct net_device *tmp___13 ; long tmp___14 ; struct _ddebug descriptor___7 ; struct net_device *tmp___15 ; long tmp___16 ; { if ((unsigned long )part_num == (unsigned long )((u8 *)0U)) { descriptor.modname = "igb"; descriptor.function = "igb_read_part_string"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor.format = "PBA string buffer was null\n"; descriptor.lineno = 514U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PBA string buffer was null\n"); } else { } ret_val = 16; goto out; } else { } ret_val = (*(hw->nvm.ops.read))(hw, 8, 1, & nvm_data); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_read_part_string"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___0.format = "NVM Read Error\n"; descriptor___0.lineno = 521U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Read Error\n"); } else { } goto out; } else { } ret_val = (*(hw->nvm.ops.read))(hw, 9, 1, & pointer); if (ret_val != 0) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_read_part_string"; descriptor___1.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___1.format = "NVM Read Error\n"; descriptor___1.lineno = 527U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "NVM Read Error\n"); } else { } goto out; } else { } if ((unsigned int )nvm_data != 64250U) { descriptor___2.modname = "igb"; descriptor___2.function = "igb_read_part_string"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___2.format = "NVM PBA number is not stored as string\n"; descriptor___2.lineno = 536U; descriptor___2.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___5, "NVM PBA number is not stored as string\n"); } else { } if (part_num_size <= 10U) { descriptor___3.modname = "igb"; descriptor___3.function = "igb_read_part_string"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___3.format = "PBA string buffer too small\n"; descriptor___3.lineno = 540U; descriptor___3.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___7 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)tmp___7, "PBA 
string buffer too small\n"); } else { } return (17); } else { } *part_num = (unsigned int )((u8 )((int )nvm_data >> 12)) & 15U; *(part_num + 1UL) = (unsigned int )((u8 )((int )nvm_data >> 8)) & 15U; *(part_num + 2UL) = (unsigned int )((u8 )((int )nvm_data >> 4)) & 15U; *(part_num + 3UL) = (unsigned int )((u8 )nvm_data) & 15U; *(part_num + 4UL) = (unsigned int )((u8 )((int )pointer >> 12)) & 15U; *(part_num + 5UL) = (unsigned int )((u8 )((int )pointer >> 8)) & 15U; *(part_num + 6UL) = 45U; *(part_num + 7UL) = 0U; *(part_num + 8UL) = (unsigned int )((u8 )((int )pointer >> 4)) & 15U; *(part_num + 9UL) = (unsigned int )((u8 )pointer) & 15U; *(part_num + 10UL) = 0U; offset = 0U; goto ldv_44010; ldv_44009: ; if ((unsigned int )*(part_num + (unsigned long )offset) <= 9U) { *(part_num + (unsigned long )offset) = (unsigned int )*(part_num + (unsigned long )offset) + 48U; } else if ((unsigned int )*(part_num + (unsigned long )offset) <= 15U) { *(part_num + (unsigned long )offset) = (unsigned int )*(part_num + (unsigned long )offset) + 55U; } else { } offset = (u16 )((int )offset + 1); ldv_44010: ; if ((unsigned int )offset <= 9U) { goto ldv_44009; } else { } goto out; } else { } ret_val = (*(hw->nvm.ops.read))(hw, (int )pointer, 1, & length); if (ret_val != 0) { descriptor___4.modname = "igb"; descriptor___4.function = "igb_read_part_string"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___4.format = "NVM Read Error\n"; descriptor___4.lineno = 572U; descriptor___4.flags = 0U; tmp___10 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___10 != 0L) { tmp___9 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___4, (struct net_device const *)tmp___9, "NVM Read Error\n"); } else { } goto out; } else { } if ((unsigned int )length == 65535U || 
(unsigned int )length == 0U) { descriptor___5.modname = "igb"; descriptor___5.function = "igb_read_part_string"; descriptor___5.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___5.format = "NVM PBA number section invalid length\n"; descriptor___5.lineno = 577U; descriptor___5.flags = 0U; tmp___12 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L); if (tmp___12 != 0L) { tmp___11 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___5, (struct net_device const *)tmp___11, "NVM PBA number section invalid length\n"); } else { } ret_val = 18; goto out; } else { } if ((unsigned int )length * 2U - 1U > part_num_size) { descriptor___6.modname = "igb"; descriptor___6.function = "igb_read_part_string"; descriptor___6.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___6.format = "PBA string buffer too small\n"; descriptor___6.lineno = 583U; descriptor___6.flags = 0U; tmp___14 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___14 != 0L) { tmp___13 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___6, (struct net_device const *)tmp___13, "PBA string buffer too small\n"); } else { } ret_val = 17; goto out; } else { } pointer = (u16 )((int )pointer + 1); length = (u16 )((int )length - 1); offset = 0U; goto ldv_44017; ldv_44016: ret_val = (*(hw->nvm.ops.read))(hw, (int )pointer + (int )offset, 1, & nvm_data); if (ret_val != 0) { descriptor___7.modname = "igb"; descriptor___7.function = "igb_read_part_string"; descriptor___7.filename = 
/* NOTE(review): CIL-generated code. The text below up to the first function
 * header is the tail of igb_read_part_string() (debug-print of a failed NVM
 * read, then PBA-string byte expansion and NUL termination) — left verbatim. */
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___7.format = "NVM Read Error\n"; descriptor___7.lineno = 595U; descriptor___7.flags = 0U; tmp___16 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L); if (tmp___16 != 0L) { tmp___15 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___7, (struct net_device const *)tmp___15, "NVM Read Error\n"); } else { } goto out; } else { } *(part_num + (unsigned long )((int )offset * 2)) = (unsigned char )((int )nvm_data >> 8); *(part_num + ((unsigned long )((int )offset * 2) + 1UL)) = (unsigned char )nvm_data; offset = (u16 )((int )offset + 1); ldv_44017: ; if ((int )offset < (int )length) { goto ldv_44016; } else { } *(part_num + (unsigned long )((int )offset * 2)) = 0U; out: ; return (ret_val); } }
/* igb_read_mac_addr - recover the permanent MAC address from hardware.
 * Reads two 32-bit registers at offsets 21504/21508 (0x5400/0x5404 —
 * presumably RAL0/RAH0, the first receive-address register pair; confirm
 * against the 82575 register map). The three ldv_-labelled goto loops are
 * CIL expansions of simple for-loops: bytes 0..3 of mac.perm_addr come from
 * the low register, bytes 4..5 from the high register, then all 6 bytes are
 * copied into mac.addr. Always returns 0. */
s32 igb_read_mac_addr(struct e1000_hw *hw ) { u32 rar_high ; u32 rar_low ; u16 i ; { rar_high = igb_rd32(hw, 21508U); rar_low = igb_rd32(hw, 21504U); i = 0U; goto ldv_44026; ldv_44025: hw->mac.perm_addr[(int )i] = (unsigned char )(rar_low >> (int )i * 8); i = (u16 )((int )i + 1); ldv_44026: ; if ((unsigned int )i <= 3U) { goto ldv_44025; } else { } i = 0U; goto ldv_44029; ldv_44028: hw->mac.perm_addr[(int )i + 4] = (unsigned char )(rar_high >> (int )i * 8); i = (u16 )((int )i + 1); ldv_44029: ; if ((unsigned int )i <= 1U) { goto ldv_44028; } else { } i = 0U; goto ldv_44032; ldv_44031: hw->mac.addr[(int )i] = hw->mac.perm_addr[(int )i]; i = (u16 )((int )i + 1); ldv_44032: ; if ((unsigned int )i <= 5U) { goto ldv_44031; } else { } return (0); } }
/* igb_validate_nvm_checksum - verify the NVM software checksum.
 * Sums NVM words 0..63 via the nvm.ops.read function pointer (one word per
 * iteration; a read failure logs "NVM Read Error" through dynamic debug and
 * bails with that error code). The 16-bit sum of the first 64 words must
 * equal 47802 (0xBABA — the e1000 NVM_SUM constant); on mismatch logs
 * "NVM Checksum Invalid" and returns -1. Returns 0 on success. */
s32 igb_validate_nvm_checksum(struct e1000_hw *hw ) { s32 ret_val ; u16 checksum ; u16 i ; u16 nvm_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { ret_val = 0; checksum = 0U; i = 0U; goto ldv_44045; ldv_44044: ret_val = (*(hw->nvm.ops.read))(hw, (int )i, 1, & nvm_data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_validate_nvm_checksum"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 652U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error\n"); } else { } goto out; } else { } checksum = (int )checksum + (int )nvm_data; i = (u16 )((int )i + 1); ldv_44045: ; if ((unsigned int )i <= 63U) { goto ldv_44044; } else { } if ((unsigned int )checksum != 47802U) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_validate_nvm_checksum"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___0.format = "NVM Checksum Invalid\n"; descriptor___0.lineno = 659U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Checksum Invalid\n"); } else { } ret_val = -1; goto out; } else { } out: ; return (ret_val); } }
/* igb_update_nvm_checksum (head only — body continues on the next source
 * line): sums NVM words 0..62, then writes 0xBABA minus that sum into word
 * 63 so the full 64-word sum validates. Left verbatim below. */
s32 igb_update_nvm_checksum(struct e1000_hw *hw ) { s32 ret_val ; u16 checksum ; u16 i ; u16 nvm_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { checksum = 0U; i = 0U; goto ldv_44059; ldv_44058: ret_val = 
(*(hw->nvm.ops.read))(hw, (int )i, 1, & nvm_data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_update_nvm_checksum"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor.format = "NVM Read Error while updating checksum.\n"; descriptor.lineno = 685U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error while updating checksum.\n"); } else { } goto out; } else { } checksum = (int )checksum + (int )nvm_data; i = (u16 )((int )i + 1); ldv_44059: ; if ((unsigned int )i <= 62U) { goto ldv_44058; } else { } checksum = 47802U - (unsigned int )checksum; ret_val = (*(hw->nvm.ops.write))(hw, 63, 1, & checksum); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_update_nvm_checksum"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_nvm.c"; descriptor___0.format = "NVM Write Error while updating checksum.\n"; descriptor___0.lineno = 693U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Write Error while updating checksum.\n"); } else { } } else { } out: ; return (ret_val); } } void igb_get_fw_version(struct e1000_hw *hw , struct e1000_fw_version *fw_vers ) { u16 eeprom_verh ; u16 eeprom_verl ; u16 etrack_test ; u16 fw_version ; u8 q ; u8 hval ; u8 rem ; u8 result ; u16 comb_verh ; u16 
comb_verl ; u16 comb_offset ; bool tmp ; int tmp___0 ; { memset((void *)fw_vers, 0, 20UL); (*(hw->nvm.ops.read))(hw, 67, 1, & etrack_test); switch ((unsigned int )hw->mac.type) { case 7U: igb_read_invm_version(hw, fw_vers); return; case 1U: ; case 2U: ; case 3U: ; if (((int )etrack_test & 61440) != 32768) { (*(hw->nvm.ops.read))(hw, 5, 1, & fw_version); fw_vers->eep_major = (int )fw_version >> 12; fw_vers->eep_minor = (u16 )(((int )fw_version & 4080) >> 4); fw_vers->eep_build = (unsigned int )fw_version & 15U; goto etrack_id; } else { } goto ldv_44082; case 6U: tmp = igb_get_flash_presence_i210(hw); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { igb_read_invm_version(hw, fw_vers); return; } else { } case 4U: (*(hw->nvm.ops.read))(hw, 61, 1, & comb_offset); if ((unsigned int )comb_offset != 0U && (unsigned int )comb_offset != 65535U) { (*(hw->nvm.ops.read))(hw, (int )((unsigned int )comb_offset + 132U), 1, & comb_verh); (*(hw->nvm.ops.read))(hw, (int )((unsigned int )comb_offset + 131U), 1, & comb_verl); if (((unsigned int )comb_verh != 0U && (unsigned int )comb_verl != 0U) && ((unsigned int )comb_verh != 65535U && (unsigned int )comb_verl != 65535U)) { fw_vers->or_valid = 1; fw_vers->or_major = (u16 )((int )comb_verl >> 8); fw_vers->or_build = (u16 )((int )((short )((int )comb_verl << 8)) | (int )((short )((int )comb_verh >> 8))); fw_vers->or_patch = (unsigned int )comb_verh & 255U; } else { } } else { } goto ldv_44082; default: ; return; } ldv_44082: (*(hw->nvm.ops.read))(hw, 5, 1, & fw_version); fw_vers->eep_major = (int )fw_version >> 12; if (((int )fw_version & 3840) == 0) { eeprom_verl = (unsigned int )fw_version & 255U; } else { eeprom_verl = (u16 )(((int )fw_version & 4080) >> 4); } q = (u8 )((unsigned int )eeprom_verl / 16U); hval = (unsigned int )q * 10U; rem = (unsigned int )((u8 )eeprom_verl) & 15U; result = (int )hval + (int )rem; fw_vers->eep_minor = (u16 )result; etrack_id: ; if (((int )etrack_test & 61440) == 32768) { 
(*(hw->nvm.ops.read))(hw, 66, 1, & eeprom_verl); (*(hw->nvm.ops.read))(hw, 67, 1, & eeprom_verh); fw_vers->etrack_id = (u32 )(((int )eeprom_verh << 16) | (int )eeprom_verl); } else { } return; } } bool ldv_queue_work_on_185(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_186(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_187(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_188(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_189(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_190(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_191(struct mutex *ldv_func_arg1 ) { { 
ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_192(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_193(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_194(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_195(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_196(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_221(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_219(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_222(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_223(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_218(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_220(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_224(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_213(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_215(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_214(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_217(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; 
void ldv_flush_workqueue_216(struct workqueue_struct *ldv_func_arg1 ) ; static s32 igb_phy_setup_autoneg(struct e1000_hw *hw ) ; static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw , u16 *phy_ctrl ) ; static s32 igb_wait_autoneg(struct e1000_hw *hw ) ; static s32 igb_set_master_slave_mode(struct e1000_hw *hw ) ; static u16 const e1000_m88_cable_length_table[7U] = { 0U, 50U, 80U, 110U, 140U, 140U, 255U}; static u16 const e1000_igp_2_cable_length_table[113U] = { 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 3U, 5U, 8U, 11U, 13U, 16U, 18U, 21U, 0U, 0U, 0U, 3U, 6U, 10U, 13U, 16U, 19U, 23U, 26U, 29U, 32U, 35U, 38U, 41U, 6U, 10U, 14U, 18U, 22U, 26U, 30U, 33U, 37U, 41U, 44U, 48U, 51U, 54U, 58U, 61U, 21U, 26U, 31U, 35U, 40U, 44U, 49U, 53U, 57U, 61U, 65U, 68U, 72U, 75U, 79U, 82U, 40U, 45U, 51U, 56U, 61U, 66U, 70U, 75U, 79U, 83U, 87U, 91U, 94U, 98U, 101U, 104U, 60U, 66U, 72U, 77U, 82U, 87U, 92U, 96U, 100U, 104U, 108U, 111U, 114U, 117U, 119U, 121U, 83U, 89U, 95U, 100U, 105U, 109U, 113U, 116U, 119U, 122U, 124U, 104U, 109U, 114U, 118U, 121U, 124U}; s32 igb_check_reset_block(struct e1000_hw *hw ) { u32 manc ; { manc = igb_rd32(hw, 22560U); return ((manc & 262144U) != 0U ? 
/* NOTE(review): CIL-generated code. "12 : 0); } }" below is the tail of
 * igb_check_reset_block() (its ?: result expression and closing braces) —
 * left verbatim. */
12 : 0); } }
/* igb_get_phy_id - read and compose the 32-bit PHY identifier.
 * Reads PHY register 2 (ID high word) through the phy->ops.read_reg
 * function pointer, shifts it into the upper 16 bits of phy->id, delays
 * (__const_udelay), then reads register 3 (ID low word): its value masked
 * with 4294967280U (0xFFFFFFF0) is OR-ed into phy->id and its low nibble
 * becomes phy->revision. Either read failing short-circuits to 'out' and
 * returns that error; returns 0 on success. */
s32 igb_get_phy_id(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_id ; { phy = & hw->phy; ret_val = 0; ret_val = (*(phy->ops.read_reg))(hw, 2U, & phy_id); if (ret_val != 0) { goto out; } else { } phy->id = (unsigned int )((int )phy_id << 16); __const_udelay(85900UL); ret_val = (*(phy->ops.read_reg))(hw, 3U, & phy_id); if (ret_val != 0) { goto out; } else { } phy->id = phy->id | ((u32 )phy_id & 4294967280U); phy->revision = (unsigned int )phy_id & 15U; out: ; return (ret_val); } }
/* igb_phy_reset_dsp - reset the PHY DSP by writing 193 (0xC1) then 0 to PHY
 * register 30. No-op (returns 0) when the write_reg op is a null pointer;
 * returns the first failing write's error otherwise. */
static s32 igb_phy_reset_dsp(struct e1000_hw *hw ) { s32 ret_val ; { ret_val = 0; if ((unsigned long )hw->phy.ops.write_reg == (unsigned long )((s32 (*)(struct e1000_hw * , u32 , u16 ))0)) { goto out; } else { } ret_val = (*(hw->phy.ops.write_reg))(hw, 30U, 193); if (ret_val != 0) { goto out; } else { } ret_val = (*(hw->phy.ops.write_reg))(hw, 30U, 0); out: ; return (ret_val); } }
/* igb_read_phy_reg_mdic (head only — body continues on the next source
 * line, which also completes the split "...out of range" string literal):
 * rejects register offsets > 31 with error -4, otherwise builds an MDIC
 * read command from offset and phy->addr and polls for completion.
 * Everything from here to the end of this source line is left verbatim. */
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw , u32 offset , u16 *data ) { struct e1000_phy_info *phy ; u32 i ; u32 mdic ; s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; struct _ddebug descriptor___0 ; struct net_device *tmp___2 ; long tmp___3 ; struct _ddebug descriptor___1 ; struct net_device *tmp___4 ; long tmp___5 ; { phy = & hw->phy; mdic = 0U; ret_val = 0; if (offset > 31U) { descriptor.modname = "igb"; descriptor.function = "igb_read_phy_reg_mdic"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "PHY Address %d is out of range\n"; descriptor.lineno = 142U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PHY Address %d is out 
of range\n", offset); } else { } ret_val = -4; goto out; } else { } mdic = ((offset << 16) | (phy->addr << 21)) | 134217728U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(mdic, (void volatile *)hw_addr + 32U); } else { } i = 0U; goto ldv_43873; ldv_43872: __const_udelay(214750UL); mdic = igb_rd32(hw, 32U); if ((mdic & 268435456U) != 0U) { goto ldv_43871; } else { } i = i + 1U; ldv_43873: ; if (i <= 1919U) { goto ldv_43872; } else { } ldv_43871: ; if ((mdic & 268435456U) == 0U) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_read_phy_reg_mdic"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "MDI Read did not complete\n"; descriptor___0.lineno = 168U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___2, "MDI Read did not complete\n"); } else { } ret_val = -2; goto out; } else { } if ((mdic & 1073741824U) != 0U) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_read_phy_reg_mdic"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "MDI Error\n"; descriptor___1.lineno = 173U; descriptor___1.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___4 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___4, 
"MDI Error\n"); } else { } ret_val = -2; goto out; } else { } *data = (unsigned short )mdic; out: ; return (ret_val); } } s32 igb_write_phy_reg_mdic(struct e1000_hw *hw , u32 offset , u16 data ) { struct e1000_phy_info *phy ; u32 i ; u32 mdic ; s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; struct _ddebug descriptor___0 ; struct net_device *tmp___2 ; long tmp___3 ; struct _ddebug descriptor___1 ; struct net_device *tmp___4 ; long tmp___5 ; { phy = & hw->phy; mdic = 0U; ret_val = 0; if (offset > 31U) { descriptor.modname = "igb"; descriptor.function = "igb_write_phy_reg_mdic"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "PHY Address %d is out of range\n"; descriptor.lineno = 198U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PHY Address %d is out of range\n", offset); } else { } ret_val = -4; goto out; } else { } mdic = (((unsigned int )data | (offset << 16)) | (phy->addr << 21)) | 67108864U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(mdic, (void volatile *)hw_addr + 32U); } else { } i = 0U; goto ldv_43893; ldv_43892: __const_udelay(214750UL); mdic = igb_rd32(hw, 32U); if ((mdic & 268435456U) != 0U) { goto ldv_43891; } else { } i = i + 1U; ldv_43893: ; if (i <= 1919U) { goto ldv_43892; } else { } ldv_43891: ; if ((mdic & 268435456U) == 0U) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_write_phy_reg_mdic"; descriptor___0.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "MDI Write did not complete\n"; descriptor___0.lineno = 225U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___2, "MDI Write did not complete\n"); } else { } ret_val = -2; goto out; } else { } if ((mdic & 1073741824U) != 0U) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_write_phy_reg_mdic"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "MDI Error\n"; descriptor___1.lineno = 230U; descriptor___1.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___4 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___4, "MDI Error\n"); } else { } ret_val = -2; goto out; } else { } out: ; return (ret_val); } } s32 igb_read_phy_reg_i2c(struct e1000_hw *hw , u32 offset , u16 *data ) { struct e1000_phy_info *phy ; u32 i ; u32 i2ccmd ; u8 *hw_addr ; u8 *__var ; long tmp ; struct _ddebug descriptor ; struct net_device *tmp___0 ; long tmp___1 ; struct _ddebug descriptor___0 ; struct net_device *tmp___2 ; long tmp___3 ; { phy = & hw->phy; i2ccmd = 0U; i2ccmd = ((offset << 16) | (phy->addr << 24)) | 134217728U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(i2ccmd, (void volatile *)hw_addr + 4136U); } else { } i = 
/* Continuation of igb_read_phy_reg_i2c(): poll up to 200 times (~50us apart)
 * for I2CCMD ready bit 0x20000000, then check the error bit (tested as the
 * sign bit via (int)i2ccmd < 0, i.e. 0x80000000). */
0U; goto ldv_43909; ldv_43908: __const_udelay(214750UL); i2ccmd = igb_rd32(hw, 4136U); if ((i2ccmd & 536870912U) != 0U) { goto ldv_43907; } else { } i = i + 1U; ldv_43909: ; if (i <= 199U) { goto ldv_43908; } else { } ldv_43907: ; if ((i2ccmd & 536870912U) == 0U) { descriptor.modname = "igb"; descriptor.function = "igb_read_phy_reg_i2c"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "I2CCMD Read did not complete\n"; descriptor.lineno = 271U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___0, "I2CCMD Read did not complete\n"); } else { } return (-2); } else { } if ((int )i2ccmd < 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_read_phy_reg_i2c"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "I2CCMD Error bit set\n"; descriptor___0.lineno = 275U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___2, "I2CCMD Error bit set\n"); } else { } return (-2); } else { }
/* Byte-swap the 16-bit payload (I2C data arrives byte-reversed). */
*data = ((unsigned int )((u16 )(i2ccmd >> 8)) & 255U) | (unsigned int )((int )((u16 )i2ccmd) << 8U); return (0); } }
/* igb_write_phy_reg_i2c - write @data to PHY register @offset over the I2C
 * interface.  Rejects phy.addr 0 or > 7 with -3 (presumably
 * -E1000_ERR_CONFIG -- verify); byte-swaps @data before issuing the
 * I2CCMD write. */
s32 igb_write_phy_reg_i2c(struct e1000_hw *hw , u32 offset , u16 data ) { struct e1000_phy_info *phy ; u32 i ; u32 i2ccmd ; u16 phy_data_swapped ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ;
u8 *hw_addr ; u8 *__var ; long tmp___1 ; struct _ddebug descriptor___0 ; struct net_device *tmp___2 ; long tmp___3 ; struct _ddebug descriptor___1 ; struct net_device *tmp___4 ; long tmp___5 ; { phy = & hw->phy; i2ccmd = 0U; if (hw->phy.addr == 0U || hw->phy.addr > 7U) { descriptor.modname = "igb"; descriptor.function = "igb_write_phy_reg_i2c"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "PHY I2C Address %d is out of range.\n"; descriptor.lineno = 302U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "PHY I2C Address %d is out of range.\n", hw->phy.addr); } else { } return (-3); } else { }
/* Swap bytes of the outgoing word, then compose the I2CCMD write word
 * (no read opcode bit => write operation). */
phy_data_swapped = (u16 )((int )((short )((int )data >> 8)) | (int )((short )((int )data << 8))); i2ccmd = ((offset << 16) | (phy->addr << 24)) | (u32 )phy_data_swapped; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(i2ccmd, (void volatile *)hw_addr + 4136U); } else { }
/* Poll (200 x ~50us) for the I2CCMD ready bit. */
i = 0U; goto ldv_43929; ldv_43928: __const_udelay(214750UL); i2ccmd = igb_rd32(hw, 4136U); if ((i2ccmd & 536870912U) != 0U) { goto ldv_43927; } else { } i = i + 1U; ldv_43929: ; if (i <= 199U) { goto ldv_43928; } else { } ldv_43927: ; if ((i2ccmd & 536870912U) == 0U) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_write_phy_reg_i2c"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c";
descriptor___0.format = "I2CCMD Write did not complete\n"; descriptor___0.lineno = 328U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___2, "I2CCMD Write did not complete\n"); } else { } return (-2); } else { } if ((int )i2ccmd < 0) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_write_phy_reg_i2c"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "I2CCMD Error bit set\n"; descriptor___1.lineno = 332U; descriptor___1.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___4 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___4, "I2CCMD Error bit set\n"); } else { } return (-2); } else { } return (0); } } s32 igb_read_sfp_data_byte(struct e1000_hw *hw , u16 offset , u8 *data ) { u32 i ; u32 i2ccmd ; u32 data_local ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; struct _ddebug descriptor___0 ; struct net_device *tmp___2 ; long tmp___3 ; struct _ddebug descriptor___1 ; struct net_device *tmp___4 ; long tmp___5 ; { i = 0U; i2ccmd = 0U; data_local = 0U; if ((unsigned int )offset > 511U) { descriptor.modname = "igb"; descriptor.function = "igb_read_sfp_data_byte"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "I2CCMD command address exceeds upper limit\n"; descriptor.lineno = 359U; 
descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "I2CCMD command address exceeds upper limit\n"); } else { } return (-2); } else { } i2ccmd = (u32 )(((int )offset << 16) | 134217728); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(i2ccmd, (void volatile *)hw_addr + 4136U); } else { } i = 0U; goto ldv_43947; ldv_43946: __const_udelay(214750UL); data_local = igb_rd32(hw, 4136U); if ((data_local & 536870912U) != 0U) { goto ldv_43945; } else { } i = i + 1U; ldv_43947: ; if (i <= 199U) { goto ldv_43946; } else { } ldv_43945: ; if ((data_local & 536870912U) == 0U) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_read_sfp_data_byte"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "I2CCMD Read did not complete\n"; descriptor___0.lineno = 380U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___2, "I2CCMD Read did not complete\n"); } else { } return (-2); } else { } if ((int )data_local < 0) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_read_sfp_data_byte"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "I2CCMD Error bit set\n"; 
descriptor___1.lineno = 384U; descriptor___1.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___4 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___4, "I2CCMD Error bit set\n"); } else { } return (-2); } else { } *data = (u8 )data_local; return (0); } } s32 igb_read_phy_reg_igp(struct e1000_hw *hw , u32 offset , u16 *data ) { s32 ret_val ; { ret_val = 0; if ((unsigned long )hw->phy.ops.acquire == (unsigned long )((s32 (*)(struct e1000_hw * ))0)) { goto out; } else { } ret_val = (*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } if (offset > 15U) { ret_val = igb_write_phy_reg_mdic(hw, 31U, (int )((unsigned short )offset)); if (ret_val != 0) { (*(hw->phy.ops.release))(hw); goto out; } else { } } else { } ret_val = igb_read_phy_reg_mdic(hw, offset & 31U, data); (*(hw->phy.ops.release))(hw); out: ; return (ret_val); } } s32 igb_write_phy_reg_igp(struct e1000_hw *hw , u32 offset , u16 data ) { s32 ret_val ; { ret_val = 0; if ((unsigned long )hw->phy.ops.acquire == (unsigned long )((s32 (*)(struct e1000_hw * ))0)) { goto out; } else { } ret_val = (*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } if (offset > 15U) { ret_val = igb_write_phy_reg_mdic(hw, 31U, (int )((unsigned short )offset)); if (ret_val != 0) { (*(hw->phy.ops.release))(hw); goto out; } else { } } else { } ret_val = igb_write_phy_reg_mdic(hw, offset & 31U, (int )data); (*(hw->phy.ops.release))(hw); out: ; return (ret_val); } } s32 igb_copper_link_setup_82580(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { phy = & hw->phy; if ((int )phy->reset_disable) { ret_val = 0; goto out; } else { } if ((unsigned int )phy->type == 8U) { ret_val = (*(hw->phy.ops.reset))(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_copper_link_setup_82580"; 
descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Error resetting the PHY.\n"; descriptor.lineno = 491U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Error resetting the PHY.\n"); } else { } goto out; } else { } } else { } ret_val = (*(phy->ops.read_reg))(hw, 22U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (u16 )((unsigned int )phy_data | 32768U); phy_data = (u16 )((unsigned int )phy_data | 3072U); ret_val = (*(phy->ops.write_reg))(hw, 22U, (int )phy_data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 18U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (unsigned int )phy_data & 63999U; switch ((int )hw->phy.mdix) { case 1: ; goto ldv_43974; case 2: phy_data = (u16 )((unsigned int )phy_data | 512U); goto ldv_43974; case 0: ; default: phy_data = (u16 )((unsigned int )phy_data | 1024U); goto ldv_43974; } ldv_43974: ret_val = (*(hw->phy.ops.write_reg))(hw, 18U, (int )phy_data); out: ; return (ret_val); } } s32 igb_copper_link_setup_m88(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { phy = & hw->phy; if ((int )phy->reset_disable) { ret_val = 0; goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 16U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (u16 )((unsigned int )phy_data | 2048U); phy_data = (unsigned int )phy_data & 65439U; switch ((int )phy->mdix) { case 1: phy_data = phy_data; goto ldv_43986; case 2: phy_data = (u16 )((unsigned int )phy_data | 32U); goto ldv_43986; case 3: phy_data = (u16 
)((unsigned int )phy_data | 64U); goto ldv_43986; case 0: ; default: phy_data = (u16 )((unsigned int )phy_data | 96U); goto ldv_43986; } ldv_43986: phy_data = (unsigned int )phy_data & 65533U; if ((int )phy->disable_polarity_correction) { phy_data = (u16 )((unsigned int )phy_data | 2U); } else { } ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )phy_data); if (ret_val != 0) { goto out; } else { } if (phy->revision <= 3U) { ret_val = (*(phy->ops.read_reg))(hw, 20U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (u16 )((unsigned int )phy_data | 112U); if (phy->revision == 2U && phy->id == 21040320U) { phy_data = (unsigned int )phy_data & 61951U; phy_data = (u16 )((unsigned int )phy_data | 2048U); } else { phy_data = (unsigned int )phy_data & 61695U; phy_data = (u16 )((unsigned int )phy_data | 256U); } ret_val = (*(phy->ops.write_reg))(hw, 20U, (int )phy_data); if (ret_val != 0) { goto out; } else { } } else { } ret_val = igb_phy_sw_reset(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_copper_link_setup_m88"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Error committing the PHY changes\n"; descriptor.lineno = 633U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Error committing the PHY changes\n"); } else { } goto out; } else { } out: ; return (ret_val); } } s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { phy = & hw->phy; if ((int 
/* Body of igb_copper_link_setup_m88_gen2(): MDI crossover selection in
 * register 16 (note case 3 falls through to the default/auto path when
 * phy->id == 21040272 == 0x01410C90, presumably M88E1112 -- verify);
 * phy->id 21040800 == 0x01410EA0 (presumably M88E1543) takes a dedicated
 * branch that clears bit 0x800 and resets early. */
)phy->reset_disable) { return (0); } else { } ret_val = (*(phy->ops.read_reg))(hw, 16U, & phy_data); if (ret_val != 0) { return (ret_val); } else { } phy_data = (unsigned int )phy_data & 65439U; switch ((int )phy->mdix) { case 1: phy_data = phy_data; goto ldv_44000; case 2: phy_data = (u16 )((unsigned int )phy_data | 32U); goto ldv_44000; case 3: ; if (phy->id != 21040272U) { phy_data = (u16 )((unsigned int )phy_data | 64U); goto ldv_44000; } else { } case 0: ; default: phy_data = (u16 )((unsigned int )phy_data | 96U); goto ldv_44000; } ldv_44000: phy_data = (unsigned int )phy_data & 65533U; if ((int )phy->disable_polarity_correction) { phy_data = (u16 )((unsigned int )phy_data | 2U); } else { } if (phy->id == 21040800U) { phy_data = (unsigned int )phy_data & 63487U; ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )phy_data); if (ret_val != 0) { return (ret_val); } else { } ret_val = igb_phy_sw_reset(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_copper_link_setup_m88_gen2"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Error committing the PHY changes\n"; descriptor.lineno = 710U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Error committing the PHY changes\n"); } else { } return (ret_val); } else { } } else { }
/* Generic gen2 path: rewrite the downshift field (& 0x8FFF | 0x5000),
 * enable assisted speed downgrade (0x800), commit via sw reset. */
phy_data = (unsigned int )phy_data & 36863U; phy_data = (u16 )((unsigned int )phy_data | 20480U); phy_data = (u16 )((unsigned int )phy_data | 2048U); ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )phy_data); if (ret_val != 0) { return (ret_val); } else { } ret_val = igb_phy_sw_reset(hw); if (ret_val != 0) { descriptor___0.modname = "igb";
descriptor___0.function = "igb_copper_link_setup_m88_gen2"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Error committing the PHY changes\n"; descriptor___0.lineno = 726U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Error committing the PHY changes\n"); } else { } return (ret_val); } else { } ret_val = igb_set_master_slave_mode(hw); if (ret_val != 0) { return (ret_val); } else { } return (0); } }
/* igb_copper_link_setup_igp - copper link setup for IGP PHYs.  Resets the
 * PHY, waits 100 ms, then disables LPLU and configures MDI/MDI-X and
 * master/slave (body continues on the next chunk). */
s32 igb_copper_link_setup_igp(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; { phy = & hw->phy; if ((int )phy->reset_disable) { ret_val = 0; goto out; } else { } ret_val = (*(phy->ops.reset))(hw); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_copper_link_setup_igp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Error resetting the PHY.\n"; descriptor.lineno = 756U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Error resetting the PHY.\n"); } else { } goto out; } else { }
/* Let the reset settle, then handle the phy->type == 3U case
 * (presumably e1000_phy_igp_3 -- verify enum). */
msleep(100U); if ((unsigned int )phy->type == 3U) { if
/* Continuation of igb_copper_link_setup_igp(): disable D3 LPLU when the op
 * is installed (note: if set_d3_lplu_state is NULL, ret_val keeps its prior
 * value, which is 0 here since the reset already succeeded), then disable
 * D0 LPLU unconditionally. */
((unsigned long )phy->ops.set_d3_lplu_state != (unsigned long )((s32 (*)(struct e1000_hw * , bool ))0)) { ret_val = (*(phy->ops.set_d3_lplu_state))(hw, 0); } else { } if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_copper_link_setup_igp"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Error Disabling LPLU D3\n"; descriptor___0.lineno = 773U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Error Disabling LPLU D3\n"); } else { } goto out; } else { } } else { } ret_val = (*(phy->ops.set_d0_lplu_state))(hw, 0); if (ret_val != 0) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_copper_link_setup_igp"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Error Disabling LPLU D0\n"; descriptor___1.lineno = 781U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Error Disabling LPLU D0\n"); } else { }
/* MDI/MDI-X selection in IGP register 18: clear the field (& 0xEFFF),
 * then 1 -> force MDI, 2 -> force MDI-X (bit 0x2000), default -> auto
 * (bit 0x1000). */
goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 18U, & data); if (ret_val != 0) { goto out; } else { } data = (unsigned int )data & 61439U; switch ((int )phy->mdix) { case 1: data = (unsigned int )data & 57343U; goto ldv_44020; case 2: data = (u16 )((unsigned int )data | 8192U); goto ldv_44020; case 0: ; default: data = (u16
)((unsigned int )data | 4096U); goto ldv_44020; } ldv_44020: ret_val = (*(phy->ops.write_reg))(hw, 18U, (int )data); if (ret_val != 0) { goto out; } else { }
/* Autoneg tuning: when only 1000 Mb/s full (mask 32U) is advertised,
 * trim smart-speed bits in register 16 and the 1000T control register 9;
 * then record/force master-slave mode from phy->ms_type (bits 0x1000
 * manual-enable, 0x0800 master-select per MII 1000BASE-T control). */
if ((int )hw->mac.autoneg) { if ((unsigned int )phy->autoneg_advertised == 32U) { ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); if (ret_val != 0) { goto out; } else { } data = (unsigned int )data & 65407U; ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 9U, & data); if (ret_val != 0) { goto out; } else { } data = (unsigned int )data & 61439U; ret_val = (*(phy->ops.write_reg))(hw, 9U, (int )data); if (ret_val != 0) { goto out; } else { } } else { } ret_val = (*(phy->ops.read_reg))(hw, 9U, & data); if (ret_val != 0) { goto out; } else { } phy->original_ms_type = ((int )data & 4096) != 0 ? (((int )data & 2048) != 0 ? 1 : 2) : 3; switch ((unsigned int )phy->ms_type) { case 1U: data = (u16 )((unsigned int )data | 6144U); goto ldv_44025; case 2U: data = (u16 )((unsigned int )data | 4096U); data = (unsigned int )data & 63487U; goto ldv_44025; case 3U: data = (unsigned int )data & 61439U; default: ; goto ldv_44025; } ldv_44025: ret_val = (*(phy->ops.write_reg))(hw, 9U, (int )data); if (ret_val != 0) { goto out; } else { } } else { } out: ; return (ret_val); } }
/* igb_copper_link_autoneg - run PHY auto-negotiation: fall back to the full
 * autoneg_mask when nothing is advertised, program advertisement registers,
 * restart autoneg (header and locals; body continues on the next chunk). */
static s32 igb_copper_link_autoneg(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_ctrl ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___2 ; struct net_device *tmp___5 ; long tmp___6 ; { phy = & hw->phy; phy->autoneg_advertised = (u16 )((int )phy->autoneg_advertised & (int )phy->autoneg_mask); if ((unsigned int )phy->autoneg_advertised == 0U) { phy->autoneg_advertised = phy->autoneg_mask; } else { } descriptor.modname = "igb"; descriptor.function =
"igb_copper_link_autoneg"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Reconfiguring auto-neg advertisement params\n"; descriptor.lineno = 898U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Reconfiguring auto-neg advertisement params\n"); } else { } ret_val = igb_phy_setup_autoneg(hw); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_copper_link_autoneg"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Error Setting up Auto-Negotiation\n"; descriptor___0.lineno = 901U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Error Setting up Auto-Negotiation\n"); } else { } goto out; } else { } descriptor___1.modname = "igb"; descriptor___1.function = "igb_copper_link_autoneg"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Restarting Auto-Neg\n"; descriptor___1.lineno = 904U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& 
descriptor___1, (struct net_device const *)tmp___3, "Restarting Auto-Neg\n"); } else { } ret_val = (*(phy->ops.read_reg))(hw, 0U, & phy_ctrl); if (ret_val != 0) { goto out; } else { } phy_ctrl = (u16 )((unsigned int )phy_ctrl | 4608U); ret_val = (*(phy->ops.write_reg))(hw, 0U, (int )phy_ctrl); if (ret_val != 0) { goto out; } else { } if ((int )phy->autoneg_wait_to_complete) { ret_val = igb_wait_autoneg(hw); if (ret_val != 0) { descriptor___2.modname = "igb"; descriptor___2.function = "igb_copper_link_autoneg"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___2.format = "Error while waiting for autoneg to complete\n"; descriptor___2.lineno = 924U; descriptor___2.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___5, "Error while waiting for autoneg to complete\n"); } else { } goto out; } else { } } else { } hw->mac.get_link_status = 1; out: ; return (ret_val); } } static s32 igb_phy_setup_autoneg(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 mii_autoneg_adv_reg ; u16 mii_1000t_ctrl_reg ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___2 ; struct net_device *tmp___5 ; long tmp___6 ; struct _ddebug descriptor___3 ; struct net_device *tmp___7 ; long tmp___8 ; struct _ddebug descriptor___4 ; struct net_device *tmp___9 ; long tmp___10 ; struct _ddebug descriptor___5 ; struct net_device *tmp___11 ; long tmp___12 ; struct _ddebug descriptor___6 ; struct net_device *tmp___13 ; long tmp___14 ; 
/* Body of igb_phy_setup_autoneg(): read current advertisement registers,
 * clear the speed/duplex ability bits, then set one bit per advertised
 * mode (10H=0x20, 10F=0x40, 100H=0x80, 100F=0x100 in register 4;
 * 1000F=0x200 in register 9 -- standard clause 22/28 bit layout). */
struct _ddebug descriptor___7 ; struct net_device *tmp___15 ; long tmp___16 ; { phy = & hw->phy; mii_1000t_ctrl_reg = 0U; phy->autoneg_advertised = (u16 )((int )phy->autoneg_advertised & (int )phy->autoneg_mask); ret_val = (*(phy->ops.read_reg))(hw, 4U, & mii_autoneg_adv_reg); if (ret_val != 0) { goto out; } else { } if (((int )phy->autoneg_mask & 32) != 0) { ret_val = (*(phy->ops.read_reg))(hw, 9U, & mii_1000t_ctrl_reg); if (ret_val != 0) { goto out; } else { } } else { } mii_autoneg_adv_reg = (unsigned int )mii_autoneg_adv_reg & 65055U; mii_1000t_ctrl_reg = (unsigned int )mii_1000t_ctrl_reg & 64767U; descriptor.modname = "igb"; descriptor.function = "igb_phy_setup_autoneg"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "autoneg_advertised %x\n"; descriptor.lineno = 983U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "autoneg_advertised %x\n", (int )phy->autoneg_advertised); } else { }
/* ADVERTISE_10_HALF (bit 0 of autoneg_advertised). */
if ((int )phy->autoneg_advertised & 1) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_phy_setup_autoneg"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Advertise 10mb Half duplex\n"; descriptor___0.lineno = 987U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Advertise 10mb Half duplex\n"); } else
{ } mii_autoneg_adv_reg = (u16 )((unsigned int )mii_autoneg_adv_reg | 32U); } else { }
/* ADVERTISE_10_FULL (bit 1). */
if (((int )phy->autoneg_advertised & 2) != 0) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_phy_setup_autoneg"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Advertise 10mb Full duplex\n"; descriptor___1.lineno = 993U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Advertise 10mb Full duplex\n"); } else { } mii_autoneg_adv_reg = (u16 )((unsigned int )mii_autoneg_adv_reg | 64U); } else { }
/* ADVERTISE_100_HALF (bit 2). */
if (((int )phy->autoneg_advertised & 4) != 0) { descriptor___2.modname = "igb"; descriptor___2.function = "igb_phy_setup_autoneg"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___2.format = "Advertise 100mb Half duplex\n"; descriptor___2.lineno = 999U; descriptor___2.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___5, "Advertise 100mb Half duplex\n"); } else { } mii_autoneg_adv_reg = (u16 )((unsigned int )mii_autoneg_adv_reg | 128U); } else { }
/* ADVERTISE_100_FULL (bit 3). */
if (((int )phy->autoneg_advertised & 8) != 0) { descriptor___3.modname = "igb"; descriptor___3.function = "igb_phy_setup_autoneg"; descriptor___3.filename =
/* NOTE(review): machine-generated file -- CIL v1.5.1 flattening of the Linux
 * igb driver's e1000_phy.c, produced as input for the LDV/CPAchecker
 * verification toolchain.  The comments below were added for readability
 * only; every code token is preserved.  Do not hand-edit the logic here --
 * fix the original driver source and regenerate. */
/* (continuation of igb_phy_setup_autoneg, whose head lies before this chunk)
 * Builds the MII auto-negotiation advertisement value (mii_autoneg_adv_reg,
 * written to PHY register 4) and the 1000T control value (mii_1000t_ctrl_reg,
 * PHY register 9) from the phy->autoneg_advertised bit mask and
 * hw->fc.current_mode, emitting dynamic-debug messages along the way, then
 * writes them through phy->ops.write_reg.  ret_val is 0 on success. */
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___3.format = "Advertise 100mb Full duplex\n"; descriptor___3.lineno = 1005U; descriptor___3.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___7 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___3, (struct net_device const *)tmp___7, "Advertise 100mb Full duplex\n"); } else { } mii_autoneg_adv_reg = (u16 )((unsigned int )mii_autoneg_adv_reg | 256U); } else { } if (((int )phy->autoneg_advertised & 16) != 0) { descriptor___4.modname = "igb"; descriptor___4.function = "igb_phy_setup_autoneg"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___4.format = "Advertise 1000mb Half duplex request denied!\n"; descriptor___4.lineno = 1011U; descriptor___4.flags = 0U; tmp___10 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___10 != 0L) { tmp___9 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___4, (struct net_device const *)tmp___9, "Advertise 1000mb Half duplex request denied!\n"); } else { } } else { } if (((int )phy->autoneg_advertised & 32) != 0) { descriptor___5.modname = "igb"; descriptor___5.function = "igb_phy_setup_autoneg"; descriptor___5.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___5.format = "Advertise 1000mb Full duplex\n"; descriptor___5.lineno = 1015U; descriptor___5.flags = 0U; tmp___12 = ldv__builtin_expect((long
)descriptor___5.flags & 1L, 0L); if (tmp___12 != 0L) { tmp___11 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___5, (struct net_device const *)tmp___11, "Advertise 1000mb Full duplex\n"); } else { } mii_1000t_ctrl_reg = (u16 )((unsigned int )mii_1000t_ctrl_reg | 512U); } else { } switch ((unsigned int )hw->fc.current_mode) { case 0U: mii_autoneg_adv_reg = (unsigned int )mii_autoneg_adv_reg & 62463U; goto ldv_44058; case 1U: mii_autoneg_adv_reg = (u16 )((unsigned int )mii_autoneg_adv_reg | 3072U); goto ldv_44058; case 2U: mii_autoneg_adv_reg = (u16 )((unsigned int )mii_autoneg_adv_reg | 2048U); mii_autoneg_adv_reg = (unsigned int )mii_autoneg_adv_reg & 64511U; goto ldv_44058; case 3U: mii_autoneg_adv_reg = (u16 )((unsigned int )mii_autoneg_adv_reg | 3072U); goto ldv_44058; default: descriptor___6.modname = "igb"; descriptor___6.function = "igb_phy_setup_autoneg"; descriptor___6.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___6.format = "Flow control param set incorrectly\n"; descriptor___6.lineno = 1069U; descriptor___6.flags = 0U; tmp___14 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___14 != 0L) { tmp___13 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___6, (struct net_device const *)tmp___13, "Flow control param set incorrectly\n"); } else { } ret_val = -3; goto out; } ldv_44058: ret_val = (*(phy->ops.write_reg))(hw, 4U, (int )mii_autoneg_adv_reg); if (ret_val != 0) { goto out; } else { } descriptor___7.modname = "igb"; descriptor___7.function = "igb_phy_setup_autoneg"; descriptor___7.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c";
descriptor___7.format = "Auto-Neg Advertising %x\n"; descriptor___7.lineno = 1078U; descriptor___7.flags = 0U; tmp___16 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L); if (tmp___16 != 0L) { tmp___15 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___7, (struct net_device const *)tmp___15, "Auto-Neg Advertising %x\n", (int )mii_autoneg_adv_reg); } else { } if (((int )phy->autoneg_mask & 32) != 0) { ret_val = (*(phy->ops.write_reg))(hw, 9U, (int )mii_1000t_ctrl_reg); if (ret_val != 0) { goto out; } else { } } else { } out: ; return (ret_val); } }
/* igb_setup_copper_link - establish link on a copper PHY.
 * When hw->mac.autoneg is set, runs igb_copper_link_autoneg(); otherwise
 * forces speed/duplex through hw->phy.ops.force_speed_duplex.  Then polls for
 * link (igb_phy_has_link, 10 attempts, 10 usec apart) and, when link is up,
 * programs the collision distance and flow control
 * (igb_config_fc_after_link_up).  Returns 0 on success, negative on error. */
s32 igb_setup_copper_link(struct e1000_hw *hw ) { s32 ret_val ; bool link ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___2 ; struct net_device *tmp___5 ; long tmp___6 ; { if ((int )hw->mac.autoneg) { ret_val = igb_copper_link_autoneg(hw); if (ret_val != 0) { goto out; } else { } } else { descriptor.modname = "igb"; descriptor.function = "igb_setup_copper_link"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Forcing Speed and Duplex\n"; descriptor.lineno = 1117U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Forcing Speed and Duplex\n"); } else { } ret_val = (*(hw->phy.ops.force_speed_duplex))(hw); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_setup_copper_link"; descriptor___0.filename =
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Error Forcing Speed and Duplex\n"; descriptor___0.lineno = 1120U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Error Forcing Speed and Duplex\n"); } else { } goto out; } else { } } ret_val = igb_phy_has_link(hw, 10U, 10U, & link); if (ret_val != 0) { goto out; } else { } if ((int )link) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_setup_copper_link"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Valid link established!!!\n"; descriptor___1.lineno = 1133U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Valid link established!!!\n"); } else { } igb_config_collision_dist(hw); ret_val = igb_config_fc_after_link_up(hw); } else { descriptor___2.modname = "igb"; descriptor___2.function = "igb_setup_copper_link"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___2.format = "Unable to establish link!!!\n"; descriptor___2.lineno = 1137U; descriptor___2.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___2.flags & 1L,
0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___5, "Unable to establish link!!!\n"); } else { } } out: ; return (ret_val); } }
/* igb_phy_force_speed_duplex_igp - force speed/duplex on an IGP PHY.
 * Patches PHY_CONTROL (reg 0) via igb_phy_force_speed_duplex_setup(), clears
 * the auto-MDI/MDIX bits in IGP port-config reg 18, and, when
 * phy->autoneg_wait_to_complete is set, polls twice (20 tries x 10 ms) for
 * link, logging if it takes longer than expected. */
s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; bool link ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; { phy = & hw->phy; ret_val = (*(phy->ops.read_reg))(hw, 0U, & phy_data); if (ret_val != 0) { goto out; } else { } igb_phy_force_speed_duplex_setup(hw, & phy_data); ret_val = (*(phy->ops.write_reg))(hw, 0U, (int )phy_data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 18U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (unsigned int )phy_data & 61439U; phy_data = (unsigned int )phy_data & 57343U; ret_val = (*(phy->ops.write_reg))(hw, 18U, (int )phy_data); if (ret_val != 0) { goto out; } else { } descriptor.modname = "igb"; descriptor.function = "igb_phy_force_speed_duplex_igp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "IGP PSCR: %X\n"; descriptor.lineno = 1183U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "IGP PSCR: %X\n", (int )phy_data); } else { } __const_udelay(4295UL); if ((int )phy->autoneg_wait_to_complete) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_phy_force_speed_duplex_igp"; descriptor___0.filename =
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Waiting for forced speed/duplex link on IGP phy.\n"; descriptor___0.lineno = 1188U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Waiting for forced speed/duplex link on IGP phy.\n"); } else { } ret_val = igb_phy_has_link(hw, 20U, 10000U, & link); if (ret_val != 0) { goto out; } else { } if (! link) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_phy_force_speed_duplex_igp"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Link taking longer than expected.\n"; descriptor___1.lineno = 1195U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Link taking longer than expected.\n"); } else { } } else { } ret_val = igb_phy_has_link(hw, 20U, 10000U, & link); if (ret_val != 0) { goto out; } else { } } else { } out: ; return (ret_val); } }
/* igb_phy_force_speed_duplex_m88 - force speed/duplex on an M88 PHY.
 * For phy->type != 9U it first clears the auto-crossover bits in reg 16.
 * After forcing PHY_CONTROL it soft-resets the PHY (igb_phy_sw_reset) and,
 * when autoneg_wait_to_complete is set, polls for link (20 x 100 ms); if no
 * link, reset_dsp (chosen by phy->id / phy->type) decides whether to kick the
 * DSP via reg 29 + igb_phy_reset_dsp().  Finally, on pre-gen2 parts (the id
 * check near the end), it raises the TX_CLK bits in reg 20 and re-enables
 * auto-crossover in reg 16. */
s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; bool link ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; bool reset_dsp ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; { phy = & hw->phy; if ((unsigned int )phy->type
!= 9U) { ret_val = (*(phy->ops.read_reg))(hw, 16U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (unsigned int )phy_data & 65439U; ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )phy_data); if (ret_val != 0) { goto out; } else { } descriptor.modname = "igb"; descriptor.function = "igb_phy_force_speed_duplex_m88"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "M88E1000 PSCR: %X\n"; descriptor.lineno = 1240U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "M88E1000 PSCR: %X\n", (int )phy_data); } else { } } else { } ret_val = (*(phy->ops.read_reg))(hw, 0U, & phy_data); if (ret_val != 0) { goto out; } else { } igb_phy_force_speed_duplex_setup(hw, & phy_data); ret_val = (*(phy->ops.write_reg))(hw, 0U, (int )phy_data); if (ret_val != 0) { goto out; } else { } ret_val = igb_phy_sw_reset(hw); if (ret_val != 0) { goto out; } else { } if ((int )phy->autoneg_wait_to_complete) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_phy_force_speed_duplex_m88"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Waiting for forced speed/duplex link on M88 phy.\n"; descriptor___0.lineno = 1259U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Waiting for forced speed/duplex link on M88
phy.\n"); } else { } ret_val = igb_phy_has_link(hw, 20U, 100000U, & link); if (ret_val != 0) { goto out; } else { } if (! link) { reset_dsp = 1; switch (hw->phy.id) { case 21040576U: ; case 21040272U: ; case 21040128U: reset_dsp = 0; goto ldv_44103; default: ; if ((unsigned int )hw->phy.type != 2U) { reset_dsp = 0; } else { } goto ldv_44103; } ldv_44103: ; if (! reset_dsp) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_phy_force_speed_duplex_m88"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Link taking longer than expected.\n"; descriptor___1.lineno = 1280U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Link taking longer than expected.\n"); } else { } } else { ret_val = (*(phy->ops.write_reg))(hw, 29U, 29); if (ret_val != 0) { goto out; } else { } ret_val = igb_phy_reset_dsp(hw); if (ret_val != 0) { goto out; } else { } } } else { } ret_val = igb_phy_has_link(hw, 20U, 100000U, & link); if (ret_val != 0) { goto out; } else { } } else { } if ((((unsigned int )hw->phy.type != 2U || hw->phy.id == 21040576U) || hw->phy.id == 21040272U) || hw->phy.id == 21040128U) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 20U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (u16 )((unsigned int )phy_data | 112U); ret_val = (*(phy->ops.write_reg))(hw, 20U, (int )phy_data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 16U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (u16 )((unsigned int )phy_data | 2048U); ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )phy_data); out: ; return
(ret_val); } }
/* igb_phy_force_speed_duplex_setup - static helper shared by the forced
 * speed/duplex paths.  Disables flow control (fc.current_mode = 0), then
 * derives the forced MAC CTRL value and the PHY control value (*phy_ctrl)
 * from mac->forced_speed_duplex: mask 5 selects half duplex, mask 12 selects
 * 100 Mb/s, otherwise 10 Mb/s is forced.  Ends by calling
 * igb_config_collision_dist() and writing CTRL through the mapped BAR when
 * hw->hw_addr is non-NULL. */
static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw , u16 *phy_ctrl ) { struct e1000_mac_info *mac ; u32 ctrl ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___2 ; struct net_device *tmp___5 ; long tmp___6 ; u8 *hw_addr ; u8 *__var ; long tmp___7 ; { mac = & hw->mac; hw->fc.current_mode = 0; ctrl = igb_rd32(hw, 0U); ctrl = ctrl | 6144U; ctrl = ctrl & 4294966527U; ctrl = ctrl & 4294967263U; *phy_ctrl = (unsigned int )*phy_ctrl & 61439U; if (((int )mac->forced_speed_duplex & 5) != 0) { ctrl = ctrl & 4294967294U; *phy_ctrl = (unsigned int )*phy_ctrl & 65279U; descriptor.modname = "igb"; descriptor.function = "igb_phy_force_speed_duplex_setup"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Half Duplex\n"; descriptor.lineno = 1372U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Half Duplex\n"); } else { } } else { ctrl = ctrl | 1U; *phy_ctrl = (u16 )((unsigned int )*phy_ctrl | 256U); descriptor___0.modname = "igb"; descriptor___0.function = "igb_phy_force_speed_duplex_setup"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Full Duplex\n"; descriptor___0.lineno = 1376U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long
)descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Full Duplex\n"); } else { } } if (((int )mac->forced_speed_duplex & 12) != 0) { ctrl = ctrl | 256U; *phy_ctrl = (u16 )((unsigned int )*phy_ctrl | 8192U); *phy_ctrl = (unsigned int )*phy_ctrl & 65471U; descriptor___1.modname = "igb"; descriptor___1.function = "igb_phy_force_speed_duplex_setup"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Forcing 100mb\n"; descriptor___1.lineno = 1384U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Forcing 100mb\n"); } else { } } else { ctrl = ctrl & 4294966527U; *phy_ctrl = *phy_ctrl; *phy_ctrl = (unsigned int )*phy_ctrl & 57279U; descriptor___2.modname = "igb"; descriptor___2.function = "igb_phy_force_speed_duplex_setup"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___2.format = "Forcing 10mb\n"; descriptor___2.lineno = 1389U; descriptor___2.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___2, (struct net_device const *)tmp___5, "Forcing 10mb\n"); } else { } } igb_config_collision_dist(hw); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if
(tmp___7 == 0L) { writel(ctrl, (void volatile *)hw_addr); } else { } return; } }
/* igb_set_d3_lplu_state - enable/disable Low Power Link Up for D3.
 * Works on the IGP02 power-management register (reg 25).  When !active it
 * clears the D3 LPLU bit and applies the SmartSpeed policy from
 * phy->smart_speed to reg 16; when active and all speeds are advertised
 * (autoneg_advertised == 47U/15U/3U) it sets the LPLU bit and disables
 * SmartSpeed.  A NULL read_reg op makes this a no-op returning 0. */
s32 igb_set_d3_lplu_state(struct e1000_hw *hw , bool active ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; { phy = & hw->phy; ret_val = 0; if ((unsigned long )hw->phy.ops.read_reg == (unsigned long )((s32 (*)(struct e1000_hw * , u32 , u16 * ))0)) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 25U, & data); if (ret_val != 0) { goto out; } else { } if (! active) { data = (unsigned int )data & 65531U; ret_val = (*(phy->ops.write_reg))(hw, 25U, (int )data); if (ret_val != 0) { goto out; } else { } if ((unsigned int )phy->smart_speed == 1U) { ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); if (ret_val != 0) { goto out; } else { } data = (u16 )((unsigned int )data | 128U); ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )data); if (ret_val != 0) { goto out; } else { } } else if ((unsigned int )phy->smart_speed == 2U) { ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); if (ret_val != 0) { goto out; } else { } data = (unsigned int )data & 65407U; ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )data); if (ret_val != 0) { goto out; } else { } } else { } } else if (((unsigned int )phy->autoneg_advertised == 47U || (unsigned int )phy->autoneg_advertised == 15U) || (unsigned int )phy->autoneg_advertised == 3U) { data = (u16 )((unsigned int )data | 4U); ret_val = (*(phy->ops.write_reg))(hw, 25U, (int )data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 16U, & data); if (ret_val != 0) { goto out; } else { } data = (unsigned int )data & 65407U; ret_val = (*(phy->ops.write_reg))(hw, 16U, (int )data); } else { } out: ; return (ret_val); } }
/* igb_check_downshift - record whether the PHY downshifted.
 * Chooses the PHY-specific status register offset and downshift mask by
 * phy->type, reads it and sets phy->speed_downgraded; unknown types clear the
 * flag and return 0. */
s32 igb_check_downshift(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; u16 offset ; u16 mask ; { phy = & hw->phy; switch ((unsigned int )phy->type) { case 9U: ; case 2U: ; case 5U: offset = 17U; mask = 32U; goto ldv_44139; case 4U: ; case 3U: ; case 6U: offset = 19U; mask
= 32768U; goto ldv_44139; default: phy->speed_downgraded = 0; ret_val = 0; goto out; } ldv_44139: ret_val = (*(phy->ops.read_reg))(hw, (u32 )offset, & phy_data); if (ret_val == 0) { phy->speed_downgraded = (unsigned int )((int )phy_data & (int )mask) != 0U; } else { } out: ; return (ret_val); } }
/* igb_check_polarity_m88 - latch cable polarity (bit 1 of M88 status reg 17)
 * into phy->cable_polarity. */
s32 igb_check_polarity_m88(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; { phy = & hw->phy; ret_val = (*(phy->ops.read_reg))(hw, 17U, & data); if (ret_val == 0) { phy->cable_polarity = ((int )data & 2) != 0; } else { } return (ret_val); } }
/* igb_check_polarity_igp - IGP polarity check.  When reg 17 reports gigabit
 * (bits 15:14 both set) the polarity lives in reg 180 (mask 120U), otherwise
 * in reg 17 (mask 2U). */
static s32 igb_check_polarity_igp(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; u16 offset ; u16 mask ; { phy = & hw->phy; ret_val = (*(phy->ops.read_reg))(hw, 17U, & data); if (ret_val != 0) { goto out; } else { } if (((int )data & 49152) == 49152) { offset = 180U; mask = 120U; } else { offset = 17U; mask = 2U; } ret_val = (*(phy->ops.read_reg))(hw, (u32 )offset, & data); if (ret_val == 0) { phy->cable_polarity = (unsigned int )((int )data & (int )mask) != 0U; } else { } out: ; return (ret_val); } }
/* igb_wait_autoneg - poll PHY_STATUS (reg 1, read twice per iteration since
 * the bit is latched) up to 45 times, 100 ms apart, for the
 * autoneg-complete bit (0x20).  Returns the last read_reg status. */
static s32 igb_wait_autoneg(struct e1000_hw *hw ) { s32 ret_val ; u16 i ; u16 phy_status ; { ret_val = 0; i = 45U; goto ldv_44168; ldv_44167: ret_val = (*(hw->phy.ops.read_reg))(hw, 1U, & phy_status); if (ret_val != 0) { goto ldv_44166; } else { } ret_val = (*(hw->phy.ops.read_reg))(hw, 1U, & phy_status); if (ret_val != 0) { goto ldv_44166; } else { } if (((int )phy_status & 32) != 0) { goto ldv_44166; } else { } msleep(100U); i = (u16 )((int )i - 1); ldv_44168: ; if ((unsigned int )i != 0U) { goto ldv_44167; } else { } ldv_44166: ; return (ret_val); } }
/* igb_phy_has_link - poll PHY_STATUS up to `iterations` times with a
 * `usec_interval` delay (split into ms/us busy-waits), reading the register
 * twice per iteration because the Link Status bit (0x4) is latched.
 * *success is set to whether link was seen within the iteration budget. */
s32 igb_phy_has_link(struct e1000_hw *hw , u32 iterations , u32 usec_interval , bool *success ) { s32 ret_val ; u16 i ; u16 phy_status ; unsigned long __ms ; unsigned long tmp ; unsigned long __ms___0 ; unsigned long tmp___0 ; { ret_val = 0; i = 0U; goto ldv_44188; ldv_44187: ret_val = (*(hw->phy.ops.read_reg))(hw, 1U, &
phy_status); if (ret_val != 0 && usec_interval != 0U) { if (usec_interval > 999U) { __ms = (unsigned long )(usec_interval / 1000U); goto ldv_44180; ldv_44179: __const_udelay(4295000UL); ldv_44180: tmp = __ms; __ms = __ms - 1UL; if (tmp != 0UL) { goto ldv_44179; } else { } } else { __udelay((unsigned long )usec_interval); } } else { } ret_val = (*(hw->phy.ops.read_reg))(hw, 1U, & phy_status); if (ret_val != 0) { goto ldv_44182; } else { } if (((int )phy_status & 4) != 0) { goto ldv_44182; } else { } if (usec_interval > 999U) { __ms___0 = (unsigned long )(usec_interval / 1000U); goto ldv_44185; ldv_44184: __const_udelay(4295000UL); ldv_44185: tmp___0 = __ms___0; __ms___0 = __ms___0 - 1UL; if (tmp___0 != 0UL) { goto ldv_44184; } else { } } else { __udelay((unsigned long )usec_interval); } i = (u16 )((int )i + 1); ldv_44188: ; if ((u32 )i < iterations) { goto ldv_44187; } else { } ldv_44182: *success = (u32 )i < iterations; return (ret_val); } }
/* igb_get_cable_length_m88 - estimate cable length on M88 PHYs from status
 * reg 17 bits 9:7 via e1000_m88_cable_length_table (min = table[index],
 * max = table[index+1], length = midpoint); index > 5 is invalid (-2). */
s32 igb_get_cable_length_m88(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; u16 index ; { phy = & hw->phy; ret_val = (*(phy->ops.read_reg))(hw, 17U, & phy_data); if (ret_val != 0) { goto out; } else { } index = (u16 )(((int )phy_data & 896) >> 7); if ((unsigned int )index > 5U) { ret_val = -2; goto out; } else { } phy->min_cable_length = e1000_m88_cable_length_table[(int )index]; phy->max_cable_length = e1000_m88_cable_length_table[(int )index + 1]; phy->cable_length = (u16 )(((int )phy->min_cable_length + (int )phy->max_cable_length) / 2); out: ; return (ret_val); } }
/* igb_get_cable_length_m88_gen2 - cable length for newer M88 parts, dispatched
 * on hw->phy.id.  The gen2 paths read a raw distance register and a companion
 * register whose bit 10 selects the unit (is_cm), dividing by 100 when the
 * reading is in centimeters; id 21040576U/21040800U switch to page 7 first,
 * and id 21040272U falls back to the page-5 table lookup.  Unknown ids
 * return -2. */
s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; u16 phy_data2 ; u16 index ; u16 default_page ; u16 is_cm ; { phy = & hw->phy; switch (hw->phy.id) { case 21040128U: ret_val = (*(phy->ops.read_reg))(hw, phy->addr + 458768U, & phy_data); if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(phy->ops.read_reg))(hw, 458773U, & phy_data2); if (ret_val != 0) {
return (ret_val); } else { } is_cm = ((int )phy_data2 & 1024) == 0; phy->min_cable_length = (u16 )((int )phy_data / ((unsigned int )is_cm != 0U ? 100 : 1)); phy->max_cable_length = (u16 )((int )phy_data / ((unsigned int )is_cm != 0U ? 100 : 1)); phy->cable_length = (u16 )((int )phy_data / ((unsigned int )is_cm != 0U ? 100 : 1)); goto ldv_44208; case 21040800U: ; case 21040576U: ret_val = (*(phy->ops.read_reg))(hw, 22U, & default_page); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.write_reg))(hw, 22U, 7); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, phy->addr + 16U, & phy_data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 21U, & phy_data2); if (ret_val != 0) { goto out; } else { } is_cm = ((int )phy_data2 & 1024) == 0; phy->min_cable_length = (u16 )((int )phy_data / ((unsigned int )is_cm != 0U ? 100 : 1)); phy->max_cable_length = (u16 )((int )phy_data / ((unsigned int )is_cm != 0U ? 100 : 1)); phy->cable_length = (u16 )((int )phy_data / ((unsigned int )is_cm != 0U ?
100 : 1)); ret_val = (*(phy->ops.write_reg))(hw, 22U, (int )default_page); if (ret_val != 0) { goto out; } else { } goto ldv_44208; case 21040272U: ret_val = (*(phy->ops.read_reg))(hw, 22U, & default_page); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.write_reg))(hw, 22U, 5); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 26U, & phy_data); if (ret_val != 0) { goto out; } else { } index = (u16 )(((int )phy_data & 896) >> 7); if ((unsigned int )index > 5U) { ret_val = -2; goto out; } else { } phy->min_cable_length = e1000_m88_cable_length_table[(int )index]; phy->max_cable_length = e1000_m88_cable_length_table[(int )index + 1]; phy->cable_length = (u16 )(((int )phy->min_cable_length + (int )phy->max_cable_length) / 2); ret_val = (*(phy->ops.write_reg))(hw, 22U, (int )default_page); if (ret_val != 0) { goto out; } else { } goto ldv_44208; default: ret_val = -2; goto out; } ldv_44208: ; out: ; return (ret_val); } }
/* igb_get_cable_length_igp_2 - average the four AGC channel readings
 * (regs 4529U/4785U/5297U/6321U, index taken from bits 15:9) through
 * e1000_igp_2_cable_length_table, dropping the min and max channels; the
 * result +/-15 gives the min/max bounds and the midpoint the cable length.
 * An AGC index of 0 or > 112 yields -2. */
s32 igb_get_cable_length_igp_2(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; u16 i ; u16 agc_value ; u16 cur_agc_index ; u16 max_agc_index ; u16 min_agc_index ; u16 agc_reg_array[4U] ; { phy = & hw->phy; ret_val = 0; agc_value = 0U; max_agc_index = 0U; min_agc_index = 112U; agc_reg_array[0] = 4529U; agc_reg_array[1] = 4785U; agc_reg_array[2] = 5297U; agc_reg_array[3] = 6321U; i = 0U; goto ldv_44228; ldv_44227: ret_val = (*(phy->ops.read_reg))(hw, (u32 )agc_reg_array[(int )i], & phy_data); if (ret_val != 0) { goto out; } else { } cur_agc_index = (u16 )((int )phy_data >> 9); if ((unsigned int )cur_agc_index > 112U || (unsigned int )cur_agc_index == 0U) { ret_val = -2; goto out; } else { } if ((int )((unsigned short )e1000_igp_2_cable_length_table[(int )min_agc_index]) > (int )((unsigned short )e1000_igp_2_cable_length_table[(int )cur_agc_index])) { min_agc_index = cur_agc_index; } else { } if ((int )((unsigned short )e1000_igp_2_cable_length_table[(int )max_agc_index]) < (int
)((unsigned short )e1000_igp_2_cable_length_table[(int )cur_agc_index])) { max_agc_index = cur_agc_index; } else { } agc_value = (int )((u16 )e1000_igp_2_cable_length_table[(int )cur_agc_index]) + (int )agc_value; i = (u16 )((int )i + 1); ldv_44228: ; if ((unsigned int )i <= 3U) { goto ldv_44227; } else { } agc_value = (int )agc_value - ((int )((u16 )e1000_igp_2_cable_length_table[(int )min_agc_index]) + (int )((u16 )e1000_igp_2_cable_length_table[(int )max_agc_index])); agc_value = (u16 )((unsigned int )agc_value / 2U); phy->min_cable_length = (u16 )(0 > (int )agc_value + -15 ? 0 : (int )agc_value + -15); phy->max_cable_length = (unsigned int )agc_value + 15U; phy->cable_length = (u16 )(((int )phy->min_cable_length + (int )phy->max_cable_length) / 2); out: ; return (ret_val); } }
/* igb_get_phy_info_m88 - populate phy info (polarity correction, MDI-X,
 * cable length, local/remote receiver status) for M88 PHYs.  Requires copper
 * media and link up (-3 otherwise); receiver status and cable length are only
 * gathered when reg 17 bits 15:14 indicate gigabit, else 255 placeholders. */
s32 igb_get_phy_info_m88(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; bool link ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { phy = & hw->phy; if ((unsigned int )phy->media_type != 1U) { descriptor.modname = "igb"; descriptor.function = "igb_get_phy_info_m88"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Phy info is only valid for copper media\n"; descriptor.lineno = 1916U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Phy info is only valid for copper media\n"); } else { } ret_val = -3; goto out; } else { } ret_val = igb_phy_has_link(hw, 1U, 0U, & link); if (ret_val != 0) { goto out; } else { } if (!
link) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_get_phy_info_m88"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Phy info is only valid if link is up\n"; descriptor___0.lineno = 1926U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Phy info is only valid if link is up\n"); } else { } ret_val = -3; goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 16U, & phy_data); if (ret_val != 0) { goto out; } else { } phy->polarity_correction = ((int )phy_data & 2) != 0; ret_val = igb_check_polarity_m88(hw); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 17U, & phy_data); if (ret_val != 0) { goto out; } else { } phy->is_mdix = ((int )phy_data & 64) != 0; if (((int )phy_data & 49152) == 32768) { ret_val = (*(phy->ops.get_cable_length))(hw); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 10U, & phy_data); if (ret_val != 0) { goto out; } else { } phy->local_rx = ((int )phy_data & 8192) != 0; phy->remote_rx = ((int )phy_data & 4096) != 0; } else { phy->cable_length = 255U; phy->local_rx = 255; phy->remote_rx = 255; } out: ; return (ret_val); } }
/* igb_get_phy_info_igp - IGP counterpart of igb_get_phy_info_m88: requires
 * link up (-3 otherwise); always reports polarity_correction = 1 and runs
 * igb_check_polarity_igp(); cable length and receiver status are only read
 * at gigabit (reg 17 bits 15:14 == 11), else 255 placeholders. */
s32 igb_get_phy_info_igp(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; bool link ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { phy = & hw->phy; ret_val = igb_phy_has_link(hw, 1U, 0U, & link); if (ret_val != 0) { goto out; } else { } if (!
link) { descriptor.modname = "igb"; descriptor.function = "igb_get_phy_info_igp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Phy info is only valid if link is up\n"; descriptor.lineno = 1996U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Phy info is only valid if link is up\n"); } else { } ret_val = -3; goto out; } else { } phy->polarity_correction = 1; ret_val = igb_check_polarity_igp(hw); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 17U, & data); if (ret_val != 0) { goto out; } else { } phy->is_mdix = ((int )data & 2048) != 0; if (((int )data & 49152) == 49152) { ret_val = (*(phy->ops.get_cable_length))(hw); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 10U, & data); if (ret_val != 0) { goto out; } else { } phy->local_rx = ((int )data & 8192) != 0; phy->remote_rx = ((int )data & 4096) != 0; } else { phy->cable_length = 255U; phy->local_rx = 255; phy->remote_rx = 255; } out: ; return (ret_val); } }
/* igb_phy_sw_reset - soft-reset the PHY by setting bit 15 of PHY_CONTROL
 * (reg 0), then a short delay.  No-op (returns 0) when read_reg is NULL. */
s32 igb_phy_sw_reset(struct e1000_hw *hw ) { s32 ret_val ; u16 phy_ctrl ; { ret_val = 0; if ((unsigned long )hw->phy.ops.read_reg == (unsigned long )((s32 (*)(struct e1000_hw * , u32 , u16 * ))0)) { goto out; } else { } ret_val = (*(hw->phy.ops.read_reg))(hw, 0U, & phy_ctrl); if (ret_val != 0) { goto out; } else { } phy_ctrl = (u16 )((unsigned int )phy_ctrl | 32768U); ret_val = (*(hw->phy.ops.write_reg))(hw, 0U, (int )phy_ctrl); if (ret_val != 0) { goto out; } else { } __const_udelay(4295UL); out: ; return (ret_val); } }
/* igb_phy_hw_reset - full hardware PHY reset.  Skipped (returns 0) when
 * igb_check_reset_block() says reset is blocked.  Acquires the PHY semaphore,
 * pulses CTRL bit 31 (2147483648U) through the mapped BAR with
 * phy->reset_delay_us between assert and deassert, flushes via igb_rd32,
 * releases the semaphore and waits for config-done. */
s32 igb_phy_hw_reset(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u32 ctrl ; u8 *hw_addr
; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { phy = & hw->phy; ret_val = igb_check_reset_block(hw); if (ret_val != 0) { ret_val = 0; goto out; } else { } ret_val = (*(phy->ops.acquire))(hw); if (ret_val != 0) { goto out; } else { } ctrl = igb_rd32(hw, 0U); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(ctrl | 2147483648U, (void volatile *)hw_addr); } else { } igb_rd32(hw, 8U); __udelay((unsigned long )phy->reset_delay_us); __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ctrl, (void volatile *)hw_addr___0); } else { } igb_rd32(hw, 8U); __const_udelay(644250UL); (*(phy->ops.release))(hw); ret_val = (*(phy->ops.get_cfg_done))(hw); out: ; return (ret_val); } }
/* igb_phy_init_script_igp3 - replay the fixed IGP3 PHY init register/value
 * script (addresses and values taken verbatim from the upstream driver);
 * always returns 0. */
s32 igb_phy_init_script_igp3(struct e1000_hw *hw ) { struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { descriptor.modname = "igb"; descriptor.function = "igb_phy_init_script_igp3"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Running IGP 3 PHY init script\n"; descriptor.lineno = 2122U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Running IGP 3 PHY init script\n"); } else { } (*(hw->phy.ops.write_reg))(hw, 12123U, 36888); (*(hw->phy.ops.write_reg))(hw, 12114U, 0); (*(hw->phy.ops.write_reg))(hw, 12209U, 35620); (*(hw->phy.ops.write_reg))(hw, 12210U, 63728); (*(hw->phy.ops.write_reg))(hw, 8208U, 4272);
(*(hw->phy.ops.write_reg))(hw, 8209U, 0); (*(hw->phy.ops.write_reg))(hw, 8413U, 9370); (*(hw->phy.ops.write_reg))(hw, 8414U, 211); (*(hw->phy.ops.write_reg))(hw, 10420U, 1230); (*(hw->phy.ops.write_reg))(hw, 12144U, 10724); (*(hw->phy.ops.write_reg))(hw, 0U, 320); (*(hw->phy.ops.write_reg))(hw, 7984U, 5638); (*(hw->phy.ops.write_reg))(hw, 7985U, 47124); (*(hw->phy.ops.write_reg))(hw, 7989U, 42); (*(hw->phy.ops.write_reg))(hw, 7998U, 103); (*(hw->phy.ops.write_reg))(hw, 8020U, 101); (*(hw->phy.ops.write_reg))(hw, 8021U, 42); (*(hw->phy.ops.write_reg))(hw, 8022U, 42); (*(hw->phy.ops.write_reg))(hw, 8050U, 16304); (*(hw->phy.ops.write_reg))(hw, 8054U, 49407); (*(hw->phy.ops.write_reg))(hw, 8055U, 7660); (*(hw->phy.ops.write_reg))(hw, 8056U, 63983); (*(hw->phy.ops.write_reg))(hw, 8057U, 528); (*(hw->phy.ops.write_reg))(hw, 6293U, 3); (*(hw->phy.ops.write_reg))(hw, 6038U, 8); (*(hw->phy.ops.write_reg))(hw, 6040U, 53256); (*(hw->phy.ops.write_reg))(hw, 6296U, 55576); (*(hw->phy.ops.write_reg))(hw, 6266U, 2048); (*(hw->phy.ops.write_reg))(hw, 25U, 141); (*(hw->phy.ops.write_reg))(hw, 27U, 8320); (*(hw->phy.ops.write_reg))(hw, 20U, 69); (*(hw->phy.ops.write_reg))(hw, 0U, 4928); return (0); } }
/* igb_power_up_phy_copper - clear the POWER_DOWN bit (0x0800; mask 63487U)
 * in PHY_CONTROL (reg 0). */
void igb_power_up_phy_copper(struct e1000_hw *hw ) { u16 mii_reg ; { mii_reg = 0U; (*(hw->phy.ops.read_reg))(hw, 0U, & mii_reg); mii_reg = (unsigned int )mii_reg & 63487U; (*(hw->phy.ops.write_reg))(hw, 0U, (int )mii_reg); return; } }
/* igb_power_down_phy_copper - set the POWER_DOWN bit (0x0800) in PHY_CONTROL
 * and sleep 1-2 ms for it to take effect. */
void igb_power_down_phy_copper(struct e1000_hw *hw ) { u16 mii_reg ; { mii_reg = 0U; (*(hw->phy.ops.read_reg))(hw, 0U, & mii_reg); mii_reg = (u16 )((unsigned int )mii_reg | 2048U); (*(hw->phy.ops.write_reg))(hw, 0U, (int )mii_reg); usleep_range(1000UL, 2000UL); return; } }
/* igb_check_polarity_82580 - latch cable polarity (bit 10 of status reg 26)
 * into phy->cable_polarity for 82580-class PHYs. */
static s32 igb_check_polarity_82580(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; { phy = & hw->phy; ret_val = (*(phy->ops.read_reg))(hw, 26U, & data); if (ret_val == 0) { phy->cable_polarity = ((int )data & 1024) != 0; } else { } return (ret_val); } }
s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; bool link ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; { phy = & hw->phy; ret_val = (*(phy->ops.read_reg))(hw, 0U, & phy_data); if (ret_val != 0) { goto out; } else { } igb_phy_force_speed_duplex_setup(hw, & phy_data); ret_val = (*(phy->ops.write_reg))(hw, 0U, (int )phy_data); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 18U, & phy_data); if (ret_val != 0) { goto out; } else { } phy_data = (unsigned int )phy_data & 63999U; ret_val = (*(phy->ops.write_reg))(hw, 18U, (int )phy_data); if (ret_val != 0) { goto out; } else { } descriptor.modname = "igb"; descriptor.function = "igb_phy_force_speed_duplex_82580"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "I82580_PHY_CTRL_2: %X\n"; descriptor.lineno = 2295U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "I82580_PHY_CTRL_2: %X\n", (int )phy_data); } else { } __const_udelay(4295UL); if ((int )phy->autoneg_wait_to_complete) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_phy_force_speed_duplex_82580"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___0.format = "Waiting for forced speed/duplex link on 
82580 phy\n"; descriptor___0.lineno = 2300U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Waiting for forced speed/duplex link on 82580 phy\n"); } else { } ret_val = igb_phy_has_link(hw, 20U, 100000U, & link); if (ret_val != 0) { goto out; } else { } if (! link) { descriptor___1.modname = "igb"; descriptor___1.function = "igb_phy_force_speed_duplex_82580"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor___1.format = "Link taking longer than expected.\n"; descriptor___1.lineno = 2307U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "Link taking longer than expected.\n"); } else { } } else { } ret_val = igb_phy_has_link(hw, 20U, 100000U, & link); if (ret_val != 0) { goto out; } else { } } else { } out: ; return (ret_val); } } s32 igb_get_phy_info_82580(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 data ; bool link ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { phy = & hw->phy; ret_val = igb_phy_has_link(hw, 1U, 0U, & link); if (ret_val != 0) { goto out; } else { } if (! 
link) { descriptor.modname = "igb"; descriptor.function = "igb_get_phy_info_82580"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_phy.c"; descriptor.format = "Phy info is only valid if link is up\n"; descriptor.lineno = 2340U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Phy info is only valid if link is up\n"); } else { } ret_val = -3; goto out; } else { } phy->polarity_correction = 1; ret_val = igb_check_polarity_82580(hw); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 26U, & data); if (ret_val != 0) { goto out; } else { } phy->is_mdix = ((int )data & 2048) != 0; if (((int )data & 768) == 512) { ret_val = (*(hw->phy.ops.get_cable_length))(hw); if (ret_val != 0) { goto out; } else { } ret_val = (*(phy->ops.read_reg))(hw, 10U, & data); if (ret_val != 0) { goto out; } else { } phy->local_rx = ((int )data & 8192) != 0; phy->remote_rx = ((int )data & 4096) != 0; } else { phy->cable_length = 255U; phy->local_rx = 255; phy->remote_rx = 255; } out: ; return (ret_val); } } s32 igb_get_cable_length_82580(struct e1000_hw *hw ) { struct e1000_phy_info *phy ; s32 ret_val ; u16 phy_data ; u16 length ; { phy = & hw->phy; ret_val = (*(phy->ops.read_reg))(hw, 31U, & phy_data); if (ret_val != 0) { goto out; } else { } length = (u16 )(((int )phy_data & 1020) >> 2); if ((unsigned int )length == 255U) { ret_val = -2; } else { } phy->cable_length = length; out: ; return (ret_val); } } s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw , u32 offset , u16 data ) { s32 ret_val ; u16 page ; { page = (u16 )(offset >> 16); offset = offset & 65535U; ret_val = (*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { return 
(ret_val); } else { } ret_val = igb_write_phy_reg_mdic(hw, 22U, (int )page); if (ret_val != 0) { goto release; } else { } ret_val = igb_write_phy_reg_mdic(hw, offset, (int )data); release: (*(hw->phy.ops.release))(hw); return (ret_val); } } s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw , u32 offset , u16 *data ) { s32 ret_val ; u16 page ; { page = (u16 )(offset >> 16); offset = offset & 65535U; ret_val = (*(hw->phy.ops.acquire))(hw); if (ret_val != 0) { return (ret_val); } else { } ret_val = igb_write_phy_reg_mdic(hw, 22U, (int )page); if (ret_val != 0) { goto release; } else { } ret_val = igb_read_phy_reg_mdic(hw, offset, data); release: (*(hw->phy.ops.release))(hw); return (ret_val); } } static s32 igb_set_master_slave_mode(struct e1000_hw *hw ) { s32 ret_val ; u16 phy_data ; s32 tmp ; { ret_val = (*(hw->phy.ops.read_reg))(hw, 9U, & phy_data); if (ret_val != 0) { return (ret_val); } else { } hw->phy.original_ms_type = ((int )phy_data & 4096) != 0 ? (((int )phy_data & 2048) != 0 ? 1 : 2) : 3; switch ((unsigned int )hw->phy.ms_type) { case 1U: phy_data = (u16 )((unsigned int )phy_data | 6144U); goto ldv_44341; case 2U: phy_data = (u16 )((unsigned int )phy_data | 4096U); phy_data = (unsigned int )phy_data & 63487U; goto ldv_44341; case 3U: phy_data = (unsigned int )phy_data & 61439U; default: ; goto ldv_44341; } ldv_44341: tmp = (*(hw->phy.ops.write_reg))(hw, 9U, (int )phy_data); return (tmp); } } bool ldv_queue_work_on_213(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_214(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, 
ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_215(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_216(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_217(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_218(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_219(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_220(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_221(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_222(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_223(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } 
} void ldv_mutex_lock_224(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_249(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_247(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_250(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_251(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_246(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_248(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_252(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_241(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_243(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_242(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_245(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_244(struct workqueue_struct *ldv_func_arg1 ) ; s32 igb_read_mbx(struct e1000_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct e1000_mbx_info *mbx ; s32 ret_val ; { mbx = & hw->mbx; ret_val = -15; if ((int )mbx->size < (int )size) { size = mbx->size; } else { } if ((unsigned long )mbx->ops.read != (unsigned long )((s32 (*)(struct e1000_hw * , u32 * , u16 , u16 ))0)) { ret_val = (*(mbx->ops.read))(hw, msg, (int )size, (int )mbx_id); } else { } return (ret_val); } } s32 igb_write_mbx(struct e1000_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct e1000_mbx_info *mbx ; s32 ret_val ; { mbx = & hw->mbx; ret_val = 0; if ((int )mbx->size < (int )size) { ret_val = -15; } else if ((unsigned long )mbx->ops.write != (unsigned long )((s32 (*)(struct e1000_hw * , u32 * , u16 , 
/* NOTE(review): CIL-generated LDV harness code (igb e1000_mbx.c inlined);
 * comments only, no code tokens changed. This span opens with the tail of
 * igb_write_mbx(), whose definition begins on an earlier line. */
u16 ))0)) { ret_val = (*(mbx->ops.write))(hw, msg, (int )size, (int )mbx_id); } else { } return (ret_val); } }

/* igb_check_for_msg - dispatch to mbx->ops.check_for_msg when installed;
 * returns -15 (mailbox error code -- presumably E1000_ERR_MBX, confirm
 * against driver macros) when the op pointer is NULL. */
s32 igb_check_for_msg(struct e1000_hw *hw , u16 mbx_id ) { struct e1000_mbx_info *mbx ; s32 ret_val ; { mbx = & hw->mbx; ret_val = -15; if ((unsigned long )mbx->ops.check_for_msg != (unsigned long )((s32 (*)(struct e1000_hw * , u16 ))0)) { ret_val = (*(mbx->ops.check_for_msg))(hw, (int )mbx_id); } else { } return (ret_val); } }

/* igb_check_for_ack - same dispatch pattern for mbx->ops.check_for_ack. */
s32 igb_check_for_ack(struct e1000_hw *hw , u16 mbx_id ) { struct e1000_mbx_info *mbx ; s32 ret_val ; { mbx = & hw->mbx; ret_val = -15; if ((unsigned long )mbx->ops.check_for_ack != (unsigned long )((s32 (*)(struct e1000_hw * , u16 ))0)) { ret_val = (*(mbx->ops.check_for_ack))(hw, (int )mbx_id); } else { } return (ret_val); } }

/* igb_check_for_rst - same dispatch pattern for mbx->ops.check_for_rst. */
s32 igb_check_for_rst(struct e1000_hw *hw , u16 mbx_id ) { struct e1000_mbx_info *mbx ; s32 ret_val ; { mbx = & hw->mbx; ret_val = -15; if ((unsigned long )mbx->ops.check_for_rst != (unsigned long )((s32 (*)(struct e1000_hw * , u16 ))0)) { ret_val = (*(mbx->ops.check_for_rst))(hw, (int )mbx_id); } else { } return (ret_val); } }

/* igb_poll_for_msg - busy-poll check_for_msg up to mbx->timeout iterations,
 * delaying mbx->usec_delay between tries. The ldv_43870..43872 labels are
 * the CIL encoding of the original while-loop. On expiry mbx->timeout is
 * zeroed (disables further posted operations) and -15 is returned; 0 means
 * a message arrived before the countdown ran out. */
static s32 igb_poll_for_msg(struct e1000_hw *hw , u16 mbx_id ) { struct e1000_mbx_info *mbx ; int countdown ; s32 tmp ; { mbx = & hw->mbx; countdown = (int )mbx->timeout; if (countdown == 0 || (unsigned long )mbx->ops.check_for_msg == (unsigned long )((s32 (*)(struct e1000_hw * , u16 ))0)) { goto out; } else { } goto ldv_43872; ldv_43871: countdown = countdown - 1; if (countdown == 0) { goto ldv_43870; } else { } __udelay((unsigned long )mbx->usec_delay); ldv_43872: ; if (countdown != 0) { tmp = (*(mbx->ops.check_for_msg))(hw, (int )mbx_id); if (tmp != 0) { goto ldv_43871; } else { goto ldv_43870; } } else { } ldv_43870: ; if (countdown == 0) { mbx->timeout = 0U; } else { } out: ; return (countdown != 0 ? 0 : -15); } }

/* igb_poll_for_ack - identical polling structure to igb_poll_for_msg, but
 * against mbx->ops.check_for_ack (labels ldv_43880..43882). */
static s32 igb_poll_for_ack(struct e1000_hw *hw , u16 mbx_id ) { struct e1000_mbx_info *mbx ; int countdown ; s32 tmp ; { mbx = & hw->mbx; countdown = (int )mbx->timeout; if (countdown == 0 || (unsigned long )mbx->ops.check_for_ack == (unsigned long )((s32 (*)(struct e1000_hw * , u16 ))0)) { goto out; } else { } goto ldv_43882; ldv_43881: countdown = countdown - 1; if (countdown == 0) { goto ldv_43880; } else { } __udelay((unsigned long )mbx->usec_delay); ldv_43882: ; if (countdown != 0) { tmp = (*(mbx->ops.check_for_ack))(hw, (int )mbx_id); if (tmp != 0) { goto ldv_43881; } else { goto ldv_43880; } } else { } ldv_43880: ; if (countdown == 0) { mbx->timeout = 0U; } else { } out: ; return (countdown != 0 ? 0 : -15); } }

/* igb_read_posted_mbx - wait (igb_poll_for_msg) for a message, then read it
 * via mbx->ops.read; -15 when the read op is NULL or the poll times out. */
static s32 igb_read_posted_mbx(struct e1000_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct e1000_mbx_info *mbx ; s32 ret_val ; { mbx = & hw->mbx; ret_val = -15; if ((unsigned long )mbx->ops.read == (unsigned long )((s32 (*)(struct e1000_hw * , u32 * , u16 , u16 ))0)) { goto out; } else { } ret_val = igb_poll_for_msg(hw, (int )mbx_id); if (ret_val == 0) { ret_val = (*(mbx->ops.read))(hw, msg, (int )size, (int )mbx_id); } else { } out: ; return (ret_val); } }

/* igb_write_posted_mbx - write via mbx->ops.write then wait for the ack
 * (igb_poll_for_ack). Refuses up-front (-15) when the write op is NULL or
 * mbx->timeout is 0 (no blocking allowed). */
static s32 igb_write_posted_mbx(struct e1000_hw *hw , u32 *msg , u16 size , u16 mbx_id ) { struct e1000_mbx_info *mbx ; s32 ret_val ; { mbx = & hw->mbx; ret_val = -15; if ((unsigned long )mbx->ops.write == (unsigned long )((s32 (*)(struct e1000_hw * , u32 * , u16 , u16 ))0) || mbx->timeout == 0U) { goto out; } else { } ret_val = (*(mbx->ops.write))(hw, msg, (int )size, (int )mbx_id); if (ret_val == 0) { ret_val = igb_poll_for_ack(hw, (int )mbx_id); } else { } out: ; return (ret_val); } }

/* igb_check_for_bit_pf - test `mask` against the MBVFICR register (offset
 * 3200 == 0xC80) and write-1-to-clear the bits that were set; the hw_addr
 * NULL check is LDV's model of a surprise-removed device. NOTE: the body
 * continues on the next physical line of this generated file. */
static s32 igb_check_for_bit_pf(struct e1000_hw *hw , u32 mask ) { u32 mbvficr ; u32 tmp ; s32 ret_val ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = igb_rd32(hw, 3200U); mbvficr = tmp; ret_val = -15; if ((mbvficr & mask) != 0U) { ret_val = 0; __var = (u8 *)0U; hw_addr = *((u8 * volatile 
*)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(mask, (void volatile *)hw_addr + 3200U); } else { } } else { } return (ret_val); } } static s32 igb_check_for_msg_pf(struct e1000_hw *hw , u16 vf_number ) { s32 ret_val ; s32 tmp ; { ret_val = -15; tmp = igb_check_for_bit_pf(hw, (u32 )(1 << (int )vf_number)); if (tmp == 0) { ret_val = 0; hw->mbx.stats.reqs = hw->mbx.stats.reqs + 1U; } else { } return (ret_val); } } static s32 igb_check_for_ack_pf(struct e1000_hw *hw , u16 vf_number ) { s32 ret_val ; s32 tmp ; { ret_val = -15; tmp = igb_check_for_bit_pf(hw, (u32 )(65536 << (int )vf_number)); if (tmp == 0) { ret_val = 0; hw->mbx.stats.acks = hw->mbx.stats.acks + 1U; } else { } return (ret_val); } } static s32 igb_check_for_rst_pf(struct e1000_hw *hw , u16 vf_number ) { u32 vflre ; u32 tmp ; s32 ret_val ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { tmp = igb_rd32(hw, 3208U); vflre = tmp; ret_val = -15; if (((u32 )(1 << (int )vf_number) & vflre) != 0U) { ret_val = 0; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )(1 << (int )vf_number), (void volatile *)hw_addr + 3208U); } else { } hw->mbx.stats.rsts = hw->mbx.stats.rsts + 1U; } else { } return (ret_val); } } static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw , u16 vf_number ) { s32 ret_val ; u32 p2v_mailbox ; u8 *hw_addr ; u8 *__var ; long tmp ; { ret_val = -15; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(8U, (void volatile *)hw_addr + (unsigned long )(((int )vf_number + 768) * 4)); } else { } p2v_mailbox = igb_rd32(hw, (u32 )(((int )vf_number + 768) * 4)); if ((p2v_mailbox & 8U) != 0U) { ret_val = 0; } else { } return (ret_val); } } static s32 
igb_write_mbx_pf(struct e1000_hw *hw , u32 *msg , u16 size , u16 vf_number ) { s32 ret_val ; u16 i ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { ret_val = igb_obtain_mbx_lock_pf(hw, (int )vf_number); if (ret_val != 0) { goto out_no_write; } else { } igb_check_for_msg_pf(hw, (int )vf_number); igb_check_for_ack_pf(hw, (int )vf_number); i = 0U; goto ldv_43951; ldv_43950: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(*(msg + (unsigned long )i), (void volatile *)hw_addr + (unsigned long )(((int )vf_number + 32) * 64 + ((int )i << 2))); } else { } i = (u16 )((int )i + 1); ldv_43951: ; if ((int )i < (int )size) { goto ldv_43950; } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(1U, (void volatile *)hw_addr___0 + (unsigned long )(((int )vf_number + 768) * 4)); } else { } hw->mbx.stats.msgs_tx = hw->mbx.stats.msgs_tx + 1U; out_no_write: ; return (ret_val); } } static s32 igb_read_mbx_pf(struct e1000_hw *hw , u32 *msg , u16 size , u16 vf_number ) { s32 ret_val ; u16 i ; u8 *hw_addr ; u8 *__var ; long tmp ; { ret_val = igb_obtain_mbx_lock_pf(hw, (int )vf_number); if (ret_val != 0) { goto out_no_read; } else { } i = 0U; goto ldv_43966; ldv_43965: *(msg + (unsigned long )i) = readl((void const volatile *)(hw->hw_addr + ((unsigned long )(((int )vf_number + 32) * 64) + (unsigned long )((int )i << 2)))); i = (u16 )((int )i + 1); ldv_43966: ; if ((int )i < (int )size) { goto ldv_43965; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(2U, (void volatile *)hw_addr + (unsigned long )(((int )vf_number + 768) * 4)); } else { } 
/* NOTE(review): CIL-generated LDV harness code; comments only, no code
 * tokens changed. This span opens with the tail of igb_read_mbx_pf(), whose
 * definition begins on an earlier line. */
hw->mbx.stats.msgs_rx = hw->mbx.stats.msgs_rx + 1U; out_no_read: ; return (ret_val); } }

/* igb_init_mbx_params_pf - PF-side mailbox setup: timeout and usec_delay
 * are 0 (posted ops will refuse to block), mailbox size is 16 words, the
 * PF read/write/check ops and the generic posted wrappers are wired in,
 * and all statistics counters are cleared. Always returns 0. */
s32 igb_init_mbx_params_pf(struct e1000_hw *hw ) { struct e1000_mbx_info *mbx ; { mbx = & hw->mbx; mbx->timeout = 0U; mbx->usec_delay = 0U; mbx->size = 16U; mbx->ops.read = & igb_read_mbx_pf; mbx->ops.write = & igb_write_mbx_pf; mbx->ops.read_posted = & igb_read_posted_mbx; mbx->ops.write_posted = & igb_write_posted_mbx; mbx->ops.check_for_msg = & igb_check_for_msg_pf; mbx->ops.check_for_ack = & igb_check_for_ack_pf; mbx->ops.check_for_rst = & igb_check_for_rst_pf; mbx->stats.msgs_tx = 0U; mbx->stats.msgs_rx = 0U; mbx->stats.reqs = 0U; mbx->stats.acks = 0U; mbx->stats.rsts = 0U; return (0); } }

/* The ldv_* functions below are LDV environment-model wrappers: each one
 * forwards to the real kernel API and additionally records the effect in
 * the verifier's work-queue model (activate_work_11 /
 * call_and_disable_all_11) so the checker can track pending work items. */

/* ldv_queue_work_on_241 - instrumented queue_work_on(). */
bool ldv_queue_work_on_241(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } }

/* ldv_queue_delayed_work_on_242 - instrumented queue_delayed_work_on();
 * the modelled work item is the embedded ldv_func_arg3->work. */
bool ldv_queue_delayed_work_on_242(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } }

/* ldv_queue_work_on_243 - duplicate instrumented queue_work_on() stub
 * (LDV emits one wrapper per call site). */
bool ldv_queue_work_on_243(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } }

/* ldv_flush_workqueue_244 - instrumented flush_workqueue(); the model runs
 * and disables all pending work items. */
void ldv_flush_workqueue_244(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } }

/* ldv_queue_delayed_work_on_245 - instrumented queue_delayed_work_on();
 * NOTE: the parameter list and body continue on the next physical line of
 * this generated file. */
bool ldv_queue_delayed_work_on_245(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work
*ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_246(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_247(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_248(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_249(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_250(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_251(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_252(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_277(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_275(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_278(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_279(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_274(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_276(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_280(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_269(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct 
*ldv_func_arg3 ) ; bool ldv_queue_work_on_271(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_270(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_273(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_272(struct workqueue_struct *ldv_func_arg1 ) ; static s32 igb_update_flash_i210(struct e1000_hw *hw ) ; static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw ) { u32 swsm ; s32 timeout ; s32 i ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; u32 tmp___2 ; struct _ddebug descriptor___0 ; struct net_device *tmp___3 ; long tmp___4 ; { timeout = (int )hw->nvm.word_size + 1; i = 0; goto ldv_43839; ldv_43838: swsm = igb_rd32(hw, 23376U); if ((swsm & 1U) == 0U) { goto ldv_43837; } else { } __const_udelay(214750UL); i = i + 1; ldv_43839: ; if (i < timeout) { goto ldv_43838; } else { } ldv_43837: ; if (i == timeout) { if ((int )hw->dev_spec._82575.clear_semaphore_once) { hw->dev_spec._82575.clear_semaphore_once = 0; igb_put_hw_semaphore(hw); i = 0; goto ldv_43842; ldv_43841: swsm = igb_rd32(hw, 23376U); if ((swsm & 1U) == 0U) { goto ldv_43840; } else { } __const_udelay(214750UL); i = i + 1; ldv_43842: ; if (i < timeout) { goto ldv_43841; } else { } ldv_43840: ; } else { } if (i == timeout) { descriptor.modname = "igb"; descriptor.function = "igb_get_hw_semaphore_i210"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor.format = "Driver can\'t access device - SMBI bit is set.\n"; descriptor.lineno = 76U; 
descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Driver can\'t access device - SMBI bit is set.\n"); } else { } return (-1); } else { } } else { } i = 0; goto ldv_43850; ldv_43849: swsm = igb_rd32(hw, 23376U); __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(swsm | 2U, (void volatile *)hw_addr + 23376U); } else { } tmp___2 = igb_rd32(hw, 23376U); if ((tmp___2 & 2U) != 0U) { goto ldv_43848; } else { } __const_udelay(214750UL); i = i + 1; ldv_43850: ; if (i < timeout) { goto ldv_43849; } else { } ldv_43848: ; if (i == timeout) { igb_put_hw_semaphore(hw); descriptor___0.modname = "igb"; descriptor___0.function = "igb_get_hw_semaphore_i210"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___0.format = "Driver can\'t access the NVM\n"; descriptor___0.lineno = 96U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___3, "Driver can\'t access the NVM\n"); } else { } return (-1); } else { } return (0); } } static s32 igb_acquire_nvm_i210(struct e1000_hw *hw ) { s32 tmp ; { tmp = igb_acquire_swfw_sync_i210(hw, 1); return (tmp); } } static void igb_release_nvm_i210(struct e1000_hw *hw ) { { igb_release_swfw_sync_i210(hw, 1); return; } } s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw , u16 mask ) { u32 swfw_sync ; u32 swmask ; u32 fwmask ; s32 ret_val ; s32 i ; s32 timeout ; s32 tmp ; unsigned long __ms ; unsigned 
long tmp___0 ; struct _ddebug descriptor ; struct net_device *tmp___1 ; long tmp___2 ; u8 *hw_addr ; u8 *__var ; long tmp___3 ; { swmask = (u32 )mask; fwmask = (u32 )((int )mask << 16); ret_val = 0; i = 0; timeout = 200; goto ldv_43875; ldv_43874: tmp = igb_get_hw_semaphore_i210(hw); if (tmp != 0) { ret_val = -13; goto out; } else { } swfw_sync = igb_rd32(hw, 23388U); if (((fwmask | swmask) & swfw_sync) == 0U) { goto ldv_43869; } else { } igb_put_hw_semaphore(hw); if (1) { __const_udelay(21475000UL); } else { __ms = 5UL; goto ldv_43872; ldv_43871: __const_udelay(4295000UL); ldv_43872: tmp___0 = __ms; __ms = __ms - 1UL; if (tmp___0 != 0UL) { goto ldv_43871; } else { } } i = i + 1; ldv_43875: ; if (i < timeout) { goto ldv_43874; } else { } ldv_43869: ; if (i == timeout) { descriptor.modname = "igb"; descriptor.function = "igb_acquire_swfw_sync_i210"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor.format = "Driver can\'t access resource, SW_FW_SYNC timeout.\n"; descriptor.lineno = 162U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___1, "Driver can\'t access resource, SW_FW_SYNC timeout.\n"); } else { } ret_val = -13; goto out; } else { } swfw_sync = swfw_sync | swmask; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(swfw_sync, (void volatile *)hw_addr + 23388U); } else { } igb_put_hw_semaphore(hw); out: ; return (ret_val); } } void igb_release_swfw_sync_i210(struct e1000_hw *hw , u16 mask ) { u32 swfw_sync ; s32 tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; { goto 
ldv_43887; ldv_43886: ; ldv_43887: tmp = igb_get_hw_semaphore_i210(hw); if (tmp != 0) { goto ldv_43886; } else { } swfw_sync = igb_rd32(hw, 23388U); swfw_sync = (u32 )(~ ((int )mask)) & swfw_sync; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(swfw_sync, (void volatile *)hw_addr + 23388U); } else { } igb_put_hw_semaphore(hw); return; } } static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; u16 i ; u16 count ; s32 tmp ; { status = 0; i = 0U; goto ldv_43903; ldv_43902: count = (u16 )(512 < (int )words - (int )i ? 512 : (int )words - (int )i); tmp = (*(hw->nvm.ops.acquire))(hw); if (tmp == 0) { status = igb_read_nvm_eerd(hw, (int )offset, (int )count, data + (unsigned long )i); (*(hw->nvm.ops.release))(hw); } else { status = 13; } if (status != 0) { goto ldv_43901; } else { } i = (unsigned int )i + 512U; ldv_43903: ; if ((int )i < (int )words) { goto ldv_43902; } else { } ldv_43901: ; return (status); } } static s32 igb_write_nvm_srwr(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) { struct e1000_nvm_info *nvm ; u32 i ; u32 k ; u32 eewr ; u32 attempts ; s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u8 *hw_addr ; u8 *__var ; long tmp___1 ; u32 tmp___2 ; struct _ddebug descriptor___0 ; struct net_device *tmp___3 ; long tmp___4 ; { nvm = & hw->nvm; eewr = 0U; attempts = 100000U; ret_val = 0; if (((int )nvm->word_size <= (int )offset || (int )words > (int )nvm->word_size - (int )offset) || (unsigned int )words == 0U) { descriptor.modname = "igb"; descriptor.function = "igb_write_nvm_srwr"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; 
descriptor.format = "nvm parameter(s) out of bounds\n"; descriptor.lineno = 260U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "nvm parameter(s) out of bounds\n"); } else { } ret_val = -1; goto out; } else { } i = 0U; goto ldv_43928; ldv_43927: eewr = ((((u32 )offset + i) << 2) | (u32 )((int )*(data + (unsigned long )i) << 16)) | 1U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(eewr, (void volatile *)hw_addr + 73752U); } else { } k = 0U; goto ldv_43924; ldv_43923: tmp___2 = igb_rd32(hw, 73752U); if ((tmp___2 & 2U) != 0U) { ret_val = 0; goto ldv_43922; } else { } __const_udelay(21475UL); k = k + 1U; ldv_43924: ; if (k < attempts) { goto ldv_43923; } else { } ldv_43922: ; if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_write_nvm_srwr"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___0.format = "Shadow RAM write EEWR timed out\n"; descriptor___0.lineno = 282U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___3, "Shadow RAM write EEWR timed out\n"); } else { } goto ldv_43926; } else { } i = i + 1U; ldv_43928: ; if ((u32 )words > i) { goto ldv_43927; } else { } ldv_43926: ; out: ; return (ret_val); } } static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) { s32 status ; u16 i ; u16 count ; s32 tmp ; { status = 0; i = 0U; goto 
ldv_43940; ldv_43939: count = (u16 )(512 < (int )words - (int )i ? 512 : (int )words - (int )i); tmp = (*(hw->nvm.ops.acquire))(hw); if (tmp == 0) { status = igb_write_nvm_srwr(hw, (int )offset, (int )count, data + (unsigned long )i); (*(hw->nvm.ops.release))(hw); } else { status = 13; } if (status != 0) { goto ldv_43938; } else { } i = (unsigned int )i + 512U; ldv_43940: ; if ((int )i < (int )words) { goto ldv_43939; } else { } ldv_43938: ; return (status); } } static s32 igb_read_invm_word_i210(struct e1000_hw *hw , u8 address , u16 *data ) { s32 status ; u32 invm_dword ; u16 i ; u8 record_type ; u8 word_address ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; { status = -19; i = 0U; goto ldv_43955; ldv_43954: invm_dword = igb_rd32(hw, (u32 )(((int )i + 18504) * 4)); record_type = (unsigned int )((unsigned char )invm_dword) & 7U; if ((unsigned int )record_type == 0U) { goto ldv_43951; } else { } if ((unsigned int )record_type == 2U) { i = (unsigned int )i + 1U; } else { } if ((unsigned int )record_type == 4U) { i = (unsigned int )i + 8U; } else { } if ((unsigned int )record_type == 1U) { word_address = (unsigned char )((invm_dword & 65024U) >> 9); if ((int )word_address == (int )address) { *data = (unsigned short )(invm_dword >> 16); descriptor.modname = "igb"; descriptor.function = "igb_read_invm_word_i210"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor.format = "Read INVM Word 0x%02x = %x\n"; descriptor.lineno = 366U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Read INVM Word 0x%02x = %x\n", (int 
)address, (int )*data); } else { } status = 0; goto ldv_43951; } else { } } else { } i = (u16 )((int )i + 1); ldv_43955: ; if ((unsigned int )i <= 63U) { goto ldv_43954; } else { } ldv_43951: ; if (status != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_read_invm_word_i210"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___0.format = "Requested word 0x%02x not found in OTP\n"; descriptor___0.lineno = 373U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "Requested word 0x%02x not found in OTP\n", (int )address); } else { } } else { } return (status); } } static s32 igb_read_invm_i210(struct e1000_hw *hw , u16 offset , u16 words , u16 *data ) { s32 ret_val ; s32 tmp ; s32 tmp___0 ; struct _ddebug descriptor ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___0 ; struct net_device *tmp___3 ; long tmp___4 ; { ret_val = 0; switch ((int )offset) { case 0: ret_val = igb_read_invm_word_i210(hw, (int )((unsigned char )offset), data); tmp = igb_read_invm_word_i210(hw, (int )((unsigned int )((u8 )offset) + 1U), data + 1UL); ret_val = tmp | ret_val; tmp___0 = igb_read_invm_word_i210(hw, (int )((unsigned int )((u8 )offset) + 2U), data + 2UL); ret_val = tmp___0 | ret_val; if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_read_invm_i210"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor.format = "MAC Addr not found in 
iNVM\n"; descriptor.lineno = 399U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp___1, "MAC Addr not found in iNVM\n"); } else { } } else { } goto ldv_43967; case 15: ret_val = igb_read_invm_word_i210(hw, (int )((unsigned char )offset), data); if (ret_val != 0) { *data = 29251U; ret_val = 0; } else { } goto ldv_43967; case 19: ret_val = igb_read_invm_word_i210(hw, (int )((unsigned char )offset), data); if (ret_val != 0) { *data = 193U; ret_val = 0; } else { } goto ldv_43967; case 28: ret_val = igb_read_invm_word_i210(hw, (int )((unsigned char )offset), data); if (ret_val != 0) { *data = 388U; ret_val = 0; } else { } goto ldv_43967; case 31: ret_val = igb_read_invm_word_i210(hw, (int )((unsigned char )offset), data); if (ret_val != 0) { *data = 8204U; ret_val = 0; } else { } goto ldv_43967; case 4: ret_val = igb_read_invm_word_i210(hw, (int )((unsigned char )offset), data); if (ret_val != 0) { *data = 65535U; ret_val = 0; } else { } goto ldv_43967; case 11: *data = hw->subsystem_device_id; goto ldv_43967; case 12: *data = hw->subsystem_vendor_id; goto ldv_43967; case 13: *data = hw->device_id; goto ldv_43967; case 14: *data = hw->vendor_id; goto ldv_43967; default: descriptor___0.modname = "igb"; descriptor___0.function = "igb_read_invm_i210"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___0.format = "NVM word 0x%02x is not mapped.\n"; descriptor___0.lineno = 449U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___3, "NVM word 
0x%02x is not mapped.\n", (int )offset); } else { } *data = 65535U; goto ldv_43967; } ldv_43967: ; return (ret_val); } } s32 igb_read_invm_version(struct e1000_hw *hw , struct e1000_fw_version *invm_ver ) { u32 *record ; u32 *next_record ; u32 i ; u32 invm_dword ; u32 invm_blocks ; u32 buffer[64U] ; s32 status ; u16 version ; { record = (u32 *)0U; next_record = (u32 *)0U; i = 0U; invm_dword = 0U; invm_blocks = 62U; status = -19; version = 0U; i = 0U; goto ldv_43992; ldv_43991: invm_dword = igb_rd32(hw, (i + 18504U) * 4U); buffer[i] = invm_dword; i = i + 1U; ldv_43992: ; if (i <= 63U) { goto ldv_43991; } else { } i = 1U; goto ldv_43996; ldv_43995: record = (u32 *)(& buffer) + (unsigned long )(invm_blocks - i); next_record = (u32 *)(& buffer) + (unsigned long )((invm_blocks - i) + 1U); if (i == 1U && (*record & 8184U) == 0U) { version = 0U; status = 0; goto ldv_43994; } else if (i == 1U && (*record & 8380416U) == 0U) { version = (u16 )((*record & 8184U) >> 3); status = 0; goto ldv_43994; } else if (((*record & 8184U) == 0U && (*record & 3U) == 0U) || ((*record & 3U) != 0U && i != 1U)) { version = (u16 )((*next_record & 8380416U) >> 13); status = 0; goto ldv_43994; } else if ((*record & 8380416U) == 0U && (*record & 3U) == 0U) { version = (u16 )((*record & 8184U) >> 3); status = 0; goto ldv_43994; } else { } i = i + 1U; ldv_43996: ; if (i < invm_blocks) { goto ldv_43995; } else { } ldv_43994: ; if (status == 0) { invm_ver->invm_major = (u8 )(((int )version & 1008) >> 4); invm_ver->invm_minor = (unsigned int )((u8 )version) & 15U; } else { } i = 1U; goto ldv_43999; ldv_43998: record = (u32 *)(& buffer) + (unsigned long )(invm_blocks - i); next_record = (u32 *)(& buffer) + (unsigned long )((invm_blocks - i) + 1U); if (i == 1U && (*record & 528482304U) == 0U) { invm_ver->invm_img_type = 0U; status = 0; goto ldv_43997; } else if (((*record & 3U) == 0U && (*record & 528482304U) == 0U) || ((*record & 3U) != 0U && i != 1U)) { invm_ver->invm_img_type = (u8 )((*next_record & 
528482304U) >> 23); status = 0; goto ldv_43997; } else { } i = i + 1U; ldv_43999: ; if (i < invm_blocks) { goto ldv_43998; } else { } ldv_43997: ; return (status); } } static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw ) { s32 status ; s32 (*read_op_ptr)(struct e1000_hw * , u16 , u16 , u16 * ) ; s32 tmp ; { status = 0; tmp = (*(hw->nvm.ops.acquire))(hw); if (tmp == 0) { read_op_ptr = hw->nvm.ops.read; hw->nvm.ops.read = & igb_read_nvm_eerd; status = igb_validate_nvm_checksum(hw); hw->nvm.ops.read = read_op_ptr; (*(hw->nvm.ops.release))(hw); } else { status = 13; } return (status); } } static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw ) { s32 ret_val ; u16 checksum ; u16 i ; u16 nvm_data ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; struct net_device *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___1 ; struct net_device *tmp___3 ; long tmp___4 ; s32 tmp___5 ; { ret_val = 0; checksum = 0U; ret_val = igb_read_nvm_eerd(hw, 0, 1, & nvm_data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_update_nvm_checksum_i210"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor.format = "EEPROM read failed\n"; descriptor.lineno = 604U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "EEPROM read failed\n"); } else { } goto out; } else { } tmp___5 = (*(hw->nvm.ops.acquire))(hw); if (tmp___5 == 0) { i = 0U; goto ldv_44021; ldv_44020: ret_val = igb_read_nvm_eerd(hw, (int )i, 1, & nvm_data); if (ret_val != 0) { (*(hw->nvm.ops.release))(hw); descriptor___0.modname = "igb"; descriptor___0.function = 
"igb_update_nvm_checksum_i210"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___0.format = "NVM Read Error while updating checksum.\n"; descriptor___0.lineno = 618U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___1, "NVM Read Error while updating checksum.\n"); } else { } goto out; } else { } checksum = (int )checksum + (int )nvm_data; i = (u16 )((int )i + 1); ldv_44021: ; if ((unsigned int )i <= 62U) { goto ldv_44020; } else { } checksum = 47802U - (unsigned int )checksum; ret_val = igb_write_nvm_srwr(hw, 63, 1, & checksum); if (ret_val != 0) { (*(hw->nvm.ops.release))(hw); descriptor___1.modname = "igb"; descriptor___1.function = "igb_update_nvm_checksum_i210"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___1.format = "NVM Write Error while updating checksum.\n"; descriptor___1.lineno = 628U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___3, "NVM Write Error while updating checksum.\n"); } else { } goto out; } else { } (*(hw->nvm.ops.release))(hw); ret_val = igb_update_flash_i210(hw); } else { ret_val = -13; } out: ; return (ret_val); } } static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw ) { s32 ret_val ; u32 i ; u32 reg ; { ret_val = -1; i = 0U; goto ldv_44032; ldv_44031: reg = 
/* Tail of igb_pool_flash_update_done_i210() (header is on the previous line):
 * polls the register at offset 16 (EECD) for bit 0x04000000 (flash-update
 * done), delaying ~20us per attempt for up to 20000 iterations; ret_val
 * stays -1 if the bit never appears (timeout). */
igb_rd32(hw, 16U); if ((reg & 67108864U) != 0U) { ret_val = 0; goto ldv_44030; } else { } __const_udelay(21475UL); i = i + 1U; ldv_44032: ; if (i <= 19999U) { goto ldv_44031; } else { } ldv_44030: ; return (ret_val); } }
/* igb_get_flash_presence_i210() - reads the register at offset 16 (EECD) and
 * returns true (1) when bit 0x00080000 is set, false (0) otherwise.
 * NOTE(review): the bit presumably indicates an attached external flash
 * device - confirm against the I210 datasheet EEC register definition. */
bool igb_get_flash_presence_i210(struct e1000_hw *hw ) { u32 eec ; bool ret_val ; { eec = 0U; ret_val = 0; eec = igb_rd32(hw, 16U); if ((eec & 524288U) != 0U) { ret_val = 1; } else { } return (ret_val); } }
/* igb_update_flash_i210() (body continues on the following lines) - first
 * waits for any in-progress flash cycle via
 * igb_pool_flash_update_done_i210(); on timeout (-1) it emits a dynamic
 * debug message and jumps to out. Otherwise it reads offset 16, ORs in bit
 * 0x00800000 and writes it back (the MMIO write is guarded by the LDV
 * NULL-check on hw->hw_addr), then polls for completion again. */
static s32 igb_update_flash_i210(struct e1000_hw *hw ) { s32 ret_val ; u32 flup ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; u32 tmp___1 ; u8 *hw_addr ; u8 *__var ; long tmp___2 ; struct _ddebug descriptor___0 ; struct net_device *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___1 ; struct net_device *tmp___5 ; long tmp___6 ; { ret_val = 0;
/* Bail out early if a previous flash cycle never completed. */
ret_val = igb_pool_flash_update_done_i210(hw); if (ret_val == -1) { descriptor.modname = "igb"; descriptor.function = "igb_update_flash_i210"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor.format = "Flash update time out\n"; descriptor.lineno = 693U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "Flash update time out\n"); } else { } goto out; } else { }
/* Start the flash update: set bit 0x00800000 in the register at offset 16. */
tmp___1 = igb_rd32(hw, 16U); flup = tmp___1 | 8388608U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(flup, (void volatile *)hw_addr + 16U); } else { } ret_val = igb_pool_flash_update_done_i210(hw); if (ret_val != 0) { descriptor___0.modname = "igb"; descriptor___0.function = "igb_update_flash_i210"; descriptor___0.filename =
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___0.format = "Flash update complete\n"; descriptor___0.lineno = 702U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___0, (struct net_device const *)tmp___3, "Flash update complete\n"); } else { } } else { descriptor___1.modname = "igb"; descriptor___1.function = "igb_update_flash_i210"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor___1.format = "Flash update time out\n"; descriptor___1.lineno = 704U; descriptor___1.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor___1, (struct net_device const *)tmp___5, "Flash update time out\n"); } else { } } out: ; return (ret_val); } } s32 igb_valid_led_default_i210(struct e1000_hw *hw , u16 *data ) { s32 ret_val ; struct _ddebug descriptor ; struct net_device *tmp ; long tmp___0 ; { ret_val = (*(hw->nvm.ops.read))(hw, 4, 1, data); if (ret_val != 0) { descriptor.modname = "igb"; descriptor.function = "igb_valid_led_default_i210"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/e1000_i210.c"; descriptor.format = "NVM Read Error\n"; descriptor.lineno = 724U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 
!= 0L) { tmp = igb_get_hw_dev(hw); __dynamic_netdev_dbg(& descriptor, (struct net_device const *)tmp, "NVM Read Error\n"); } else { } goto out; } else { } if ((unsigned int )*data == 0U || (unsigned int )*data == 65535U) { switch ((unsigned int )hw->phy.media_type) { case 3U: *data = 280U; goto ldv_44060; case 1U: ; default: *data = 2073U; goto ldv_44060; } ldv_44060: ; } else { } out: ; return (ret_val); } } static s32 __igb_access_xmdio_reg(struct e1000_hw *hw , u16 address , u8 dev_addr , u16 *data , bool read ) { s32 ret_val ; { ret_val = 0; ret_val = (*(hw->phy.ops.write_reg))(hw, 13U, (int )dev_addr); if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(hw->phy.ops.write_reg))(hw, 14U, (int )address); if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(hw->phy.ops.write_reg))(hw, 13U, (int )((unsigned int )((u16 )dev_addr) | 16384U)); if (ret_val != 0) { return (ret_val); } else { } if ((int )read) { ret_val = (*(hw->phy.ops.read_reg))(hw, 14U, data); } else { ret_val = (*(hw->phy.ops.write_reg))(hw, 14U, (int )*data); } if (ret_val != 0) { return (ret_val); } else { } ret_val = (*(hw->phy.ops.write_reg))(hw, 13U, 0); if (ret_val != 0) { return (ret_val); } else { } return (ret_val); } } s32 igb_read_xmdio_reg(struct e1000_hw *hw , u16 addr , u8 dev_addr , u16 *data ) { s32 tmp ; { tmp = __igb_access_xmdio_reg(hw, (int )addr, (int )dev_addr, data, 1); return (tmp); } } s32 igb_write_xmdio_reg(struct e1000_hw *hw , u16 addr , u8 dev_addr , u16 data ) { s32 tmp ; { tmp = __igb_access_xmdio_reg(hw, (int )addr, (int )dev_addr, & data, 0); return (tmp); } } s32 igb_init_nvm_params_i210(struct e1000_hw *hw ) { s32 ret_val ; struct e1000_nvm_info *nvm ; bool tmp ; { ret_val = 0; nvm = & hw->nvm; nvm->ops.acquire = & igb_acquire_nvm_i210; nvm->ops.release = & igb_release_nvm_i210; nvm->ops.valid_led_default = & igb_valid_led_default_i210; tmp = igb_get_flash_presence_i210(hw); if ((int )tmp) { hw->nvm.type = 3; nvm->ops.read = & 
igb_read_nvm_srrd_i210; nvm->ops.write = & igb_write_nvm_srwr_i210; nvm->ops.validate = & igb_validate_nvm_checksum_i210; nvm->ops.update = & igb_update_nvm_checksum_i210; } else { hw->nvm.type = 4; nvm->ops.read = & igb_read_invm_i210; nvm->ops.write = (s32 (*)(struct e1000_hw * , u16 , u16 , u16 * ))0; nvm->ops.validate = (s32 (*)(struct e1000_hw * ))0; nvm->ops.update = (s32 (*)(struct e1000_hw * ))0; } return (ret_val); } } s32 igb_pll_workaround_i210(struct e1000_hw *hw ) { s32 ret_val ; u32 wuc ; u32 mdicnfg ; u32 ctrl ; u32 ctrl_ext ; u32 reg_val ; u16 nvm_word ; u16 phy_word ; u16 pci_word ; u16 tmp_nvm ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___2 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___3 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___4 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___5 ; u8 *hw_addr___6 ; u8 *__var___6 ; long tmp___6 ; { wuc = igb_rd32(hw, 22528U); mdicnfg = igb_rd32(hw, 3588U); reg_val = mdicnfg & 2147483647U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(reg_val, (void volatile *)hw_addr + 3588U); } else { } ret_val = igb_read_invm_word_i210(hw, 10, & nvm_word); if (ret_val != 0) { nvm_word = 8239U; } else { } tmp_nvm = (u16 )((unsigned int )nvm_word | 16U); i = 0; goto ldv_44125; ldv_44124: igb_read_phy_reg_gs40g(hw, 16515086U, & phy_word); if (((int )phy_word & 255) != 255) { ret_val = 0; goto ldv_44105; } else { ret_val = -2; } ctrl = igb_rd32(hw, 0U); __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ctrl | 2147483648U, (void volatile *)hw_addr___0); } else { } ctrl_ext = igb_rd32(hw, 24U); ctrl_ext = ctrl_ext | 
1310720U; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr___1 + 24U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(0U, (void volatile *)hw_addr___2 + 22528U); } else { } reg_val = (u32 )(((int )tmp_nvm << 16) | 160); __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(reg_val, (void volatile *)hw_addr___3 + 73764U); } else { } igb_read_pci_cfg(hw, 68U, & pci_word); pci_word = (u16 )((unsigned int )pci_word | 3U); igb_write_pci_cfg(hw, 68U, & pci_word); usleep_range(1000UL, 2000UL); pci_word = (unsigned int )pci_word & 65532U; igb_write_pci_cfg(hw, 68U, & pci_word); reg_val = (u32 )(((int )nvm_word << 16) | 160); __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(reg_val, (void volatile *)hw_addr___4 + 73764U); } else { } __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(wuc, (void volatile *)hw_addr___5 + 22528U); } else { } i = i + 1; ldv_44125: ; if (i <= 4) { goto ldv_44124; } else { } ldv_44105: __var___6 = (u8 *)0U; hw_addr___6 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___6 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(mdicnfg, (void volatile *)hw_addr___6 + 3588U); } else { } return (ret_val); } } bool ldv_queue_work_on_269(int 
/* Tail of ldv_queue_work_on_269() (signature starts on the previous line):
 * LDV model wrapper that forwards to queue_work_on() and then calls
 * activate_work_11(..., 2) so the verifier tracks the queued callback. */
ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper: forwards to queue_delayed_work_on() and registers the
 * embedded work_struct with activate_work_11(). */
bool ldv_queue_delayed_work_on_270(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* LDV wrapper: identical shape to ldv_queue_work_on_269 (separate numbered
 * instance generated per call site). */
bool ldv_queue_work_on_271(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper: flush_workqueue() followed by call_and_disable_all_11(2),
 * modelling that flushing runs and retires all pending work items. */
void ldv_flush_workqueue_272(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } }
/* LDV wrapper: same pattern as ldv_queue_delayed_work_on_270. */
bool ldv_queue_delayed_work_on_273(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* LDV lock-discipline wrappers: record the lock/unlock event in the model
 * (ldv_mutex_*_lock) and then perform the real mutex operation. */
void ldv_mutex_lock_274(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_275(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_276(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } }
/* ldv_mutex_trylock_277() continues on the following line. */
int ldv_mutex_trylock_277(struct
/* Tail of ldv_mutex_trylock_277() (signature starts on the previous line):
 * calls the real mutex_trylock(), then returns the LDV model's verdict from
 * ldv_mutex_trylock_mutex_of_device() instead of the real result.
 * NOTE(review): the second `return (ldv_func_res);` is unreachable dead code
 * left by the LDV instrumentation - presumably intentional (the model result
 * replaces the real one); do not "fix" without regenerating the harness. */
mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } }
/* LDV lock-discipline wrappers for device and inode mutexes: record the
 * event in the model, then perform the real operation. */
void ldv_mutex_unlock_278(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_279(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_280(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/* clear_bit_unlock(): compiler barrier (empty asm with "memory" clobber)
 * followed by clear_bit() - CIL's rendering of the release-semantics bit
 * clear. */
__inline static void clear_bit_unlock(long nr , unsigned long volatile *addr ) { { __asm__ volatile ("": : : "memory"); clear_bit(nr, addr); return; } } extern void __dynamic_pr_debug(struct _ddebug * , char const * , ...) ; extern void __might_fault(char const * , int ) ; bool ldv_is_err(void const *ptr ) ;
/* div_u64_rem(): 64-by-32 unsigned division storing the remainder through
 * *remainder and returning the quotient; div_u64() discards the remainder. */
__inline static u64 div_u64_rem(u64 dividend , u32 divisor , u32 *remainder ) { { *remainder = (u32 )(dividend % (u64 )divisor); return (dividend / (u64 )divisor); } } __inline static u64 div_u64(u64 dividend , u32 divisor ) { u32 remainder ; u64 tmp ; { tmp = div_u64_rem(dividend, divisor, & remainder); return (tmp); } } __inline static bool IS_ERR(void const *ptr ) ;
/* Forward declarations of the next batch of per-call-site LDV mutex
 * wrappers and the raw spinlock primitives they model. */
int ldv_mutex_trylock_305(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_303(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_306(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_307(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_302(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_304(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_308(struct mutex *ldv_func_arg1 ) ; extern unsigned long _raw_spin_lock_irqsave(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; __inline static
/* Inline time helpers: spin_unlock_irqrestore (unwraps spinlock_t to its raw lock), timespec_to_ns (sec*1e9 + nsec), ktime_set (saturates to KTIME_MAX = 2^63-1 ns when secs would overflow ~292 years), ns_to_ktime, and ktime_get_real (real-time clock via ktime_get_with_offset(0)). Followed by externs and forward declarations of the LDV workqueue wrappers 297-301 used by the igb PTP code below. */
void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->__annonCompField17.rlock, flags); return; } } __inline static s64 timespec_to_ns(struct timespec const *ts ) { { return ((long long )ts->tv_sec * 1000000000LL + (long long )ts->tv_nsec); } } extern struct timespec ns_to_timespec(s64 const ) ; __inline static ktime_t ktime_set(s64 const secs , unsigned long const nsecs ) { ktime_t __constr_expr_0 ; long tmp ; ktime_t __constr_expr_1 ; { tmp = ldv__builtin_expect((long long )secs > 9223372035LL, 0L); if (tmp != 0L) { __constr_expr_0.tv64 = 9223372036854775807LL; return (__constr_expr_0); } else { } __constr_expr_1.tv64 = (long long )secs * 1000000000LL + (long long )nsecs; return (__constr_expr_1); } } __inline static ktime_t ns_to_ktime(u64 ns ) { ktime_t ktime_zero ; ktime_t __constr_expr_0 ; { ktime_zero.tv64 = 0LL; __constr_expr_0.tv64 = (long long )((unsigned long long )ktime_zero.tv64 + ns); return (__constr_expr_0); } } extern ktime_t ktime_get_with_offset(enum tk_offsets ) ; __inline static ktime_t ktime_get_real(void) { ktime_t tmp ; { tmp = ktime_get_with_offset(0); return (tmp); } } extern void init_timer_key(struct timer_list * , unsigned int , char const * , struct lock_class_key * ) ; extern void delayed_work_timer_fn(unsigned long ) ; bool ldv_queue_work_on_297(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_299(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_298(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_301(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_300(struct workqueue_struct *ldv_func_arg1 ) ; bool 
/* Workqueue convenience inlines routed through the LDV wrappers (8192 = WORK_CPU_UNBOUND): queue_work___0, queue_delayed_work, schedule_work___0 and schedule_delayed_work (both on system_wq). Also copy_from_user: computes the destination's compile-time object size and only performs the copy when n provably fits (sz < 0 means size unknown), otherwise calls __copy_from_user_overflow; returns the number of bytes NOT copied, per kernel convention. */
ldv_cancel_work_sync_310(struct work_struct *ldv_func_arg1 ) ; extern bool cancel_delayed_work_sync(struct delayed_work * ) ; bool ldv_cancel_delayed_work_sync_309(struct delayed_work *ldv_func_arg1 ) ; __inline static bool queue_work___0(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_297(8192, wq, work); return (tmp); } } __inline static bool queue_delayed_work(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_298(8192, wq, dwork, delay); return (tmp); } } __inline static bool schedule_work___0(struct work_struct *work ) { bool tmp ; { tmp = queue_work___0(system_wq, work); return (tmp); } } __inline static bool schedule_delayed_work(struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work(system_wq, dwork, delay); return (tmp); } } void call_and_disable_all_12(int state ) ; void invoke_work_11(void) ; void invoke_work_12(void) ; void call_and_disable_work_12(struct work_struct *work ) ; void disable_work_12(struct work_struct *work ) ; void activate_work_12(struct work_struct *work , int state ) ; void call_and_disable_work_11(struct work_struct *work ) ; extern unsigned long _copy_from_user(void * , void const * , unsigned int ) ; extern unsigned long _copy_to_user(void * , void const * , unsigned int ) ; extern void __copy_from_user_overflow(void) ; extern void __copy_to_user_overflow(void) ; __inline static unsigned long copy_from_user(void *to , void const *from , unsigned long n ) { int sz ; unsigned long tmp ; long tmp___0 ; { tmp = __builtin_object_size((void const *)to, 0); sz = (int )tmp; __might_fault("./arch/x86/include/asm/uaccess.h", 697); tmp___0 = ldv__builtin_expect((long )(sz < 0 || (unsigned long )sz >= n), 1L); if (tmp___0 != 0L) { n = _copy_from_user(to, from, (unsigned int )n); } else { __copy_from_user_overflow(); } return (n); } } __inline static unsigned long copy_to_user(void *to 
/* Tail of copy_to_user (mirror of copy_from_user, size-checking the source object), skb_hwtstamps (hardware timestamp area lives in skb_shared_info past skb end), timecounter_adjtime (adds a signed delta to the timecounter's nsec accumulator), PTP externs, and igb_ptp_read_82576: the 82576 cyclecounter read callback. container_of is expanded by CIL as __mptr + 0xffffffffffffcd70UL (i.e. minus the cc member offset within struct igb_adapter). Reads SYSTIML (reg 46592 = 0xB600) then SYSTIMH (46596); reading low first latches the high half per hardware convention - presumed, confirm against the 82576 datasheet. */
, void const *from , unsigned long n ) { int sz ; unsigned long tmp ; long tmp___0 ; { tmp = __builtin_object_size(from, 0); sz = (int )tmp; __might_fault("./arch/x86/include/asm/uaccess.h", 732); tmp___0 = ldv__builtin_expect((long )(sz < 0 || (unsigned long )sz >= n), 1L); if (tmp___0 != 0L) { n = _copy_to_user(to, from, (unsigned int )n); } else { __copy_to_user_overflow(); } return (n); } } __inline static struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb ) { unsigned char *tmp ; { tmp = skb_end_pointer((struct sk_buff const *)skb); return (& ((struct skb_shared_info *)tmp)->hwtstamps); } } __inline static void timecounter_adjtime(struct timecounter *tc , s64 delta ) { { tc->nsec = tc->nsec + (unsigned long long )delta; return; } } extern void timecounter_init(struct timecounter * , struct cyclecounter const * , u64 ) ; extern u64 timecounter_read(struct timecounter * ) ; extern u64 timecounter_cyc2time(struct timecounter * , cycle_t ) ; extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info * , struct device * ) ; extern int ptp_clock_unregister(struct ptp_clock * ) ; extern int ptp_find_pin(struct ptp_clock * , enum ptp_pin_function , unsigned int ) ; static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter ) ; static cycle_t igb_ptp_read_82576(struct cyclecounter const *cc ) { struct igb_adapter *igb ; struct cyclecounter const *__mptr ; struct e1000_hw *hw ; u64 val ; u32 lo ; u32 hi ; { __mptr = cc; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcd70UL; hw = & igb->hw; lo = igb_rd32(hw, 46592U); hi = igb_rd32(hw, 46596U); val = (unsigned long long )hi << 32; val = (u64 )lo | val; return (val); } } static cycle_t igb_ptp_read_82580(struct cyclecounter const *cc ) { struct igb_adapter *igb ; struct cyclecounter const *__mptr ; struct e1000_hw *hw ; u32 lo ; u32 hi ; u64 val ; { __mptr = cc; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcd70UL; hw = & igb->hw; igb_rd32(hw, 46840U); lo = igb_rd32(hw, 46592U); hi = 
/* Tail of igb_ptp_read_82580 (the extra igb_rd32(hw, 46840) is a discarded read - presumably latching SYSTIMR before SYSTIML/SYSTIMH; confirm with the 82580 datasheet). igb_ptp_read_i210 reads nsec from 46592 and whole seconds from 46596 directly into a timespec (i210 keeps time in sec/nsec, not a free-running counter). igb_ptp_write_i210 writes nsec then sec, each guarded by the LDV-expanded writel pattern that re-loads hw->hw_addr and skips the MMIO write when the device has been unmapped (hw_addr == NULL). igb_ptp_systim_to_hwtstamp converts a raw register value to skb hwtstamps: mac.type 2-5 (82576/82580-class) translate through the timecounter under tmreg_lock; types 6-7 (i210/i211) interpret the value directly as sec<<32|nsec via ktime_set. */
igb_rd32(hw, 46596U); val = (unsigned long long )hi << 32; val = (u64 )lo | val; return (val); } } static void igb_ptp_read_i210(struct igb_adapter *adapter , struct timespec *ts ) { struct e1000_hw *hw ; u32 sec ; u32 nsec ; { hw = & adapter->hw; igb_rd32(hw, 46840U); nsec = igb_rd32(hw, 46592U); sec = igb_rd32(hw, 46596U); ts->tv_sec = (__kernel_time_t )sec; ts->tv_nsec = (long )nsec; return; } } static void igb_ptp_write_i210(struct igb_adapter *adapter , struct timespec const *ts ) { struct e1000_hw *hw ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; { hw = & adapter->hw; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel((unsigned int )ts->tv_nsec, (void volatile *)hw_addr + 46592U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel((unsigned int )ts->tv_sec, (void volatile *)hw_addr___0 + 46596U); } else { } return; } } static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter , struct skb_shared_hwtstamps *hwtstamps , u64 systim ) { unsigned long flags ; u64 ns ; raw_spinlock_t *tmp ; { switch ((unsigned int )adapter->hw.mac.type) { case 2U: ; case 3U: ; case 5U: ; case 4U: tmp = spinlock_check(& adapter->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp); ns = timecounter_cyc2time(& adapter->tc, systim); spin_unlock_irqrestore(& adapter->tmreg_lock, flags); memset((void *)hwtstamps, 0, 8UL); hwtstamps->hwtstamp = ns_to_ktime(ns); goto ldv_48354; case 6U: ; case 7U: memset((void *)hwtstamps, 0, 8UL); hwtstamps->hwtstamp = ktime_set((s64 const )(systim >> 32), (unsigned long const )systim & 4294967295UL); goto ldv_48354; default: ; goto ldv_48354; } ldv_48354: ; return; } } static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp , s32 
/* PTP frequency adjustment callbacks. igb_ptp_adjfreq_82576: converts parts-per-billion to a TIMINCA increment delta via rate = |ppb| << 14 / 1953125 (equivalent to |ppb| * 2^14 / (10^9/512) scaling - derived from the generated constants; confirm against the 82576 TIMINCA period definition), applies it around the 8388608 (1<<23) base increment, and writes TIMINCA (reg 46600 = 0xB608) with the 16777216 (1<<24) period field. igb_ptp_adjfreq_82580: rate = |ppb| << 26 / 1953125, written as a 31-bit magnitude with bit 31 (2147483648) as the sign/decrement flag. Both use the null-guarded writel pattern. container_of here is __mptr + 0xffffffffffffcf80UL (minus the ptp_caps offset). */
ppb ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; struct e1000_hw *hw ; int neg_adj ; u64 rate ; u32 incvalue ; u8 *hw_addr ; u8 *__var ; long tmp ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; hw = & igb->hw; neg_adj = 0; if (ppb < 0) { neg_adj = 1; ppb = - ppb; } else { } rate = (u64 )ppb; rate = rate << 14; rate = div_u64(rate, 1953125U); incvalue = 8388608U; if (neg_adj != 0) { incvalue = incvalue - (u32 )rate; } else { incvalue = (u32 )rate + incvalue; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel((incvalue & 16777215U) | 16777216U, (void volatile *)hw_addr + 46600U); } else { } return (0); } } static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp , s32 ppb ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; struct e1000_hw *hw ; int neg_adj ; u64 rate ; u32 inca ; u8 *hw_addr ; u8 *__var ; long tmp ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; hw = & igb->hw; neg_adj = 0; if (ppb < 0) { neg_adj = 1; ppb = - ppb; } else { } rate = (u64 )ppb; rate = rate << 26; rate = div_u64(rate, 1953125U); inca = (u32 )rate & 2147483647U; if (neg_adj != 0) { inca = inca | 2147483648U; } else { } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(inca, (void volatile *)hw_addr + 46600U); } else { } return (0); } } static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp , s64 delta ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; unsigned long flags ; raw_spinlock_t *tmp ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; tmp = spinlock_check(& igb->tmreg_lock); flags = 
/* PTP time get/adjust callbacks, all serialized by igb->tmreg_lock (irqsave). adjtime_82576 shifts the software timecounter by delta ns; adjtime_i210 converts delta to a timespec and read-modify-writes the hardware clock (read_i210 + timespec_add + write_i210). gettime_82576 reads the timecounter and converts to timespec; gettime_i210 reads the hardware sec/nsec registers directly. settime_82576 (cut at this line's end) re-seeds the timecounter at the requested time. */
_raw_spin_lock_irqsave(tmp); timecounter_adjtime(& igb->tc, delta); spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); } } static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp , s64 delta ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; unsigned long flags ; struct timespec now ; struct timespec then ; struct timespec tmp ; raw_spinlock_t *tmp___0 ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; tmp = ns_to_timespec(delta); then = tmp; tmp___0 = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp___0); igb_ptp_read_i210(igb, & now); now = timespec_add(now, then); igb_ptp_write_i210(igb, (struct timespec const *)(& now)); spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); } } static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp , struct timespec *ts ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; unsigned long flags ; u64 ns ; raw_spinlock_t *tmp ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; tmp = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp); ns = timecounter_read(& igb->tc); spin_unlock_irqrestore(& igb->tmreg_lock, flags); *ts = ns_to_timespec((s64 const )ns); return (0); } } static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp , struct timespec *ts ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; unsigned long flags ; raw_spinlock_t *tmp ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; tmp = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp); igb_ptp_read_i210(igb, ts); spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); } } static int igb_ptp_settime_82576(struct ptp_clock_info *ptp , struct timespec const *ts ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; unsigned long flags ; u64 ns ; s64 tmp 
/* Tail of settime_82576: timecounter_init re-seeds the software clock with the requested ns under tmreg_lock. settime_i210 writes the hardware sec/nsec registers directly under the same lock. igb_pin_direction (head; body continues on the next line): selects CTRL for SDP pins 0-1 and CTRL_EXT for pins 2-3, then clears (input) or sets (output) the pin's direction mask bit. */
; raw_spinlock_t *tmp___0 ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; tmp = timespec_to_ns(ts); ns = (u64 )tmp; tmp___0 = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp___0); timecounter_init(& igb->tc, (struct cyclecounter const *)(& igb->cc), ns); spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); } } static int igb_ptp_settime_i210(struct ptp_clock_info *ptp , struct timespec const *ts ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; unsigned long flags ; raw_spinlock_t *tmp ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; tmp = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp); igb_ptp_write_i210(igb, ts); spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); } } static void igb_pin_direction(int pin , int input , u32 *ctrl , u32 *ctrl_ext ) { u32 *ptr ; u32 mask[4U] ; { ptr = pin <= 1 ? 
/* Tail of igb_pin_direction (per-pin direction masks; pins 0-1 live in CTRL, pins 2-3 in CTRL_EXT). igb_pin_extts routes an SDP pin to an auxiliary timestamp channel: reads CTRL (0), CTRL_EXT (24) and TSSDP (60 = 0x3C), sets the pin as input, clears its output-enable bit, then programs AUX0/AUX1 SDP select + enable bits depending on chan. The three trailing null-guarded writel blocks write back TSSDP, CTRL, CTRL_EXT in that order. NOTE(review): the pin argument indexes 4-entry arrays with no bounds check here - callers are expected to pass 0-3 (ptp_find_pin validates); do not call with other values. */
ctrl : ctrl_ext; mask[0] = 4194304U; mask[1] = 8388608U; mask[2] = 1024U; mask[3] = 2048U; if (input != 0) { *ptr = *ptr & ~ mask[pin]; } else { *ptr = *ptr | mask[pin]; } return; } } static void igb_pin_extts(struct igb_adapter *igb , int chan , int pin ) { u32 aux0_sel_sdp[4U] ; u32 aux1_sel_sdp[4U] ; u32 ts_sdp_en[4U] ; struct e1000_hw *hw ; u32 ctrl ; u32 ctrl_ext ; u32 tssdp ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; { aux0_sel_sdp[0] = 0U; aux0_sel_sdp[1] = 1U; aux0_sel_sdp[2] = 2U; aux0_sel_sdp[3] = 3U; aux1_sel_sdp[0] = 0U; aux1_sel_sdp[1] = 8U; aux1_sel_sdp[2] = 16U; aux1_sel_sdp[3] = 24U; ts_sdp_en[0] = 256U; ts_sdp_en[1] = 2048U; ts_sdp_en[2] = 16384U; ts_sdp_en[3] = 131072U; hw = & igb->hw; tssdp = 0U; ctrl = igb_rd32(hw, 0U); ctrl_ext = igb_rd32(hw, 24U); tssdp = igb_rd32(hw, 60U); igb_pin_direction(pin, 1, & ctrl, & ctrl_ext); tssdp = ~ ts_sdp_en[pin] & tssdp; if (chan == 1) { tssdp = tssdp & 4294967271U; tssdp = (aux1_sel_sdp[pin] | tssdp) | 32U; } else { tssdp = tssdp & 4294967292U; tssdp = (aux0_sel_sdp[pin] | tssdp) | 4U; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(tssdp, (void volatile *)hw_addr + 60U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ctrl, (void volatile *)hw_addr___0); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr___1 + 24U); } else { } return; } } static void igb_pin_perout(struct igb_adapter *igb , int chan , int pin ) { u32 aux0_sel_sdp[4U] ; u32 
/* igb_pin_perout configures an SDP pin as a periodic output driven by target-time channel 0 or 1: sets the pin as output via igb_pin_direction, detaches it from any aux-timestamp selection it currently has (the two AUXn_SEL_SDP comparisons), clears the pin's target-time select field, then selects TT0 or TT1 and sets the pin's SDP enable bit in TSSDP. Writes back TSSDP (60), CTRL (0), CTRL_EXT (24) through the null-guarded writel pattern (last write continues on the next line). */
aux1_sel_sdp[4U] ; u32 ts_sdp_en[4U] ; u32 ts_sdp_sel_tt0[4U] ; u32 ts_sdp_sel_tt1[4U] ; u32 ts_sdp_sel_clr[4U] ; struct e1000_hw *hw ; u32 ctrl ; u32 ctrl_ext ; u32 tssdp ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; { aux0_sel_sdp[0] = 0U; aux0_sel_sdp[1] = 1U; aux0_sel_sdp[2] = 2U; aux0_sel_sdp[3] = 3U; aux1_sel_sdp[0] = 0U; aux1_sel_sdp[1] = 8U; aux1_sel_sdp[2] = 16U; aux1_sel_sdp[3] = 24U; ts_sdp_en[0] = 256U; ts_sdp_en[1] = 2048U; ts_sdp_en[2] = 16384U; ts_sdp_en[3] = 131072U; ts_sdp_sel_tt0[0] = 0U; ts_sdp_sel_tt0[1] = 0U; ts_sdp_sel_tt0[2] = 0U; ts_sdp_sel_tt0[3] = 0U; ts_sdp_sel_tt1[0] = 64U; ts_sdp_sel_tt1[1] = 512U; ts_sdp_sel_tt1[2] = 4096U; ts_sdp_sel_tt1[3] = 32768U; ts_sdp_sel_clr[0] = 192U; ts_sdp_sel_clr[1] = 1536U; ts_sdp_sel_clr[2] = 12288U; ts_sdp_sel_clr[3] = 98304U; hw = & igb->hw; tssdp = 0U; ctrl = igb_rd32(hw, 0U); ctrl_ext = igb_rd32(hw, 24U); tssdp = igb_rd32(hw, 60U); igb_pin_direction(pin, 0, & ctrl, & ctrl_ext); if ((tssdp & 3U) == aux0_sel_sdp[pin]) { tssdp = tssdp & 4294967291U; } else { } if ((tssdp & 24U) == aux1_sel_sdp[pin]) { tssdp = tssdp & 4294967263U; } else { } tssdp = ~ ts_sdp_sel_clr[pin] & tssdp; if (chan == 1) { tssdp = ts_sdp_sel_tt1[pin] | tssdp; } else { tssdp = ts_sdp_sel_tt0[pin] | tssdp; } tssdp = ts_sdp_en[pin] | tssdp; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(tssdp, (void volatile *)hw_addr + 60U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(ctrl, (void volatile *)hw_addr___0); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned 
/* Tail of igb_pin_perout, then igb_ptp_feature_enable_i210, the i210 ancillary-feature handler. This line covers the PTP_CLK_REQ_EXTTS case (rq->type == 0): when enabling, ptp_find_pin validates/locates the pin for function EXTTS (returns -EBUSY/-16 on failure); chooses AUX1 (tsauxc 1024 / tsim 64) or AUX0 (256 / 32) masks by extts.index; then under tmreg_lock reads TSAUXC (46656 = 0xB640) and TSIM (46708 = 0xB674), routes the pin via igb_pin_extts and sets or clears the enable/interrupt bits, and writes TSAUXC back (TSIM write continues on the next line). */
long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(ctrl_ext, (void volatile *)hw_addr___1 + 24U); } else { } return; } } static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp , struct ptp_clock_request *rq , int on ) { struct igb_adapter *igb ; struct ptp_clock_info const *__mptr ; struct e1000_hw *hw ; u32 tsauxc ; u32 tsim ; u32 tsauxc_mask ; u32 tsim_mask ; u32 trgttiml ; u32 trgttimh ; unsigned long flags ; struct timespec ts ; int pin ; s64 ns ; raw_spinlock_t *tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; raw_spinlock_t *tmp___2 ; int i ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___3 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___4 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___5 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___6 ; raw_spinlock_t *tmp___7 ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___8 ; { __mptr = (struct ptp_clock_info const *)ptp; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf80UL; hw = & igb->hw; pin = -1; switch ((unsigned int )rq->type) { case 0U: ; if (on != 0) { pin = ptp_find_pin(igb->ptp_clock, 1, rq->__annonCompField115.extts.index); if (pin < 0) { return (-16); } else { } } else { } if (rq->__annonCompField115.extts.index == 1U) { tsauxc_mask = 1024U; tsim_mask = 64U; } else { tsauxc_mask = 256U; tsim_mask = 32U; } tmp = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp); tsauxc = igb_rd32(hw, 46656U); tsim = igb_rd32(hw, 46708U); if (on != 0) { igb_pin_extts(igb, (int )rq->__annonCompField115.extts.index, pin); tsauxc = tsauxc | tsauxc_mask; tsim = tsim | tsim_mask; } else { tsauxc = ~ tsauxc_mask & tsauxc; tsim = ~ tsim_mask & tsim; } __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(tsauxc, (void volatile *)hw_addr + 46656U); } else { } __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); 
/* PTP_CLK_REQ_PEROUT case (rq->type == 1): validates the pin for function PEROUT (-16/-EBUSY on failure), halves the requested period (ns >> 1 gives the half-cycle toggle interval) and rejects periods below 1 us when enabling (ns <= 499999 -> -22/-EINVAL). Selects TRGTTIML/H register pair and TSAUXC/TSIM masks for channel 0 (46660/46664, masks 1/8) or channel 1 (46668/46672, masks 2/16). Under tmreg_lock: routes the pin via igb_pin_perout, records start and half-period in igb->perout[i], and writes the target time high word then low word (low-word write continues on the next line). */
tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(tsim, (void volatile *)hw_addr___0 + 46708U); } else { } spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); case 1U: ; if (on != 0) { pin = ptp_find_pin(igb->ptp_clock, 2, rq->__annonCompField115.perout.index); if (pin < 0) { return (-16); } else { } } else { } ts.tv_sec = (__kernel_time_t )rq->__annonCompField115.perout.period.sec; ts.tv_nsec = (long )rq->__annonCompField115.perout.period.nsec; ns = timespec_to_ns((struct timespec const *)(& ts)); ns = ns >> 1; if (on != 0 && ns <= 499999LL) { return (-22); } else { } ts = ns_to_timespec(ns); if (rq->__annonCompField115.perout.index == 1U) { tsauxc_mask = 2U; tsim_mask = 16U; trgttiml = 46668U; trgttimh = 46672U; } else { tsauxc_mask = 1U; tsim_mask = 8U; trgttiml = 46660U; trgttimh = 46664U; } tmp___2 = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp___2); tsauxc = igb_rd32(hw, 46656U); tsim = igb_rd32(hw, 46708U); if (on != 0) { i = (int )rq->__annonCompField115.perout.index; igb_pin_perout(igb, i, pin); igb->perout[i].start.tv_sec = (__kernel_time_t )rq->__annonCompField115.perout.start.sec; igb->perout[i].start.tv_nsec = (long )rq->__annonCompField115.perout.start.nsec; igb->perout[i].period.tv_sec = ts.tv_sec; igb->perout[i].period.tv_nsec = ts.tv_nsec; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel((unsigned int )rq->__annonCompField115.perout.start.sec, (void volatile *)hw_addr___1 + (unsigned long )trgttimh); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(rq->__annonCompField115.perout.start.nsec, (void volatile *)hw_addr___2 + (unsigned long 
/* End of the PEROUT case (write TSAUXC/TSIM back, unlock, return 0), the PTP_CLK_REQ_PPS case (rq->type == 2: toggle TSIM bit 0 under tmreg_lock), and -95 (-EOPNOTSUPP) for any other request type. igb_ptp_feature_enable is the stub used by non-i210 MACs: always -EOPNOTSUPP. igb_ptp_verify_pin accepts pin functions NONE/EXTTS/PEROUT (0-2) and rejects PHYSYNC (3) with -1; the generated switch has no default, so out-of-range func values fall through to return 0 - this mirrors the original enum-exhaustive switch. Start of igb_ptp_tx_work: the deferred Tx-timestamp poller (body continues on the next line). */
)trgttiml); } else { } tsauxc = tsauxc | tsauxc_mask; tsim = tsim | tsim_mask; } else { tsauxc = ~ tsauxc_mask & tsauxc; tsim = ~ tsim_mask & tsim; } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(tsauxc, (void volatile *)hw_addr___3 + 46656U); } else { } __var___4 = (u8 *)0U; hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(tsim, (void volatile *)hw_addr___4 + 46708U); } else { } spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); case 2U: tmp___7 = spinlock_check(& igb->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp___7); tsim = igb_rd32(hw, 46708U); if (on != 0) { tsim = tsim | 1U; } else { tsim = tsim & 4294967294U; } __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___8 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___8 == 0L) { writel(tsim, (void volatile *)hw_addr___5 + 46708U); } else { } spin_unlock_irqrestore(& igb->tmreg_lock, flags); return (0); } return (-95); } } static int igb_ptp_feature_enable(struct ptp_clock_info *ptp , struct ptp_clock_request *rq , int on ) { { return (-95); } } static int igb_ptp_verify_pin(struct ptp_clock_info *ptp , unsigned int pin , enum ptp_pin_function func , unsigned int chan ) { { switch ((unsigned int )func) { case 0U: ; case 1U: ; case 2U: ; goto ldv_48576; case 3U: ; return (-1); } ldv_48576: ; return (0); } } static void igb_ptp_tx_work(struct work_struct *work ) { struct igb_adapter *adapter ; struct work_struct const *__mptr ; struct e1000_hw *hw ; u32 tsynctxctl ; { __mptr = (struct work_struct const *)work; adapter = (struct igb_adapter *)__mptr + 0xffffffffffffce38UL; hw = & adapter->hw; if ((unsigned long )adapter->ptp_tx_skb == (unsigned long )((struct 
/* Body of igb_ptp_tx_work: exits if no Tx skb is pending; if the timestamp has been pending longer than 3750 jiffies (the time_is_before_jiffies expansion) the skb is dropped, the __IGB_PTP_TX_IN_PROGRESS bit (3) is released with clear_bit_unlock, the timeout counter is bumped and a warning logged. Otherwise, if TSYNCTXCTL (46612 = 0xB614) bit 0 says a timestamp is valid, igb_ptp_tx_hwtstamp delivers it; else the work re-queues itself. igb_ptp_overflow_check: periodic (delay 135000 jiffies) sampling of the PTP clock via the gettime64 callback so the 82576-class timecounter never wraps unobserved; the _ddebug descriptor block is the expansion of pr_debug (dynamic debug), emitting only when the site is enabled. Start of igb_ptp_rx_hang: Rx timestamp-stuck watchdog for 82576 (mac.type 2) only. */
sk_buff *)0)) { return; } else { } if ((long )((adapter->ptp_tx_start - (unsigned long )jiffies) + 3750UL) < 0L) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = (struct sk_buff *)0; clear_bit_unlock(3L, (unsigned long volatile *)(& adapter->state)); adapter->tx_hwtstamp_timeouts = adapter->tx_hwtstamp_timeouts + 1U; dev_warn((struct device const *)(& (adapter->pdev)->dev), "clearing Tx timestamp hang\n"); return; } else { } tsynctxctl = igb_rd32(hw, 46612U); if ((int )tsynctxctl & 1) { igb_ptp_tx_hwtstamp(adapter); } else { schedule_work___0(& adapter->ptp_tx_work); } return; } } static void igb_ptp_overflow_check(struct work_struct *work ) { struct igb_adapter *igb ; struct work_struct const *__mptr ; struct timespec ts ; struct _ddebug descriptor ; long tmp ; { __mptr = (struct work_struct const *)work; igb = (struct igb_adapter *)__mptr + 0xffffffffffffcf18UL; (*(igb->ptp_caps.gettime64))(& igb->ptp_caps, & ts); descriptor.modname = "igb"; descriptor.function = "igb_ptp_overflow_check"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/11386/dscv_tempdir/dscv/ri/32_7a/drivers/net/ethernet/intel/igb/igb_ptp.c"; descriptor.format = "igb overflow check at %lld.%09lu\n"; descriptor.lineno = 633U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "igb overflow check at %lld.%09lu\n", (long long )ts.tv_sec, ts.tv_nsec); } else { } schedule_delayed_work(& igb->ptp_overflow_work, 135000UL); return; } } void igb_ptp_rx_hang(struct igb_adapter *adapter ) { struct e1000_hw *hw ; u32 tsyncrxctl ; u32 tmp ; unsigned long rx_event ; { hw = & adapter->hw; tmp = igb_rd32(hw, 46624U); tsyncrxctl = tmp; if ((unsigned int )hw->mac.type != 2U) { return; } else { } if ((tsyncrxctl & 1U) == 0U) { adapter->last_rx_ptp_check = jiffies; return; } else { } rx_event = 
/* Tail of igb_ptp_rx_hang: if a latched Rx timestamp has sat unconsumed for more than 1250 jiffies, reading RXSTMPH (46632) clears the latch so timestamping can resume; counts and warns. igb_ptp_tx_hwtstamp: assembles the 64-bit Tx timestamp from TXSTMPL (46616) / TXSTMPH (46620), converts via igb_ptp_systim_to_hwtstamp, delivers it with skb_tstamp_tx, frees the skb and releases state bit 3. igb_ptp_rx_pktstamp: timestamp arrives in the packet buffer itself; `*(regval + 1UL)` takes the second __le64 word - NOTE(review): no le64 byte-swap is visible here, presumably elided by CIL for this x86 (little-endian) configuration. igb_ptp_rx_rgtstamp: register-latched variant reading RXSTMPL/RXSTMPH (46628/46632) when TSYNCRXCTL bit 0 reports validity, then records last_rx_timestamp for the hang detector. */
adapter->last_rx_ptp_check; if ((long )(rx_event - adapter->last_rx_timestamp) < 0L) { rx_event = adapter->last_rx_timestamp; } else { } if ((long )((rx_event - (unsigned long )jiffies) + 1250UL) < 0L) { igb_rd32(hw, 46632U); adapter->last_rx_ptp_check = jiffies; adapter->rx_hwtstamp_cleared = adapter->rx_hwtstamp_cleared + 1U; dev_warn((struct device const *)(& (adapter->pdev)->dev), "clearing Rx timestamp hang\n"); } else { } return; } } static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter ) { struct e1000_hw *hw ; struct skb_shared_hwtstamps shhwtstamps ; u64 regval ; u32 tmp ; u32 tmp___0 ; { hw = & adapter->hw; tmp = igb_rd32(hw, 46616U); regval = (u64 )tmp; tmp___0 = igb_rd32(hw, 46620U); regval = ((unsigned long long )tmp___0 << 32) | regval; igb_ptp_systim_to_hwtstamp(adapter, & shhwtstamps, regval); skb_tstamp_tx(adapter->ptp_tx_skb, & shhwtstamps); dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = (struct sk_buff *)0; clear_bit_unlock(3L, (unsigned long volatile *)(& adapter->state)); return; } } void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector , unsigned char *va , struct sk_buff *skb ) { __le64 *regval ; struct skb_shared_hwtstamps *tmp ; { regval = (__le64 *)va; tmp = skb_hwtstamps(skb); igb_ptp_systim_to_hwtstamp(q_vector->adapter, tmp, *(regval + 1UL)); return; } } void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector , struct sk_buff *skb ) { struct igb_adapter *adapter ; struct e1000_hw *hw ; u64 regval ; u32 tmp ; u32 tmp___0 ; u32 tmp___1 ; struct skb_shared_hwtstamps *tmp___2 ; { adapter = q_vector->adapter; hw = & adapter->hw; tmp = igb_rd32(hw, 46624U); if ((tmp & 1U) == 0U) { return; } else { } tmp___0 = igb_rd32(hw, 46628U); regval = (u64 )tmp___0; tmp___1 = igb_rd32(hw, 46632U); regval = ((unsigned long long )tmp___1 << 32) | regval; tmp___2 = skb_hwtstamps(skb); igb_ptp_systim_to_hwtstamp(adapter, tmp___2, regval); adapter->last_rx_timestamp = jiffies; return; } } int igb_ptp_get_ts_config(struct net_device 
/* igb_ptp_get_ts_config: SIOCGHWTSTAMP - copies the cached 12-byte hwtstamp_config to userspace (-14 = -EFAULT on copy failure). igb_ptp_set_timestamp_mode (head): validates and programs the requested timestamping mode. Rejects nonzero flags (-22/-EINVAL). tx_type switch: OFF clears tsync_tx_ctl and intentionally falls through to ON (both accepted); anything else is -34/-ERANGE. rx_filter switch maps each HWTSTAMP_FILTER_* to TSYNCRXCTL type bits + TSYNCRXCFG message selector; all PTP v2 variants collapse to the EVENT_V2 filter (rx_filter = 12); ALL/PTP_V1_L4_EVENT on non-82576 MACs use the "all packets" mode (rx_filter = 1). The default case (reject, -ERANGE) continues on the next line. */
*netdev , struct ifreq *ifr ) { struct igb_adapter *adapter ; void *tmp ; struct hwtstamp_config *config ; unsigned long tmp___0 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; config = & adapter->tstamp_config; tmp___0 = copy_to_user(ifr->ifr_ifru.ifru_data, (void const *)config, 12UL); return (tmp___0 != 0UL ? -14 : 0); } } static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter , struct hwtstamp_config *config ) { struct e1000_hw *hw ; u32 tsync_tx_ctl ; u32 tsync_rx_ctl ; u32 tsync_rx_cfg ; bool is_l4 ; bool is_l2 ; u32 regval ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___2 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___3 ; u8 *hw_addr___4 ; u8 *__var___4 ; long tmp___4 ; u32 ftqf ; u8 *hw_addr___5 ; u8 *__var___5 ; long tmp___5 ; u8 *hw_addr___6 ; u8 *__var___6 ; long tmp___6 ; u8 *hw_addr___7 ; u8 *__var___7 ; long tmp___7 ; u8 *hw_addr___8 ; u8 *__var___8 ; long tmp___8 ; u8 *hw_addr___9 ; u8 *__var___9 ; long tmp___9 ; { hw = & adapter->hw; tsync_tx_ctl = 16U; tsync_rx_ctl = 16U; tsync_rx_cfg = 0U; is_l4 = 0; is_l2 = 0; if (config->flags != 0) { return (-22); } else { } switch (config->tx_type) { case 0: tsync_tx_ctl = 0U; case 1: ; goto ldv_48657; default: ; return (-34); } ldv_48657: ; switch (config->rx_filter) { case 0: tsync_rx_ctl = 0U; goto ldv_48660; case 4: tsync_rx_ctl = tsync_rx_ctl | 2U; tsync_rx_cfg = 0U; is_l4 = 1; goto ldv_48660; case 5: tsync_rx_ctl = tsync_rx_ctl | 2U; tsync_rx_cfg = 1U; is_l4 = 1; goto ldv_48660; case 12: ; case 9: ; case 6: ; case 13: ; case 10: ; case 7: ; case 14: ; case 11: ; case 8: tsync_rx_ctl = tsync_rx_ctl | 10U; config->rx_filter = 12; is_l2 = 1; is_l4 = 1; goto ldv_48660; case 3: ; case 1: ; if ((unsigned int )hw->mac.type != 2U) { tsync_rx_ctl = tsync_rx_ctl | 8U; config->rx_filter = 1; goto ldv_48660; } else { } default: 
/* Middle of igb_ptp_set_timestamp_mode. i210/i350-family quirks: mac.type 1 (82575) supports no timestamping (-EINVAL if any mode requested); MACs newer than 82576 that enable any Rx filter are forced to "timestamp all packets" mode (filter 1), and i210/i211 (types 6/7) additionally set bit 31 of register 9220 (0x2404, Rx queue timestamping enable - presumed; confirm against the i210 datasheet). Then the computed modes are committed: TSYNCTXCTL (46612) and TSYNCRXCTL (46624) are read-modify-written preserving other bits, TSYNCRXCFG (24400 = 0x5F50) gets the V1 message selector, and ETQF3 (23740 = 0x5CBC) is loaded with the 1588 L2 ethertype filter when is_l2, else cleared (cleared branch continues on the next line). */
config->rx_filter = 0; return (-34); } ldv_48660: ; if ((unsigned int )hw->mac.type == 1U) { if ((tsync_rx_ctl | tsync_tx_ctl) != 0U) { return (-22); } else { } return (0); } else { } if ((unsigned int )hw->mac.type > 2U && tsync_rx_ctl != 0U) { tsync_rx_ctl = 16U; tsync_rx_ctl = tsync_rx_ctl | 8U; config->rx_filter = 1; is_l2 = 1; is_l4 = 1; if ((unsigned int )hw->mac.type == 6U || (unsigned int )hw->mac.type == 7U) { regval = igb_rd32(hw, 9220U); regval = regval | 2147483648U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(regval, (void volatile *)hw_addr + 9220U); } else { } } else { } } else { } regval = igb_rd32(hw, 46612U); regval = regval & 4294967279U; regval = regval | tsync_tx_ctl; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(regval, (void volatile *)hw_addr___0 + 46612U); } else { } regval = igb_rd32(hw, 46624U); regval = regval & 4294967265U; regval = regval | tsync_rx_ctl; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(regval, (void volatile *)hw_addr___1 + 46624U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(tsync_rx_cfg, (void volatile *)hw_addr___2 + 24400U); } else { } if ((int )is_l2) { __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(1140885751U, (void volatile *)hw_addr___3 + 23740U); } else { } } else { __var___4 = (u8 *)0U; 
/* Tail of igb_ptp_set_timestamp_mode. When is_l4: programs the UDP port 319 (0x013F = 16129, byte-swapped in register form) filters SPQF0 (23180) and IMIREXT0 (23212, value 528384), and FTQF0 (23020) with the queue filter 4160782353 minus the queue-enable bit; on 82576 (type 2) additionally writes 22988 and clears FTQF bit 31 to activate it. Otherwise FTQF0 is set to its disabled value 4026531840 (0xF0000000). Finally flushes with a STATUS read (reg 8) and drains any stale latched timestamps by reading TXSTMPL/H (46616/46620) and RXSTMPL/H (46628/46632). igb_ptp_set_ts_config (head): SIOCSHWTSTAMP - copy config in from userspace, apply, cache, copy back (continues on the next line). */
hw_addr___4 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___4 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(0U, (void volatile *)hw_addr___4 + 23740U); } else { } } if ((int )is_l4) { ftqf = 4160782353U; ftqf = ftqf & 4026531839U; __var___5 = (u8 *)0U; hw_addr___5 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___5 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(16129U, (void volatile *)hw_addr___5 + 23180U); } else { } __var___6 = (u8 *)0U; hw_addr___6 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___6 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(528384U, (void volatile *)hw_addr___6 + 23212U); } else { } if ((unsigned int )hw->mac.type == 2U) { __var___7 = (u8 *)0U; hw_addr___7 = *((u8 * volatile *)(& hw->hw_addr)); tmp___7 = ldv__builtin_expect((unsigned long )hw_addr___7 == (unsigned long )((u8 *)0U), 0L); if (tmp___7 == 0L) { writel(16129U, (void volatile *)hw_addr___7 + 22988U); } else { } ftqf = ftqf & 2147483647U; } else { } __var___8 = (u8 *)0U; hw_addr___8 = *((u8 * volatile *)(& hw->hw_addr)); tmp___8 = ldv__builtin_expect((unsigned long )hw_addr___8 == (unsigned long )((u8 *)0U), 0L); if (tmp___8 == 0L) { writel(ftqf, (void volatile *)hw_addr___8 + 23020U); } else { } } else { __var___9 = (u8 *)0U; hw_addr___9 = *((u8 * volatile *)(& hw->hw_addr)); tmp___9 = ldv__builtin_expect((unsigned long )hw_addr___9 == (unsigned long )((u8 *)0U), 0L); if (tmp___9 == 0L) { writel(4026531840U, (void volatile *)hw_addr___9 + 23020U); } else { } } igb_rd32(hw, 8U); regval = igb_rd32(hw, 46616U); regval = igb_rd32(hw, 46620U); regval = igb_rd32(hw, 46628U); regval = igb_rd32(hw, 46632U); return (0); } } int igb_ptp_set_ts_config(struct net_device *netdev , struct ifreq *ifr ) { struct igb_adapter *adapter ; void *tmp ; struct hwtstamp_config config ; int err ; unsigned long tmp___0 
; unsigned long tmp___1 ; { tmp = netdev_priv((struct net_device const *)netdev); adapter = (struct igb_adapter *)tmp; tmp___0 = copy_from_user((void *)(& config), (void const *)ifr->ifr_ifru.ifru_data, 12UL); if (tmp___0 != 0UL) { return (-14); } else { } err = igb_ptp_set_timestamp_mode(adapter, & config); if (err != 0) { return (err); } else { } memcpy((void *)(& adapter->tstamp_config), (void const *)(& config), 12UL); tmp___1 = copy_to_user(ifr->ifr_ifru.ifru_data, (void const *)(& config), 12UL); return (tmp___1 != 0UL ? -14 : 0); } } void igb_ptp_init(struct igb_adapter *adapter ) { struct e1000_hw *hw ; struct net_device *netdev ; int i ; u8 *hw_addr ; u8 *__var ; long tmp ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___0 ; struct ptp_pin_desc *ppd ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___1 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_0 ; struct timespec ts ; ktime_t tmp___2 ; struct timespec tmp___3 ; ktime_t tmp___4 ; struct lock_class_key __key___1 ; atomic_long_t __constr_expr_1 ; struct lock_class_key __key___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___5 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___6 ; bool tmp___7 ; { hw = & adapter->hw; netdev = adapter->netdev; switch ((unsigned int )hw->mac.type) { case 2U: snprintf((char *)(& adapter->ptp_caps.name), 16UL, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = & __this_module; adapter->ptp_caps.max_adj = 999999881; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = & igb_ptp_adjfreq_82576; adapter->ptp_caps.adjtime = & igb_ptp_adjtime_82576; adapter->ptp_caps.gettime64 = & igb_ptp_gettime_82576; adapter->ptp_caps.settime64 = & igb_ptp_settime_82576; adapter->ptp_caps.enable = & igb_ptp_feature_enable; adapter->cc.read = & igb_ptp_read_82576; adapter->cc.mask = 0xffffffffffffffffULL; adapter->cc.mult = 1U; adapter->cc.shift = 19U; __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp = 
ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp == 0L) { writel(25165824U, (void volatile *)hw_addr + 46600U); } else { } goto ldv_48726; case 3U: ; case 5U: ; case 4U: snprintf((char *)(& adapter->ptp_caps.name), 16UL, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = & __this_module; adapter->ptp_caps.max_adj = 62499999; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = & igb_ptp_adjfreq_82580; adapter->ptp_caps.adjtime = & igb_ptp_adjtime_82576; adapter->ptp_caps.gettime64 = & igb_ptp_gettime_82576; adapter->ptp_caps.settime64 = & igb_ptp_settime_82576; adapter->ptp_caps.enable = & igb_ptp_feature_enable; adapter->cc.read = & igb_ptp_read_82580; adapter->cc.mask = 1099511627775ULL; adapter->cc.mult = 1U; adapter->cc.shift = 0U; __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(0U, (void volatile *)hw_addr___0 + 46656U); } else { } goto ldv_48726; case 6U: ; case 7U: i = 0; goto ldv_48737; ldv_48736: ppd = (struct ptp_pin_desc *)(& adapter->sdp_config) + (unsigned long )i; snprintf((char *)(& ppd->name), 64UL, "SDP%d", i); ppd->index = (unsigned int )i; ppd->func = 0U; i = i + 1; ldv_48737: ; if (i <= 3) { goto ldv_48736; } else { } snprintf((char *)(& adapter->ptp_caps.name), 16UL, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = & __this_module; adapter->ptp_caps.max_adj = 62499999; adapter->ptp_caps.n_ext_ts = 2; adapter->ptp_caps.n_per_out = 2; adapter->ptp_caps.n_pins = 4; adapter->ptp_caps.pps = 1; adapter->ptp_caps.pin_config = (struct ptp_pin_desc *)(& adapter->sdp_config); adapter->ptp_caps.adjfreq = & igb_ptp_adjfreq_82580; adapter->ptp_caps.adjtime = & igb_ptp_adjtime_i210; adapter->ptp_caps.gettime64 = & igb_ptp_gettime_i210; adapter->ptp_caps.settime64 = & igb_ptp_settime_i210; adapter->ptp_caps.enable = & 
igb_ptp_feature_enable_i210; adapter->ptp_caps.verify = & igb_ptp_verify_pin; __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(0U, (void volatile *)hw_addr___1 + 46656U); } else { } goto ldv_48726; default: adapter->ptp_clock = (struct ptp_clock *)0; return; } ldv_48726: igb_rd32(hw, 8U); spinlock_check(& adapter->tmreg_lock); __raw_spin_lock_init(& adapter->tmreg_lock.__annonCompField17.rlock, "&(&adapter->tmreg_lock)->rlock", & __key); __init_work(& adapter->ptp_tx_work, 0); __constr_expr_0.counter = 137438953408L; adapter->ptp_tx_work.data = __constr_expr_0; lockdep_init_map(& adapter->ptp_tx_work.lockdep_map, "(&adapter->ptp_tx_work)", & __key___0, 0); INIT_LIST_HEAD(& adapter->ptp_tx_work.entry); adapter->ptp_tx_work.func = & igb_ptp_tx_work; if ((unsigned int )hw->mac.type == 6U || (unsigned int )hw->mac.type == 7U) { tmp___2 = ktime_get_real(); tmp___3 = ns_to_timespec(tmp___2.tv64); ts = tmp___3; igb_ptp_settime_i210(& adapter->ptp_caps, (struct timespec const *)(& ts)); } else { tmp___4 = ktime_get_real(); timecounter_init(& adapter->tc, (struct cyclecounter const *)(& adapter->cc), (u64 )tmp___4.tv64); __init_work(& adapter->ptp_overflow_work.work, 0); __constr_expr_1.counter = 137438953408L; adapter->ptp_overflow_work.work.data = __constr_expr_1; lockdep_init_map(& adapter->ptp_overflow_work.work.lockdep_map, "(&(&adapter->ptp_overflow_work)->work)", & __key___1, 0); INIT_LIST_HEAD(& adapter->ptp_overflow_work.work.entry); adapter->ptp_overflow_work.work.func = & igb_ptp_overflow_check; init_timer_key(& adapter->ptp_overflow_work.timer, 2097152U, "(&(&adapter->ptp_overflow_work)->timer)", & __key___2); adapter->ptp_overflow_work.timer.function = & delayed_work_timer_fn; adapter->ptp_overflow_work.timer.data = (unsigned long )(& adapter->ptp_overflow_work); schedule_delayed_work(& adapter->ptp_overflow_work, 
135000UL); } if ((unsigned int )hw->mac.type > 2U) { __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___5 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___5 == 0L) { writel(2U, (void volatile *)hw_addr___2 + 46708U); } else { } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___6 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___6 == 0L) { writel(524288U, (void volatile *)hw_addr___3 + 208U); } else { } } else { } adapter->tstamp_config.rx_filter = 0; adapter->tstamp_config.tx_type = 0; adapter->ptp_clock = ptp_clock_register(& adapter->ptp_caps, & (adapter->pdev)->dev); tmp___7 = IS_ERR((void const *)adapter->ptp_clock); if ((int )tmp___7) { adapter->ptp_clock = (struct ptp_clock *)0; dev_err((struct device const *)(& (adapter->pdev)->dev), "ptp_clock_register failed\n"); } else { _dev_info((struct device const *)(& (adapter->pdev)->dev), "added PHC on %s\n", (char *)(& (adapter->netdev)->name)); adapter->flags = adapter->flags | 32U; } return; } } void igb_ptp_stop(struct igb_adapter *adapter ) { { switch ((unsigned int )adapter->hw.mac.type) { case 2U: ; case 3U: ; case 5U: ; case 4U: ldv_cancel_delayed_work_sync_309(& adapter->ptp_overflow_work); goto ldv_48763; case 6U: ; case 7U: ; goto ldv_48763; default: ; return; } ldv_48763: ldv_cancel_work_sync_310(& adapter->ptp_tx_work); if ((unsigned long )adapter->ptp_tx_skb != (unsigned long )((struct sk_buff *)0)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = (struct sk_buff *)0; clear_bit_unlock(3L, (unsigned long volatile *)(& adapter->state)); } else { } if ((unsigned long )adapter->ptp_clock != (unsigned long )((struct ptp_clock *)0)) { ptp_clock_unregister(adapter->ptp_clock); _dev_info((struct device const *)(& (adapter->pdev)->dev), "removed PHC on %s\n", (char *)(& (adapter->netdev)->name)); adapter->flags = adapter->flags & 4294967263U; } 
else { } return; } } void igb_ptp_reset(struct igb_adapter *adapter ) { struct e1000_hw *hw ; unsigned long flags ; raw_spinlock_t *tmp ; u8 *hw_addr ; u8 *__var ; long tmp___0 ; u8 *hw_addr___0 ; u8 *__var___0 ; long tmp___1 ; u8 *hw_addr___1 ; u8 *__var___1 ; long tmp___2 ; u8 *hw_addr___2 ; u8 *__var___2 ; long tmp___3 ; u8 *hw_addr___3 ; u8 *__var___3 ; long tmp___4 ; struct timespec ts ; ktime_t tmp___5 ; struct timespec tmp___6 ; ktime_t tmp___7 ; { hw = & adapter->hw; if ((adapter->flags & 32U) == 0U) { return; } else { } igb_ptp_set_timestamp_mode(adapter, & adapter->tstamp_config); tmp = spinlock_check(& adapter->tmreg_lock); flags = _raw_spin_lock_irqsave(tmp); switch ((unsigned int )adapter->hw.mac.type) { case 2U: __var = (u8 *)0U; hw_addr = *((u8 * volatile *)(& hw->hw_addr)); tmp___0 = ldv__builtin_expect((unsigned long )hw_addr == (unsigned long )((u8 *)0U), 0L); if (tmp___0 == 0L) { writel(25165824U, (void volatile *)hw_addr + 46600U); } else { } goto ldv_48779; case 3U: ; case 5U: ; case 4U: ; case 6U: ; case 7U: __var___0 = (u8 *)0U; hw_addr___0 = *((u8 * volatile *)(& hw->hw_addr)); tmp___1 = ldv__builtin_expect((unsigned long )hw_addr___0 == (unsigned long )((u8 *)0U), 0L); if (tmp___1 == 0L) { writel(0U, (void volatile *)hw_addr___0 + 46656U); } else { } __var___1 = (u8 *)0U; hw_addr___1 = *((u8 * volatile *)(& hw->hw_addr)); tmp___2 = ldv__builtin_expect((unsigned long )hw_addr___1 == (unsigned long )((u8 *)0U), 0L); if (tmp___2 == 0L) { writel(0U, (void volatile *)hw_addr___1 + 60U); } else { } __var___2 = (u8 *)0U; hw_addr___2 = *((u8 * volatile *)(& hw->hw_addr)); tmp___3 = ldv__builtin_expect((unsigned long )hw_addr___2 == (unsigned long )((u8 *)0U), 0L); if (tmp___3 == 0L) { writel(2U, (void volatile *)hw_addr___2 + 46708U); } else { } __var___3 = (u8 *)0U; hw_addr___3 = *((u8 * volatile *)(& hw->hw_addr)); tmp___4 = ldv__builtin_expect((unsigned long )hw_addr___3 == (unsigned long )((u8 *)0U), 0L); if (tmp___4 == 0L) { writel(524288U, 
(void volatile *)hw_addr___3 + 208U); } else { } goto ldv_48779; default: ; goto out; } ldv_48779: ; if ((unsigned int )hw->mac.type == 6U || (unsigned int )hw->mac.type == 7U) { tmp___5 = ktime_get_real(); tmp___6 = ns_to_timespec(tmp___5.tv64); ts = tmp___6; igb_ptp_write_i210(adapter, (struct timespec const *)(& ts)); } else { tmp___7 = ktime_get_real(); timecounter_init(& adapter->tc, (struct cyclecounter const *)(& adapter->cc), (u64 )tmp___7.tv64); } out: spin_unlock_irqrestore(& adapter->tmreg_lock, flags); return; } } void call_and_disable_all_11(int state ) { { if (ldv_work_11_0 == state) { call_and_disable_work_11(ldv_work_struct_11_0); } else { } if (ldv_work_11_1 == state) { call_and_disable_work_11(ldv_work_struct_11_1); } else { } if (ldv_work_11_2 == state) { call_and_disable_work_11(ldv_work_struct_11_2); } else { } if (ldv_work_11_3 == state) { call_and_disable_work_11(ldv_work_struct_11_3); } else { } return; } } void call_and_disable_all_12(int state ) { { if (ldv_work_12_0 == state) { call_and_disable_work_12(ldv_work_struct_12_0); } else { } if (ldv_work_12_1 == state) { call_and_disable_work_12(ldv_work_struct_12_1); } else { } if (ldv_work_12_2 == state) { call_and_disable_work_12(ldv_work_struct_12_2); } else { } if (ldv_work_12_3 == state) { call_and_disable_work_12(ldv_work_struct_12_3); } else { } return; } } void invoke_work_11(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_11_0 == 2 || ldv_work_11_0 == 3) { ldv_work_11_0 = 4; igb_ptp_tx_work(ldv_work_struct_11_0); ldv_work_11_0 = 1; } else { } goto ldv_48812; case 1: ; if (ldv_work_11_1 == 2 || ldv_work_11_1 == 3) { ldv_work_11_1 = 4; igb_ptp_tx_work(ldv_work_struct_11_0); ldv_work_11_1 = 1; } else { } goto ldv_48812; case 2: ; if (ldv_work_11_2 == 2 || ldv_work_11_2 == 3) { ldv_work_11_2 = 4; igb_ptp_tx_work(ldv_work_struct_11_0); ldv_work_11_2 = 1; } else { } goto ldv_48812; case 3: ; if (ldv_work_11_3 == 2 || ldv_work_11_3 == 3) { 
ldv_work_11_3 = 4; igb_ptp_tx_work(ldv_work_struct_11_0); ldv_work_11_3 = 1; } else { } goto ldv_48812; default: ldv_stop(); } ldv_48812: ; return; } } void invoke_work_12(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_12_0 == 2 || ldv_work_12_0 == 3) { ldv_work_12_0 = 4; igb_ptp_overflow_check(ldv_work_struct_12_0); ldv_work_12_0 = 1; } else { } goto ldv_48823; case 1: ; if (ldv_work_12_1 == 2 || ldv_work_12_1 == 3) { ldv_work_12_1 = 4; igb_ptp_overflow_check(ldv_work_struct_12_0); ldv_work_12_1 = 1; } else { } goto ldv_48823; case 2: ; if (ldv_work_12_2 == 2 || ldv_work_12_2 == 3) { ldv_work_12_2 = 4; igb_ptp_overflow_check(ldv_work_struct_12_0); ldv_work_12_2 = 1; } else { } goto ldv_48823; case 3: ; if (ldv_work_12_3 == 2 || ldv_work_12_3 == 3) { ldv_work_12_3 = 4; igb_ptp_overflow_check(ldv_work_struct_12_0); ldv_work_12_3 = 1; } else { } goto ldv_48823; default: ldv_stop(); } ldv_48823: ; return; } } void call_and_disable_work_12(struct work_struct *work ) { { if ((ldv_work_12_0 == 2 || ldv_work_12_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_0) { igb_ptp_overflow_check(work); ldv_work_12_0 = 1; return; } else { } if ((ldv_work_12_1 == 2 || ldv_work_12_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_1) { igb_ptp_overflow_check(work); ldv_work_12_1 = 1; return; } else { } if ((ldv_work_12_2 == 2 || ldv_work_12_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_2) { igb_ptp_overflow_check(work); ldv_work_12_2 = 1; return; } else { } if ((ldv_work_12_3 == 2 || ldv_work_12_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_3) { igb_ptp_overflow_check(work); ldv_work_12_3 = 1; return; } else { } return; } } void work_init_11(void) { { ldv_work_11_0 = 0; ldv_work_11_1 = 0; ldv_work_11_2 = 0; ldv_work_11_3 = 0; return; } } void activate_work_11(struct work_struct *work , int state ) { { if (ldv_work_11_0 == 0) { ldv_work_struct_11_0 = work; 
ldv_work_11_0 = state; return; } else { } if (ldv_work_11_1 == 0) { ldv_work_struct_11_1 = work; ldv_work_11_1 = state; return; } else { } if (ldv_work_11_2 == 0) { ldv_work_struct_11_2 = work; ldv_work_11_2 = state; return; } else { } if (ldv_work_11_3 == 0) { ldv_work_struct_11_3 = work; ldv_work_11_3 = state; return; } else { } return; } } void disable_work_11(struct work_struct *work ) { { if ((ldv_work_11_0 == 3 || ldv_work_11_0 == 2) && (unsigned long )ldv_work_struct_11_0 == (unsigned long )work) { ldv_work_11_0 = 1; } else { } if ((ldv_work_11_1 == 3 || ldv_work_11_1 == 2) && (unsigned long )ldv_work_struct_11_1 == (unsigned long )work) { ldv_work_11_1 = 1; } else { } if ((ldv_work_11_2 == 3 || ldv_work_11_2 == 2) && (unsigned long )ldv_work_struct_11_2 == (unsigned long )work) { ldv_work_11_2 = 1; } else { } if ((ldv_work_11_3 == 3 || ldv_work_11_3 == 2) && (unsigned long )ldv_work_struct_11_3 == (unsigned long )work) { ldv_work_11_3 = 1; } else { } return; } } void disable_work_12(struct work_struct *work ) { { if ((ldv_work_12_0 == 3 || ldv_work_12_0 == 2) && (unsigned long )ldv_work_struct_12_0 == (unsigned long )work) { ldv_work_12_0 = 1; } else { } if ((ldv_work_12_1 == 3 || ldv_work_12_1 == 2) && (unsigned long )ldv_work_struct_12_1 == (unsigned long )work) { ldv_work_12_1 = 1; } else { } if ((ldv_work_12_2 == 3 || ldv_work_12_2 == 2) && (unsigned long )ldv_work_struct_12_2 == (unsigned long )work) { ldv_work_12_2 = 1; } else { } if ((ldv_work_12_3 == 3 || ldv_work_12_3 == 2) && (unsigned long )ldv_work_struct_12_3 == (unsigned long )work) { ldv_work_12_3 = 1; } else { } return; } } void activate_work_12(struct work_struct *work , int state ) { { if (ldv_work_12_0 == 0) { ldv_work_struct_12_0 = work; ldv_work_12_0 = state; return; } else { } if (ldv_work_12_1 == 0) { ldv_work_struct_12_1 = work; ldv_work_12_1 = state; return; } else { } if (ldv_work_12_2 == 0) { ldv_work_struct_12_2 = work; ldv_work_12_2 = state; return; } else { } if (ldv_work_12_3 
== 0) { ldv_work_struct_12_3 = work; ldv_work_12_3 = state; return; } else { } return; } } void work_init_12(void) { { ldv_work_12_0 = 0; ldv_work_12_1 = 0; ldv_work_12_2 = 0; ldv_work_12_3 = 0; return; } } void call_and_disable_work_11(struct work_struct *work ) { { if ((ldv_work_11_0 == 2 || ldv_work_11_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_0) { igb_ptp_tx_work(work); ldv_work_11_0 = 1; return; } else { } if ((ldv_work_11_1 == 2 || ldv_work_11_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_1) { igb_ptp_tx_work(work); ldv_work_11_1 = 1; return; } else { } if ((ldv_work_11_2 == 2 || ldv_work_11_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_2) { igb_ptp_tx_work(work); ldv_work_11_2 = 1; return; } else { } if ((ldv_work_11_3 == 2 || ldv_work_11_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_3) { igb_ptp_tx_work(work); ldv_work_11_3 = 1; return; } else { } return; } } __inline static bool IS_ERR(void const *ptr ) { bool tmp ; { tmp = ldv_is_err(ptr); return (tmp); } } bool ldv_queue_work_on_297(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_298(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_299(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = 
queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_300(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_301(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_302(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_303(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_304(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_305(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_306(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_307(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_308(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } bool ldv_cancel_delayed_work_sync_309(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___23 ldv_func_res ; bool tmp ; { tmp = 
cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_11(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_work_sync_310(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___24 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_11(ldv_func_arg1); return (ldv_func_res); } } long ldv_ptr_err(void const *ptr ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; int ldv_mutex_trylock_337(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_335(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_338(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_339(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_334(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_336(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_340(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_329(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_331(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_330(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_333(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_332(struct workqueue_struct *ldv_func_arg1 ) ; extern void *devm_kmalloc(struct device * , size_t , gfp_t ) ; __inline static void *devm_kzalloc(struct device *dev , size_t size , gfp_t gfp ) { void *tmp ; { tmp = devm_kmalloc(dev, size, gfp | 32768U); return (tmp); } } extern struct i2c_client *i2c_new_device(struct i2c_adapter * , struct i2c_board_info const * ) ; extern struct device *devm_hwmon_device_register_with_groups(struct device * , char const * , void * , struct 
attribute_group const ** ) ; static struct i2c_board_info i350_sensor_info = {{'i', '3', '5', '0', 'b', 'b', '\000'}, (unsigned short)0, 124U, 0, 0, 0, 0, 0}; static ssize_t igb_hwmon_show_location(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *igb_attr ; struct device_attribute const *__mptr ; int tmp ; { __mptr = (struct device_attribute const *)attr; igb_attr = (struct hwmon_attr *)__mptr; tmp = sprintf(buf, "loc%u\n", (int )(igb_attr->sensor)->location); return ((ssize_t )tmp); } } static ssize_t igb_hwmon_show_temp(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *igb_attr ; struct device_attribute const *__mptr ; unsigned int value ; int tmp ; { __mptr = (struct device_attribute const *)attr; igb_attr = (struct hwmon_attr *)__mptr; (*((igb_attr->hw)->mac.ops.get_thermal_sensor_data))(igb_attr->hw); value = (unsigned int )(igb_attr->sensor)->temp; value = value * 1000U; tmp = sprintf(buf, "%u\n", value); return ((ssize_t )tmp); } } static ssize_t igb_hwmon_show_cautionthresh(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *igb_attr ; struct device_attribute const *__mptr ; unsigned int value ; int tmp ; { __mptr = (struct device_attribute const *)attr; igb_attr = (struct hwmon_attr *)__mptr; value = (unsigned int )(igb_attr->sensor)->caution_thresh; value = value * 1000U; tmp = sprintf(buf, "%u\n", value); return ((ssize_t )tmp); } } static ssize_t igb_hwmon_show_maxopthresh(struct device *dev , struct device_attribute *attr , char *buf ) { struct hwmon_attr *igb_attr ; struct device_attribute const *__mptr ; unsigned int value ; int tmp ; { __mptr = (struct device_attribute const *)attr; igb_attr = (struct hwmon_attr *)__mptr; value = (unsigned int )(igb_attr->sensor)->max_op_thresh; value = value * 1000U; tmp = sprintf(buf, "%u\n", value); return ((ssize_t )tmp); } } static int igb_add_hwmon_attr(struct igb_adapter *adapter , unsigned int offset , 
int type ) { int rc ; unsigned int n_attr ; struct hwmon_attr *igb_attr ; struct lock_class_key __key ; { n_attr = (adapter->igb_hwmon_buff)->n_hwmon; igb_attr = (struct hwmon_attr *)(& (adapter->igb_hwmon_buff)->hwmon_list) + (unsigned long )n_attr; switch (type) { case 0: igb_attr->dev_attr.show = & igb_hwmon_show_location; snprintf((char *)(& igb_attr->name), 12UL, "temp%u_label", offset + 1U); goto ldv_48277; case 1: igb_attr->dev_attr.show = & igb_hwmon_show_temp; snprintf((char *)(& igb_attr->name), 12UL, "temp%u_input", offset + 1U); goto ldv_48277; case 2: igb_attr->dev_attr.show = & igb_hwmon_show_cautionthresh; snprintf((char *)(& igb_attr->name), 12UL, "temp%u_max", offset + 1U); goto ldv_48277; case 3: igb_attr->dev_attr.show = & igb_hwmon_show_maxopthresh; snprintf((char *)(& igb_attr->name), 12UL, "temp%u_crit", offset + 1U); goto ldv_48277; default: rc = -1; return (rc); } ldv_48277: igb_attr->sensor = (struct e1000_thermal_diode_data *)(& adapter->hw.mac.thermal_sensor_data.sensor) + (unsigned long )offset; igb_attr->hw = & adapter->hw; igb_attr->dev_attr.store = (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0; igb_attr->dev_attr.attr.mode = 292U; igb_attr->dev_attr.attr.name = (char const *)(& igb_attr->name); igb_attr->dev_attr.attr.key = & __key; (adapter->igb_hwmon_buff)->attrs[n_attr] = & igb_attr->dev_attr.attr; (adapter->igb_hwmon_buff)->n_hwmon = (adapter->igb_hwmon_buff)->n_hwmon + 1U; return (0); } } static void igb_sysfs_del_adapter(struct igb_adapter *adapter ) { { return; } } void igb_sysfs_exit(struct igb_adapter *adapter ) { { igb_sysfs_del_adapter(adapter); return; } } int igb_sysfs_init(struct igb_adapter *adapter ) { struct hwmon_buff *igb_hwmon ; struct i2c_client *client ; struct device *hwmon_dev ; unsigned int i ; int rc ; void *tmp ; long tmp___0 ; bool tmp___1 ; { rc = 0; if ((unsigned long )adapter->hw.mac.ops.init_thermal_sensor_thresh == (unsigned long )((s32 (*)(struct e1000_hw * ))0)) 
{ goto exit; } else { } rc = (*(adapter->hw.mac.ops.init_thermal_sensor_thresh))(& adapter->hw); if (rc != 0) { goto exit; } else { } tmp = devm_kzalloc(& (adapter->pdev)->dev, 1120UL, 208U); igb_hwmon = (struct hwmon_buff *)tmp; if ((unsigned long )igb_hwmon == (unsigned long )((struct hwmon_buff *)0)) { rc = -12; goto exit; } else { } adapter->igb_hwmon_buff = igb_hwmon; i = 0U; goto ldv_48300; ldv_48299: ; if ((unsigned int )adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0U) { goto ldv_48298; } else { } rc = igb_add_hwmon_attr(adapter, i, 2); if (rc != 0) { goto exit; } else { } rc = igb_add_hwmon_attr(adapter, i, 0); if (rc != 0) { goto exit; } else { } rc = igb_add_hwmon_attr(adapter, i, 1); if (rc != 0) { goto exit; } else { } rc = igb_add_hwmon_attr(adapter, i, 3); if (rc != 0) { goto exit; } else { } ldv_48298: i = i + 1U; ldv_48300: ; if (i <= 2U) { goto ldv_48299; } else { } client = i2c_new_device(& adapter->i2c_adap, (struct i2c_board_info const *)(& i350_sensor_info)); if ((unsigned long )client == (unsigned long )((struct i2c_client *)0)) { _dev_info((struct device const *)(& (adapter->pdev)->dev), "Failed to create new i2c device.\n"); rc = -19; goto exit; } else { } adapter->i2c_client = client; igb_hwmon->groups[0] = (struct attribute_group const *)(& igb_hwmon->group); igb_hwmon->group.attrs = (struct attribute **)(& igb_hwmon->attrs); hwmon_dev = devm_hwmon_device_register_with_groups(& (adapter->pdev)->dev, (char const *)(& client->name), (void *)igb_hwmon, (struct attribute_group const **)(& igb_hwmon->groups)); tmp___1 = IS_ERR((void const *)hwmon_dev); if ((int )tmp___1) { tmp___0 = PTR_ERR((void const *)hwmon_dev); rc = (int )tmp___0; goto err; } else { } goto exit; err: igb_sysfs_del_adapter(adapter); exit: ; return (rc); } } __inline static long PTR_ERR(void const *ptr ) { long tmp ; { tmp = ldv_ptr_err(ptr); return (tmp); } } bool ldv_queue_work_on_329(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct 
work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_330(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_331(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_11(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_332(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_11(2); return; } } bool ldv_queue_delayed_work_on_333(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_11(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_334(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_335(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_336(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_337(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; 
int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_338(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_339(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_340(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static void ldv_error(void) { { ERROR: ; __VERIFIER_error(); } } __inline static int ldv_undef_int_negative(void) { int ret ; int tmp ; { tmp = ldv_undef_int(); ret = tmp; if (ret >= 0) { ldv_stop(); } else { } return (ret); } } bool ldv_is_err(void const *ptr ) { { return ((unsigned long )ptr > 2012UL); } } void *ldv_err_ptr(long error ) { { return ((void *)(2012L - error)); } } long ldv_ptr_err(void const *ptr ) { { return ((long )(2012UL - (unsigned long )ptr)); } } bool ldv_is_err_or_null(void const *ptr ) { bool tmp ; int tmp___0 ; { if ((unsigned long )ptr == (unsigned long )((void const *)0)) { tmp___0 = 1; } else { tmp = ldv_is_err(ptr); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } } static int ldv_mutex_i_mutex_of_inode = 1; int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_i_mutex_of_inode = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_i_mutex_of_inode = 2; return (0); } else { return (-4); } } } 
/*
 * LDV mutex state machines (generated by CIL).  Three mutex groups are
 * tracked — inode->i_mutex, a generic "lock" group, and dev->mutex —
 * each via a static int: 1 = unlocked, 2 = locked.  Lock/unlock order
 * violations call ldv_error().  All code tokens kept exactly as
 * generated; only comments and formatting were added.
 */

/* mutex_lock() model for i_mutex: locking an already-locked mutex is a
 * double-lock violation. */
void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock )
{
  {
  if (ldv_mutex_i_mutex_of_inode != 1) {
    ldv_error();
  } else {
  }
  ldv_mutex_i_mutex_of_inode = 2;
  return;
  }
}

/* mutex_trylock() model: nondeterministically either fails (returns 0,
 * mutex assumed held by another thread) or acquires it (returns 1). */
int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock )
{
  int is_mutex_held_by_another_thread ;
  {
  if (ldv_mutex_i_mutex_of_inode != 1) {
    ldv_error();
  } else {
  }
  is_mutex_held_by_another_thread = ldv_undef_int();
  if (is_mutex_held_by_another_thread != 0) {
    return (0);
  } else {
    ldv_mutex_i_mutex_of_inode = 2;
    return (1);
  }
  }
}

/* atomic_dec_and_mutex_lock() model: nondeterministically decide whether
 * the counter dropped to zero; only then take the mutex and return 1. */
int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt , struct mutex *lock )
{
  int atomic_value_after_dec ;
  {
  if (ldv_mutex_i_mutex_of_inode != 1) {
    ldv_error();
  } else {
  }
  atomic_value_after_dec = ldv_undef_int();
  if (atomic_value_after_dec == 0) {
    ldv_mutex_i_mutex_of_inode = 2;
    return (1);
  } else {
  }
  return (0);
  }
}

/* mutex_is_locked() model: definitely locked when the state is 2; when
 * the model says unlocked, another thread may still hold it, so the
 * answer is nondeterministic. */
int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock )
{
  int nondetermined ;
  {
  if (ldv_mutex_i_mutex_of_inode == 1) {
    nondetermined = ldv_undef_int();
    if (nondetermined != 0) {
      return (0);
    } else {
      return (1);
    }
  } else {
    return (1);
  }
  }
}

/* mutex_unlock() model: releasing a mutex that is not held is a
 * violation. */
void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock )
{
  {
  if (ldv_mutex_i_mutex_of_inode != 2) {
    ldv_error();
  } else {
  }
  ldv_mutex_i_mutex_of_inode = 1;
  return;
  }
}

/* usb_lock_device() model: the concrete mutex pointer is irrelevant to
 * the state machine, so NULL is passed. */
void ldv_usb_lock_device_i_mutex_of_inode(void)
{
  {
  ldv_mutex_lock_i_mutex_of_inode((struct mutex *)0);
  return;
  }
}

/* usb_trylock_device() model. */
int ldv_usb_trylock_device_i_mutex_of_inode(void)
{
  int tmp ;
  {
  tmp = ldv_mutex_trylock_i_mutex_of_inode((struct mutex *)0);
  return (tmp);
  }
}

/* usb_lock_device_for_reset() model: either succeeds (lock taken,
 * returns 0) or fails with a nondeterministic negative error code. */
int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void)
{
  int tmp ;
  int tmp___0 ;
  {
  tmp___0 = ldv_undef_int();
  if (tmp___0 != 0) {
    ldv_mutex_lock_i_mutex_of_inode((struct mutex *)0);
    return (0);
  } else {
    tmp = ldv_undef_int_negative();
    return (tmp);
  }
  }
}

/* usb_unlock_device() model. */
void ldv_usb_unlock_device_i_mutex_of_inode(void)
{
  {
  ldv_mutex_unlock_i_mutex_of_inode((struct mutex *)0);
  return;
  }
}

/* Modelled state of the "lock" mutex group: 1 = unlocked, 2 = locked. */
static int ldv_mutex_lock = 1;

/* mutex_lock_interruptible() model: nondeterministically succeeds
 * (state -> locked, returns 0) or fails with -4 (-EINTR). */
int ldv_mutex_lock_interruptible_lock(struct mutex *lock )
{
  int nondetermined ;
  {
  if (ldv_mutex_lock != 1) {
    ldv_error();
  } else {
  }
  nondetermined = ldv_undef_int();
  if (nondetermined != 0) {
    ldv_mutex_lock = 2;
    return (0);
  } else {
    return (-4);
  }
  }
}

/* mutex_lock_killable() model: same shape as the interruptible variant. */
int ldv_mutex_lock_killable_lock(struct mutex *lock )
{
  int nondetermined ;
  {
  if (ldv_mutex_lock != 1) {
    ldv_error();
  } else {
  }
  nondetermined = ldv_undef_int();
  if (nondetermined != 0) {
    ldv_mutex_lock = 2;
    return (0);
  } else {
    return (-4);
  }
  }
}

/* mutex_lock() model: double-lock is a violation. */
void ldv_mutex_lock_lock(struct mutex *lock )
{
  {
  if (ldv_mutex_lock != 1) {
    ldv_error();
  } else {
  }
  ldv_mutex_lock = 2;
  return;
  }
}

/* mutex_trylock() model: nondeterministic failure or acquisition. */
int ldv_mutex_trylock_lock(struct mutex *lock )
{
  int is_mutex_held_by_another_thread ;
  {
  if (ldv_mutex_lock != 1) {
    ldv_error();
  } else {
  }
  is_mutex_held_by_another_thread = ldv_undef_int();
  if (is_mutex_held_by_another_thread != 0) {
    return (0);
  } else {
    ldv_mutex_lock = 2;
    return (1);
  }
  }
}

/* atomic_dec_and_mutex_lock() model for the "lock" group. */
int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt , struct mutex *lock )
{
  int atomic_value_after_dec ;
  {
  if (ldv_mutex_lock != 1) {
    ldv_error();
  } else {
  }
  atomic_value_after_dec = ldv_undef_int();
  if (atomic_value_after_dec == 0) {
    ldv_mutex_lock = 2;
    return (1);
  } else {
  }
  return (0);
  }
}

/* mutex_is_locked() model for the "lock" group. */
int ldv_mutex_is_locked_lock(struct mutex *lock )
{
  int nondetermined ;
  {
  if (ldv_mutex_lock == 1) {
    nondetermined = ldv_undef_int();
    if (nondetermined != 0) {
      return (0);
    } else {
      return (1);
    }
  } else {
    return (1);
  }
  }
}

/* mutex_unlock() model: releasing a mutex that is not held is a
 * violation. */
void ldv_mutex_unlock_lock(struct mutex *lock )
{
  {
  if (ldv_mutex_lock != 2) {
    ldv_error();
  } else {
  }
  ldv_mutex_lock = 1;
  return;
  }
}

/* usb_lock_device() model for the "lock" group. */
void ldv_usb_lock_device_lock(void)
{
  {
  ldv_mutex_lock_lock((struct mutex *)0);
  return;
  }
}

/* usb_trylock_device() model for the "lock" group. */
int ldv_usb_trylock_device_lock(void)
{
  int tmp ;
  {
  tmp = ldv_mutex_trylock_lock((struct mutex *)0);
  return (tmp);
  }
}

/* usb_lock_device_for_reset() model for the "lock" group. */
int ldv_usb_lock_device_for_reset_lock(void)
{
  int tmp ;
  int tmp___0 ;
  {
  tmp___0 = ldv_undef_int();
  if (tmp___0 != 0) {
    ldv_mutex_lock_lock((struct mutex *)0);
    return (0);
  } else {
    tmp = ldv_undef_int_negative();
    return (tmp);
  }
  }
}

/* usb_unlock_device() model for the "lock" group. */
void ldv_usb_unlock_device_lock(void)
{
  {
  ldv_mutex_unlock_lock((struct mutex *)0);
  return;
  }
}

/* Modelled state of dev->mutex: 1 = unlocked, 2 = locked. */
static int ldv_mutex_mutex_of_device = 1;

/* mutex_lock_interruptible() model for dev->mutex. */
int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock )
{
  int nondetermined ;
  {
  if (ldv_mutex_mutex_of_device != 1) {
    ldv_error();
  } else {
  }
  nondetermined = ldv_undef_int();
  if (nondetermined != 0) {
    ldv_mutex_mutex_of_device = 2;
    return (0);
  } else {
    return (-4);
  }
  }
}

/* mutex_lock_killable() model for dev->mutex. */
int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock )
{
  int nondetermined ;
  {
  if (ldv_mutex_mutex_of_device != 1) {
    ldv_error();
  } else {
  }
  nondetermined = ldv_undef_int();
  if (nondetermined != 0) {
    ldv_mutex_mutex_of_device = 2;
    return (0);
  } else {
    return (-4);
  }
  }
}

/* mutex_lock() model for dev->mutex: double-lock is a violation. */
void ldv_mutex_lock_mutex_of_device(struct mutex *lock )
{
  {
  if (ldv_mutex_mutex_of_device != 1) {
    ldv_error();
  } else {
  }
  ldv_mutex_mutex_of_device = 2;
  return;
  }
}

/* mutex_trylock() model for dev->mutex. */
int ldv_mutex_trylock_mutex_of_device(struct mutex *lock )
{
  int is_mutex_held_by_another_thread ;
  {
  if (ldv_mutex_mutex_of_device != 1) {
    ldv_error();
  } else {
  }
  is_mutex_held_by_another_thread = ldv_undef_int();
  if (is_mutex_held_by_another_thread != 0) {
    return (0);
  } else {
    ldv_mutex_mutex_of_device = 2;
    return (1);
  }
  }
}

/* atomic_dec_and_mutex_lock() model for dev->mutex. */
int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt , struct mutex *lock )
{
  int atomic_value_after_dec ;
  {
  if (ldv_mutex_mutex_of_device != 1) {
    ldv_error();
  } else {
  }
  atomic_value_after_dec = ldv_undef_int();
  if (atomic_value_after_dec == 0) {
    ldv_mutex_mutex_of_device = 2;
    return (1);
  } else {
  }
  return (0);
  }
}

/* mutex_is_locked() model for dev->mutex. */
int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock )
{
  int nondetermined ;
  {
  if (ldv_mutex_mutex_of_device == 1) {
    nondetermined = ldv_undef_int();
    if (nondetermined != 0) {
      return (0);
    } else {
      return (1);
    }
  } else {
    return (1);
  }
  }
}

/* mutex_unlock() model for dev->mutex: releasing an unheld mutex is a
 * violation. */
void ldv_mutex_unlock_mutex_of_device(struct mutex *lock )
{
  {
  if (ldv_mutex_mutex_of_device != 2) {
    ldv_error();
  } else {
  }
  ldv_mutex_mutex_of_device = 1;
  return;
  }
}

/* usb_lock_device() model for dev->mutex. */
void ldv_usb_lock_device_mutex_of_device(void)
{
  {
  ldv_mutex_lock_mutex_of_device((struct mutex *)0);
  return;
  }
}

/* usb_trylock_device() model for dev->mutex. */
int ldv_usb_trylock_device_mutex_of_device(void)
{
  int tmp ;
  {
  tmp = ldv_mutex_trylock_mutex_of_device((struct mutex *)0);
  return (tmp);
  }
}

/* usb_lock_device_for_reset() model for dev->mutex. */
int ldv_usb_lock_device_for_reset_mutex_of_device(void)
{
  int tmp ;
  int tmp___0 ;
  {
  tmp___0 = ldv_undef_int();
  if (tmp___0 != 0) {
    ldv_mutex_lock_mutex_of_device((struct mutex *)0);
    return (0);
  } else {
    tmp = ldv_undef_int_negative();
    return (tmp);
  }
  }
}

/* usb_unlock_device() model for dev->mutex. */
void ldv_usb_unlock_device_mutex_of_device(void)
{
  {
  ldv_mutex_unlock_mutex_of_device((struct mutex *)0);
  return;
  }
}

/* Final check run when the environment model finishes: every tracked
 * mutex must be back in the unlocked state; a mutex still held at exit
 * is reported as a violation. */
void ldv_check_final_state(void)
{
  {
  if (ldv_mutex_i_mutex_of_inode != 1) {
    ldv_error();
  } else {
  }
  if (ldv_mutex_lock != 1) {
    ldv_error();
  } else {
  }
  if (ldv_mutex_mutex_of_device != 1) {
    ldv_error();
  } else {
  }
  return;
  }
}