/*
 * CIL v. 1.5.1 generated output: flattened Linux kernel type definitions used
 * as the preamble of a software-verification benchmark. __VERIFIER_error()
 * marks the error location for the model checker (SV-COMP convention).
 * Everything below is machine-generated; do not edit by hand.
 *
 * This section: fixed-width integer typedefs (__u8.., u8.., uint8_t..),
 * __kernel_* ABI typedefs, atomic_t/atomic64_t counters, list_head/hlist
 * primitives, callback_head (RCU), x86-64 pt_regs, GDT descriptor layout
 * (desc_struct bitfields), and paging types (pte_t, pgd_t, pgprot_t), plus
 * forward declarations of core kernel structs.
 */
extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef unsigned char __u8; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef __kernel_long_t __kernel_suseconds_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef __kernel_long_t __kernel_off_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u32 __be32; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef unsigned long uintptr_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u16 uint16_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 
{ long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct____missing_field_name_9 { unsigned int a ; unsigned int b ; }; struct __anonstruct____missing_field_name_10 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4 ; struct __anonstruct____missing_field_name_10 __annonCompField5 ; }; struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6 ; }; typedef unsigned long pteval_t; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct __anonstruct_pte_t_11 { pteval_t pte ; }; typedef struct __anonstruct_pte_t_11 pte_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_12 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_12 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct mm_struct; struct task_struct; struct cpumask; 
/*
 * CIL-generated (do not edit): x86 arch and core-kernel synchronization /
 * power-management types. Contents, in order: paravirt IRQ callbacks
 * (pv_irq_ops), qspinlock/qrwlock arch lock types, restart_block (syscall
 * restart state), vm86 register frame, cpumask, the x87/FXSAVE/XSAVE FPU
 * state hierarchy (fregs_state, fxregs_state, swregs_state, xregs_state,
 * union fpregs_state, struct fpu), struct thread_struct, lockdep bookkeeping
 * (lock_class, lockdep_map, held_lock), raw_spinlock_t/spinlock_t/rwlock_t/
 * seqlock_t, timespec/timeval and kuid_t/kgid_t, kstat, wait queues,
 * mutex/rw_semaphore/completion, ktime_t, timer_list, rbtree nodes,
 * work_struct/delayed_work, notifier blocks, struct resource, and the
 * runtime-PM types dev_pm_ops / dev_pm_info / dev_pm_domain.
 */
struct paravirt_callee_save { void *func ; }; struct pv_irq_ops { struct paravirt_callee_save save_fl ; struct paravirt_callee_save restore_fl ; struct paravirt_callee_save irq_disable ; struct paravirt_callee_save irq_enable ; void (*safe_halt)(void) ; void (*halt)(void) ; void (*adjust_exception_frame)(void) ; }; struct qspinlock { atomic_t val ; }; typedef struct qspinlock arch_spinlock_t; struct qrwlock { atomic_t cnts ; arch_spinlock_t lock ; }; typedef struct qrwlock arch_rwlock_t; typedef void (*ctor_fn_t)(void); struct device; struct file_operations; struct completion; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_16 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_17 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_18 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion____missing_field_name_15 { struct __anonstruct_futex_16 futex ; struct __anonstruct_nanosleep_17 nanosleep ; struct __anonstruct_poll_18 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion____missing_field_name_15 __annonCompField7 ; }; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion____missing_field_name_19 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion____missing_field_name_19 __annonCompField8 ; }; struct cpumask { unsigned long bits[128U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct static_key; struct fregs_state { 
u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct____missing_field_name_29 { u64 rip ; u64 rdp ; }; struct __anonstruct____missing_field_name_30 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion____missing_field_name_28 { struct __anonstruct____missing_field_name_29 __annonCompField12 ; struct __anonstruct____missing_field_name_30 __annonCompField13 ; }; union __anonunion____missing_field_name_31 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct fxregs_state { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion____missing_field_name_28 __annonCompField14 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion____missing_field_name_31 __annonCompField15 ; }; struct swregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct xstate_header { u64 xfeatures ; u64 xcomp_bv ; u64 reserved[6U] ; }; struct xregs_state { struct fxregs_state i387 ; struct xstate_header header ; u8 __reserved[464U] ; }; union fpregs_state { struct fregs_state fsave ; struct fxregs_state fxsave ; struct swregs_state soft ; struct xregs_state xsave ; }; struct fpu { union fpregs_state state ; unsigned int last_cpu ; unsigned char fpstate_active ; unsigned char fpregs_active ; unsigned char counter ; }; struct seq_operations; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct fpu fpu ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; unsigned long *io_bitmap_ptr ; 
unsigned long iopl ; unsigned int io_bitmap_max ; }; typedef atomic64_t atomic_long_t; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 1 ; unsigned char hardirqs_off : 1 ; unsigned short references : 12 ; unsigned int pin_count ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct____missing_field_name_35 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion____missing_field_name_34 { struct raw_spinlock rlock ; struct __anonstruct____missing_field_name_35 __annonCompField17 ; }; struct spinlock { union __anonunion____missing_field_name_34 __annonCompField18 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_36 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void 
*owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_36 rwlock_t; struct static_key { atomic_t enabled ; }; struct seqcount { unsigned int sequence ; struct lockdep_map dep_map ; }; typedef struct seqcount seqcount_t; struct __anonstruct_seqlock_t_45 { struct seqcount seqcount ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_45 seqlock_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; struct timeval { __kernel_time_t tv_sec ; __kernel_suseconds_t tv_usec ; }; struct user_namespace; struct __anonstruct_kuid_t_46 { uid_t val ; }; typedef struct __anonstruct_kuid_t_46 kuid_t; struct __anonstruct_kgid_t_47 { gid_t val ; }; typedef struct __anonstruct_kgid_t_47 kgid_t; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; struct vm_area_struct; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct __anonstruct_nodemask_t_48 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_48 nodemask_t; struct optimistic_spin_queue { atomic_t tail ; }; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct rw_semaphore; struct rw_semaphore { long count ; struct list_head wait_list ; raw_spinlock_t wait_lock ; struct optimistic_spin_queue osq ; struct task_struct *owner ; struct lockdep_map dep_map ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct notifier_block; struct timer_list { struct hlist_node entry ; unsigned long expires ; void (*function)(unsigned 
long ) ; unsigned long data ; u32 flags ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct nsproxy; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; struct workqueue_struct *wq ; int cpu ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct blocking_notifier_head { struct rw_semaphore rwsem ; struct notifier_block *head ; }; struct resource { resource_size_t start ; resource_size_t end ; char const *name ; unsigned long flags ; struct resource *parent ; struct resource *sibling ; struct resource *child ; }; struct pci_dev; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int 
(*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct wake_irq; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; struct list_head clock_list ; }; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool is_noirq_suspended ; bool is_late_suspended ; bool ignore_children ; bool early_init ; bool direct_complete ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; struct wake_irq *wakeirq ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; unsigned char memalloc_noio : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; void (*set_latency_tolerance)(struct device * , s32 ) ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; void (*detach)(struct device * , bool ) ; int (*activate)(struct device * ) ; void (*sync)(struct device * ) ; void (*dismiss)(struct device * ) ; }; struct pci_bus; 
/*
 * CIL-generated (do not edit): memory-management, sysfs/kernfs and module
 * types. Contents, in order: mm_context_t, uprobe task/area state, struct
 * page (with its nested anonymous unions for mapping/refcount/slab reuse),
 * vm_area_struct and mm_struct, ELF64 symbol typedefs (Elf64_Sym), idr/ida
 * radix-id allocators, kernfs node/root/ops and open-file types, kobject /
 * kset / sysfs attribute machinery, kernel_param and its ops, struct module
 * (symbol tables, section attrs, tracepoints, ref counting), capability set
 * (kernel_cap_t), priority lists (plist_*), and SysV IPC undo-list holders.
 */
struct __anonstruct_mm_context_t_115 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; atomic_t perf_rdpmc_allowed ; }; typedef struct __anonstruct_mm_context_t_115 mm_context_t; struct llist_node; struct llist_node { struct llist_node *next ; }; struct cred; struct inode; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct __anonstruct____missing_field_name_148 { struct arch_uprobe_task autask ; unsigned long vaddr ; }; struct __anonstruct____missing_field_name_149 { struct callback_head dup_xol_work ; unsigned long dup_xol_addr ; }; union __anonunion____missing_field_name_147 { struct __anonstruct____missing_field_name_148 __annonCompField33 ; struct __anonstruct____missing_field_name_149 __annonCompField34 ; }; struct uprobe; struct return_instance; struct uprobe_task { enum uprobe_task_state state ; union __anonunion____missing_field_name_147 __annonCompField35 ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; struct return_instance *return_instances ; unsigned int depth ; }; struct xol_area; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; struct mem_cgroup; typedef void compound_page_dtor(struct page * ); union __anonunion____missing_field_name_150 { struct address_space *mapping ; void *s_mem ; }; union __anonunion____missing_field_name_152 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct____missing_field_name_156 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion____missing_field_name_155 { atomic_t _mapcount ; struct __anonstruct____missing_field_name_156 __annonCompField38 ; int units ; }; struct __anonstruct____missing_field_name_154 { union __anonunion____missing_field_name_155 __annonCompField39 ; atomic_t _count ; 
}; union __anonunion____missing_field_name_153 { unsigned long counters ; struct __anonstruct____missing_field_name_154 __annonCompField40 ; unsigned int active ; }; struct __anonstruct____missing_field_name_151 { union __anonunion____missing_field_name_152 __annonCompField37 ; union __anonunion____missing_field_name_153 __annonCompField41 ; }; struct __anonstruct____missing_field_name_158 { struct page *next ; int pages ; int pobjects ; }; struct slab; struct __anonstruct____missing_field_name_159 { compound_page_dtor *compound_dtor ; unsigned long compound_order ; }; union __anonunion____missing_field_name_157 { struct list_head lru ; struct __anonstruct____missing_field_name_158 __annonCompField43 ; struct slab *slab_page ; struct callback_head callback_head ; struct __anonstruct____missing_field_name_159 __annonCompField44 ; pgtable_t pmd_huge_pte ; }; struct kmem_cache; union __anonunion____missing_field_name_160 { unsigned long private ; spinlock_t *ptl ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; union __anonunion____missing_field_name_150 __annonCompField36 ; struct __anonstruct____missing_field_name_151 __annonCompField42 ; union __anonunion____missing_field_name_157 __annonCompField45 ; union __anonunion____missing_field_name_160 __annonCompField46 ; struct mem_cgroup *mem_cgroup ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_shared_161 { struct rb_node rb ; unsigned long rb_subtree_last ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct __anonstruct_shared_161 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const 
*vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct task_rss_stat { int events ; int count[3U] ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct kioctx_table; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; u32 vmacache_seqnum ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; unsigned long mmap_base ; unsigned long mmap_legacy_base ; unsigned long task_size ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; atomic_long_t nr_ptes ; atomic_long_t nr_pmds ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[46U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct kioctx_table *ioctx_table ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_scan_offset ; int numa_scan_seq ; bool 
tlb_flush_pending ; struct uprobes_state uprobes_state ; void *bd_addr ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; union __anonunion____missing_field_name_166 { unsigned long bitmap[4U] ; struct callback_head callback_head ; }; struct idr_layer { int prefix ; int layer ; struct idr_layer *ary[256U] ; int count ; union __anonunion____missing_field_name_166 __annonCompField47 ; }; struct idr { struct idr_layer *hint ; struct idr_layer *top ; int layers ; int cur ; spinlock_t lock ; int id_free_cnt ; struct idr_layer *id_free ; }; struct ida_bitmap { long nr_busy ; unsigned long bitmap[15U] ; }; struct ida { struct idr idr ; struct ida_bitmap *free_bitmap ; }; struct dentry; struct iattr; struct super_block; struct file_system_type; struct kernfs_open_node; struct kernfs_iattrs; struct kernfs_root; struct kernfs_elem_dir { unsigned long subdirs ; struct rb_root children ; struct kernfs_root *root ; }; struct kernfs_node; struct kernfs_elem_symlink { struct kernfs_node *target_kn ; }; struct kernfs_ops; struct kernfs_elem_attr { struct kernfs_ops const *ops ; struct kernfs_open_node *open ; loff_t size ; struct kernfs_node *notify_next ; }; union __anonunion____missing_field_name_171 { struct kernfs_elem_dir dir ; struct kernfs_elem_symlink symlink ; struct kernfs_elem_attr attr ; }; struct kernfs_node { atomic_t count ; atomic_t active ; struct lockdep_map dep_map ; struct kernfs_node *parent ; char const *name ; struct rb_node rb ; void const *ns ; unsigned int hash ; union __anonunion____missing_field_name_171 __annonCompField48 ; void *priv ; unsigned short flags ; umode_t mode ; unsigned int ino ; struct kernfs_iattrs *iattr ; }; struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root * , int * , 
char * ) ; int (*show_options)(struct seq_file * , struct kernfs_root * ) ; int (*mkdir)(struct kernfs_node * , char const * , umode_t ) ; int (*rmdir)(struct kernfs_node * ) ; int (*rename)(struct kernfs_node * , struct kernfs_node * , char const * ) ; }; struct kernfs_root { struct kernfs_node *kn ; unsigned int flags ; struct ida ino_ida ; struct kernfs_syscall_ops *syscall_ops ; struct list_head supers ; wait_queue_head_t deactivate_waitq ; }; struct kernfs_open_file { struct kernfs_node *kn ; struct file *file ; void *priv ; struct mutex mutex ; int event ; struct list_head list ; char *prealloc_buf ; size_t atomic_write_len ; bool mmapped ; struct vm_operations_struct const *vm_ops ; }; struct kernfs_ops { int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; ssize_t (*read)(struct kernfs_open_file * , char * , size_t , loff_t ) ; size_t atomic_write_len ; bool prealloc ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; int (*mmap)(struct kernfs_open_file * , struct vm_area_struct * ) ; struct lock_class_key lockdep_key ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; bool (*current_may_mount)(void) ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct bin_attribute; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; struct bin_attribute **bin_attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t 
(*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct kernfs_node *sd ; struct kref kref ; struct delayed_work release ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *argv[3U] ; char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kernel_param; struct kernel_param_ops { unsigned int flags ; int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion____missing_field_name_172 { void *arg ; struct 
kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct module *mod ; struct kernel_param_ops const *ops ; u16 const perm ; s8 level ; u8 flags ; union __anonunion____missing_field_name_172 __annonCompField49 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct latch_tree_node { struct rb_node node[2U] ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; struct completion *kobj_completion ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; struct mod_tree_node { struct module *mod ; struct latch_tree_node node ; }; struct module_sect_attrs; struct module_notes_attrs; struct tracepoint; struct trace_event_call; struct trace_enum_map; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct mutex param_lock ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const 
*unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; bool sig_ok ; bool async_probe_requested ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; struct mod_tree_node mtn_core ; struct mod_tree_node mtn_init ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct trace_event_call **trace_events ; unsigned int num_trace_events ; struct trace_enum_map **trace_enums ; unsigned int num_trace_enums ; unsigned int num_ftrace_callsites ; unsigned long *ftrace_callsites ; bool klp_alive ; struct list_head source_list ; struct list_head target_list ; void (*exit)(void) ; atomic_t refcnt ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct plist_head { struct list_head node_list ; }; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; typedef unsigned long cputime_t; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct user_struct; 
/*
 * NOTE(review): This region is CIL-generated (v. 1.5.1, per the file header)
 * flattened Linux kernel type material for a verification harness
 * (__VERIFIER_error). Do NOT hand-edit the declarations: member order,
 * anonymous-aggregate numbering (__anonstruct_*/__anonunion_*), and array
 * bounds must match the kernel configuration this file was generated from,
 * and the verifier may depend on exact layout. Comments only below; every
 * non-comment token is unchanged.
 *
 * Contents of this span, in order:
 *  - POSIX-signal plumbing: sigset_t, sighandler/sigrestore function-pointer
 *    typedefs, sigval, the per-cause siginfo payload structs (_kill, _timer,
 *    _rt, _sigchld, _sigfault, _sigpoll, _sigsys) folded into siginfo_t,
 *    sigpending, sigaction/k_sigaction, and enum pid_type (continues onto the
 *    next line: PIDTYPE_MAX = 3).
 *  - PID bookkeeping (upid, pid, pid_link), percpu_counter, seccomp,
 *    rt_mutex (with debug fields name/file/line/magic), rlimit.
 *  - hrtimer machinery: timerqueue_node/head, enum hrtimer_restart, hrtimer,
 *    hrtimer_clock_base, hrtimer_cpu_base (its clock_base[4U] member closes
 *    on the following line).
 *  - task_io_accounting, latency_record, and the key/keyring types
 *    (assoc_array, keyring_index_key, struct key with its anonymous unions).
 *  - Credentials: group_info, cred; percpu_ref and the cgroup core types
 *    (cgroup_subsys_state, css_set, cgroup, cgroup_root, cftype,
 *    cgroup_subsys — whose attach callback's parameter list closes on the
 *    following line).
 *  - Signal-delivery/accounting aggregates: sighand_struct, pacct_struct,
 *    cpu_itimer, cputime/task_cputime(_atomic), thread_group_cputimer, and
 *    signal_struct (closes on a later line).
 *  - user_struct, sched_info, task_delay_info, wake_q_node, and scheduler
 *    entity types (load_weight, sched_avg, sched_statistics, sched_entity,
 *    sched_rt_entity, sched_dl_entity, memcg_oom_info), ending with forward
 *    declarations consumed by task_struct further down.
 */
struct sysv_shm { struct list_head shm_clist ; }; struct __anonstruct_sigset_t_180 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_180 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_182 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_183 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_184 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_185 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__addr_bnd_187 { void *_lower ; void *_upper ; }; struct __anonstruct__sigfault_186 { void *_addr ; short _addr_lsb ; struct __anonstruct__addr_bnd_187 _addr_bnd ; }; struct __anonstruct__sigpoll_188 { long _band ; int _fd ; }; struct __anonstruct__sigsys_189 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_181 { int _pad[28U] ; struct __anonstruct__kill_182 _kill ; struct __anonstruct__timer_183 _timer ; struct __anonstruct__rt_184 _rt ; struct __anonstruct__sigchld_185 _sigchld ; struct __anonstruct__sigfault_186 _sigfault ; struct __anonstruct__sigpoll_188 _sigpoll ; struct __anonstruct__sigsys_189 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_181 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX
= 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex { raw_spinlock_t wait_lock ; struct rb_root waiters ; struct rb_node *waiters_leftmost ; struct task_struct *owner ; int save_state ; char const *name ; char const *file ; int line ; void *magic ; }; struct rt_mutex_waiter; struct rlimit { __kernel_ulong_t rlim_cur ; __kernel_ulong_t rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t (*get_time)(void) ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; seqcount_t seq ; struct hrtimer *running ; unsigned int cpu ; unsigned int active_bases ; unsigned int clock_was_set_seq ; bool migration_enabled ; bool nohz_active ; unsigned char in_hrtirq : 1 ; unsigned char hres_active : 1 ; unsigned char hang_detected : 1 ; ktime_t expires_next ; struct hrtimer *next_timer ; unsigned int nr_events ; unsigned int nr_retries ; unsigned int nr_hangs ; unsigned int max_hang_time ; struct hrtimer_clock_base
clock_base[4U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root ; unsigned long nr_leaves_on_tree ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct key_type; struct keyring_index_key { struct key_type *type ; char const *description ; size_t desc_len ; }; union __anonunion____missing_field_name_196 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion____missing_field_name_197 { time_t expiry ; time_t revoked_at ; }; struct __anonstruct____missing_field_name_199 { struct key_type *type ; char *description ; }; union __anonunion____missing_field_name_198 { struct keyring_index_key index_key ; struct __anonstruct____missing_field_name_199 __annonCompField52 ; }; union __anonunion_type_data_200 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_202 { unsigned long value ; void *rcudata ; void *data ; void *data2[2U] ; }; union __anonunion____missing_field_name_201 { union __anonunion_payload_202 payload ; struct assoc_array keys ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion____missing_field_name_196 __annonCompField50 ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion____missing_field_name_197 __annonCompField51 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; union __anonunion____missing_field_name_198 __annonCompField53 ; union __anonunion_type_data_200 type_data ; union __anonunion____missing_field_name_201 __annonCompField54 ; }; struct audit_context; struct
group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; struct percpu_ref; typedef void percpu_ref_func_t(struct percpu_ref * ); struct percpu_ref { atomic_long_t count ; unsigned long percpu_count_ptr ; percpu_ref_func_t *release ; percpu_ref_func_t *confirm_switch ; bool force_atomic ; struct callback_head rcu ; }; struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct cgroup_subsys_state { struct cgroup *cgroup ; struct cgroup_subsys *ss ; struct percpu_ref refcnt ; struct cgroup_subsys_state *parent ; struct list_head sibling ; struct list_head children ; int id ; unsigned int flags ; u64 serial_nr ; struct callback_head callback_head ; struct work_struct destroy_work ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head mg_tasks ; struct list_head cgrp_links ; struct cgroup *dfl_cgrp ; struct cgroup_subsys_state *subsys[12U] ; struct list_head mg_preload_node ; struct list_head mg_node ; struct cgroup *mg_src_cgrp ; struct css_set *mg_dst_cset ; struct list_head e_cset_node[12U] ; struct callback_head callback_head ; }; struct cgroup { struct cgroup_subsys_state self ; unsigned long flags ; int id ; int populated_cnt ; struct kernfs_node *kn ; struct kernfs_node *procs_kn ; struct kernfs_node
*populated_kn ; unsigned int subtree_control ; unsigned int child_subsys_mask ; struct cgroup_subsys_state *subsys[12U] ; struct cgroup_root *root ; struct list_head cset_links ; struct list_head e_csets[12U] ; struct list_head pidlists ; struct mutex pidlist_mutex ; wait_queue_head_t offline_waitq ; struct work_struct release_agent_work ; }; struct cgroup_root { struct kernfs_root *kf_root ; unsigned int subsys_mask ; int hierarchy_id ; struct cgroup cgrp ; atomic_t nr_cgrps ; struct list_head root_list ; unsigned int flags ; struct idr cgroup_idr ; char release_agent_path[4096U] ; char name[64U] ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct cgroup_subsys *ss ; struct list_head node ; struct kernfs_ops *kf_ops ; u64 (*read_u64)(struct cgroup_subsys_state * , struct cftype * ) ; s64 (*read_s64)(struct cgroup_subsys_state * , struct cftype * ) ; int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; int (*write_u64)(struct cgroup_subsys_state * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup_subsys_state * , struct cftype * , s64 ) ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; struct lock_class_key lockdep_key ; }; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state * ) ; int (*css_online)(struct cgroup_subsys_state * ) ; void (*css_offline)(struct cgroup_subsys_state * ) ; void (*css_released)(struct cgroup_subsys_state * ) ; void (*css_free)(struct cgroup_subsys_state * ) ; void (*css_reset)(struct cgroup_subsys_state * ) ; void (*css_e_css_changed)(struct cgroup_subsys_state * ) ; int (*can_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*attach)(struct
cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup_subsys_state * , struct cgroup_subsys_state * , struct task_struct * ) ; void (*bind)(struct cgroup_subsys_state * ) ; int disabled ; int early_init ; bool broken_hierarchy ; bool warned_broken_hierarchy ; int id ; char const *name ; struct cgroup_root *root ; struct idr css_idr ; struct list_head cfts ; struct cftype *dfl_cftypes ; struct cftype *legacy_cftypes ; unsigned int depends_on ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct nameidata; struct cfs_rq; struct task_group; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct task_cputime_atomic { atomic64_t utime ; atomic64_t stime ; atomic64_t sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic ; int running ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; struct list_head thread_head ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; int posix_timer_id ; struct list_head posix_timers ; struct hrtimer real_timer
; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; seqlock_t stats_lock ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; unsigned int audit_tty_log_passwd ; struct tty_audit_buf *tty_audit_buf ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; u64 blkio_start ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; u64 freepages_start ; u64 freepages_delay ; u32 freepages_count ; }; struct wake_q_node { struct wake_q_node *next ; }; struct
io_context; struct pipe_inode_info; struct load_weight { unsigned long weight ; u32 inv_weight ; }; struct sched_avg { u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; unsigned long utilization_avg_contrib ; u32 runnable_avg_sum ; u32 avg_period ; u32 running_avg_sum ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; int depth ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned long watchdog_stamp ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct sched_dl_entity { struct rb_node rb_node ; u64 dl_runtime ; u64 dl_deadline ; u64 dl_period ; u64 dl_bw ; s64 runtime ; u64 deadline ; unsigned int flags ; int dl_throttled ; int dl_new ; int dl_boosted ; int dl_yielded ; struct hrtimer dl_timer ; }; struct memcg_oom_info { struct mem_cgroup *memcg ; gfp_t gfp_mask ; int order ; unsigned char may_oom : 1 ; }; struct sched_class; struct files_struct; struct compat_robust_list_head;
struct numa_group; struct ftrace_ret_stack; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; struct task_struct *last_wakee ; unsigned long wakee_flips ; unsigned long wakee_flip_decay_ts ; int wake_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct sched_dl_entity dl ; struct hlist_head preempt_notifiers ; unsigned int btrace_seq ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; unsigned long rcu_tasks_nvcsw ; bool rcu_tasks_holdout ; struct list_head rcu_tasks_holdout_list ; int rcu_tasks_idle_cpu ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct rb_node pushable_dl_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; u32 vmacache_seqnum ; struct vm_area_struct *vmacache[4U] ; struct task_rss_stat rss_stat ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned long jobctl ; unsigned int personality ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; unsigned char sched_migrated : 1 ; unsigned char memcg_kmem_skip_account : 1 ; unsigned char brk_randomized : 1 ; unsigned long atomic_flags ; struct restart_block restart_block ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct list_head thread_node ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; 
cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; u64 start_time ; u64 real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; struct nameidata *nameidata ; struct sysv_sem sysvsem ; struct sysv_shm sysvshm ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct wake_q_node wake_q ; struct rb_root pi_waiters ; struct rb_node *pi_waiters_leftmost ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long 
ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; unsigned int numa_scan_period ; unsigned int numa_scan_period_max ; int numa_preferred_nid ; unsigned long numa_migrate_retry ; u64 node_stamp ; u64 last_task_numa_placement ; u64 last_sum_exec_runtime ; struct callback_head numa_work ; struct list_head numa_entry ; struct numa_group *numa_group ; unsigned long *numa_faults ; unsigned long total_numa_faults ; unsigned long numa_faults_locality[3U] ; unsigned long numa_pages_migrated ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned int kasan_depth ; int curr_ret_stack ; struct ftrace_ret_stack *ret_stack ; unsigned long long ftrace_timestamp ; atomic_t trace_overrun ; atomic_t tracing_graph_pause ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_oom_info memcg_oom ; struct uprobe_task *utask ; unsigned int sequential_io ; unsigned int sequential_io_avg ; unsigned long task_state_change ; int pagefault_disabled ; }; struct drm_property; struct intel_dvo_device; struct drm_atomic_state; struct drm_connector; struct 
drm_crtc; struct drm_plane; struct drm_panel; struct i915_power_well; struct drm_i915_gem_object; struct device_attribute; struct drm_fb_helper; struct drm_i915_private; struct drm_plane_state; struct drm_display_mode; struct i2c_adapter; struct fb_info; struct fb_var_screeninfo; struct drm_gem_object; struct drm_encoder; struct dma_buf_attachment; struct drm_minor; struct mipi_dsi_host; struct dma_buf; struct drm_device; struct mipi_dsi_device; struct drm_framebuffer; struct backlight_device; struct drm_file; struct drm_dp_mst_topology_mgr; struct klist_node; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct path; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; size_t pad_until ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; struct user_namespace *user_ns ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct pinctrl; struct pinctrl_state; struct dev_pin_info { struct pinctrl *p ; struct pinctrl_state *default_state ; struct pinctrl_state *sleep_state ; struct pinctrl_state *idle_state ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct pdev_archdata { }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct device_attribute *dev_attrs ; struct attribute_group const **bus_groups ; struct attribute_group const **dev_groups ; struct attribute_group const **drv_groups ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , 
struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*online)(struct device * ) ; int (*offline)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops const *iommu_ops ; struct subsys_private *p ; struct lock_class_key lock_key ; }; struct device_type; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; struct of_device_id; struct acpi_device_id; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; enum probe_type probe_type ; struct of_device_id const *of_match_table ; struct acpi_device_id const *acpi_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct attribute_group const **dev_groups ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; }; struct device_type { char const 
*name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * , kuid_t * , kgid_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct cma; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; void *driver_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; struct dev_pin_info *pins ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; unsigned long dma_pfn_offset ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct cma *cma_area ; struct dev_archdata archdata ; struct device_node *of_node ; struct fwnode_handle *fwnode ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; bool offline_disabled ; bool offline ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct wake_irq *wakeirq ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool 
autosleep_enabled ; }; enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_PDATA = 3 } ; struct fwnode_handle { enum fwnode_type type ; struct fwnode_handle *secondary ; }; typedef unsigned long kernel_ulong_t; struct pci_device_id { __u32 vendor ; __u32 device ; __u32 subvendor ; __u32 subdevice ; __u32 class ; __u32 class_mask ; kernel_ulong_t driver_data ; }; struct acpi_device_id { __u8 id[9U] ; kernel_ulong_t driver_data ; }; struct of_device_id { char name[32U] ; char type[32U] ; char compatible[128U] ; void const *data ; }; struct platform_device_id { char name[20U] ; kernel_ulong_t driver_data ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct __anonstruct____missing_field_name_231 { spinlock_t lock ; int count ; }; union __anonunion____missing_field_name_230 { struct __anonstruct____missing_field_name_231 __annonCompField58 ; }; struct lockref { union __anonunion____missing_field_name_230 __annonCompField59 ; }; struct vfsmount; struct __anonstruct____missing_field_name_233 { u32 hash ; u32 len ; }; union __anonunion____missing_field_name_232 { struct __anonstruct____missing_field_name_233 __annonCompField60 ; u64 hash_len ; }; struct qstr { union __anonunion____missing_field_name_232 __annonCompField61 ; unsigned char const *name ; }; struct dentry_operations; union __anonunion_d_u_234 { struct hlist_node d_alias ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; struct lockref d_lockref ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; struct list_head d_child ; struct list_head d_subdirs ; union __anonunion_d_u_234 d_u ; }; struct dentry_operations { int 
(*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_weak_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct dentry const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; struct inode *(*d_select_inode)(struct dentry * , unsigned int ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; int nid ; struct mem_cgroup *memcg ; }; struct shrinker { unsigned long (*count_objects)(struct shrinker * , struct shrink_control * ) ; unsigned long (*scan_objects)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; unsigned long flags ; struct list_head list ; atomic_long_t *nr_deferred ; }; struct list_lru_one { struct list_head list ; long nr_items ; }; struct list_lru_memcg { struct list_lru_one *lru[0U] ; }; struct list_lru_node { spinlock_t lock ; struct list_lru_one lru ; struct list_lru_memcg *memcg_lrus ; }; struct list_lru { struct list_lru_node *node ; struct list_head list ; }; struct __anonstruct____missing_field_name_238 { struct radix_tree_node *parent ; void *private_data ; }; union __anonunion____missing_field_name_237 { struct __anonstruct____missing_field_name_238 __annonCompField62 ; struct callback_head callback_head ; }; struct radix_tree_node { unsigned int path ; unsigned int count ; union __anonunion____missing_field_name_237 __annonCompField63 ; struct list_head private_list ; void *slots[64U] ; unsigned long tags[3U][1U] ; }; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct 
fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct block_device; struct bdi_writeback; struct export_operations; struct kiocb; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iov_iter; struct vm_fault; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct dquot; typedef __kernel_uid32_t projid_t; struct __anonstruct_kprojid_t_242 { projid_t val ; }; typedef struct __anonstruct_kprojid_t_242 kprojid_t; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion____missing_field_name_243 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion____missing_field_name_243 __annonCompField65 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_max_spc_limit ; qsize_t dqi_max_ino_limit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int 
(*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; int (*get_projid)(struct inode * , kprojid_t * ) ; }; struct qc_dqblk { int d_fieldmask ; u64 d_spc_hardlimit ; u64 d_spc_softlimit ; u64 d_ino_hardlimit ; u64 d_ino_softlimit ; u64 d_space ; u64 d_ino_count ; s64 d_ino_timer ; s64 d_spc_timer ; int d_ino_warns ; int d_spc_warns ; u64 d_rt_spc_hardlimit ; u64 d_rt_spc_softlimit ; u64 d_rt_space ; s64 d_rt_spc_timer ; int d_rt_spc_warns ; }; struct qc_type_state { unsigned int flags ; unsigned int spc_timelimit ; unsigned int ino_timelimit ; unsigned int rt_spc_timelimit ; unsigned int spc_warnlimit ; unsigned int ino_warnlimit ; unsigned int rt_spc_warnlimit ; unsigned long long ino ; blkcnt_t blocks ; blkcnt_t nextents ; }; struct qc_state { unsigned int s_incoredqs ; struct qc_type_state s_state[3U] ; }; struct qc_info { int i_fieldmask ; unsigned int i_flags ; unsigned int i_spc_timelimit ; unsigned int i_ino_timelimit ; unsigned int i_rt_spc_timelimit ; unsigned int i_spc_warnlimit ; unsigned int i_ino_warnlimit ; unsigned int i_rt_spc_warnlimit ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_enable)(struct super_block * , unsigned int ) ; int (*quota_disable)(struct super_block * , unsigned int ) ; int (*quota_sync)(struct super_block * , int ) ; int 
(*set_info)(struct super_block * , int , struct qc_info * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*get_state)(struct super_block * , struct qc_state * ) ; int (*rm_xquota)(struct super_block * , unsigned int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct inode *files[3U] ; struct mem_dqinfo info[3U] ; struct quota_format_ops const *ops[3U] ; }; struct writeback_control; struct kiocb { struct file *ki_filp ; loff_t ki_pos ; void (*ki_complete)(struct kiocb * , long , long ) ; void *private ; int ki_flags ; }; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned int , unsigned int ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(struct kiocb * , struct iov_iter * , loff_t ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , unsigned long , unsigned long ) ; void (*is_dirty_writeback)(struct page * , bool * , 
bool * ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; atomic_t i_mmap_writable ; struct rb_root i_mmap ; struct rw_semaphore i_mmap_rwsem ; unsigned long nrpages ; unsigned long nrshadows ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion____missing_field_name_246 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion____missing_field_name_247 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock_context; struct cdev; union __anonunion____missing_field_name_248 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; char *i_link ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void 
*i_security ; unsigned long i_ino ; union __anonunion____missing_field_name_246 __annonCompField66 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; unsigned long dirtied_time_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct bdi_writeback *i_wb ; int i_wb_frn_winner ; u16 i_wb_frn_avg_time ; u16 i_wb_frn_history ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion____missing_field_name_247 __annonCompField67 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; atomic_t i_readcount ; struct file_operations const *i_fop ; struct file_lock_context *i_flctx ; struct address_space i_data ; struct list_head i_devices ; union __anonunion____missing_field_name_248 __annonCompField68 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_249 { struct llist_node fu_llist ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_249 f_u ; struct path f_path ; struct inode *f_inode ; struct file_operations const *f_op ; spinlock_t f_lock ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; struct mutex f_pos_lock ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; }; typedef void *fl_owner_t; struct 
file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; unsigned long (*lm_owner_key)(struct file_lock * ) ; fl_owner_t (*lm_get_owner)(fl_owner_t ) ; void (*lm_put_owner)(fl_owner_t ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , int ) ; bool (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock * , int , struct list_head * ) ; void (*lm_setup)(struct file_lock * , void ** ) ; }; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_251 { struct list_head link ; int state ; }; union __anonunion_fl_u_250 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_251 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_list ; struct hlist_node fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; int fl_link_cpu ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_250 fl_u ; }; struct file_lock_context { spinlock_t flc_lock ; struct list_head flc_flock ; struct list_head flc_posix ; struct list_head flc_lease ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t 
wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_iflags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head s_mounts ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; unsigned int s_quota_types ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; struct workqueue_struct *s_dio_done_wq ; struct hlist_head s_pins ; struct list_lru s_dentry_lru ; struct list_lru s_inode_lru ; struct callback_head rcu ; int s_stack_depth ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct dir_context; struct dir_context { int (*actor)(struct dir_context * , char const * , int , loff_t , u64 , unsigned int ) ; loff_t pos ; }; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t 
(*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*read_iter)(struct kiocb * , struct iov_iter * ) ; ssize_t (*write_iter)(struct kiocb * , struct iov_iter * ) ; int (*iterate)(struct file * , struct dir_context * ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*mremap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** , void ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; void (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; char const *(*follow_link)(struct dentry * , void ** ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct inode * , void * ) ; int (*create)(struct inode * , struct dentry * , 
umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*rename2)(struct inode * , struct dentry * , struct inode * , struct dentry * , unsigned int ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; int (*tmpfile)(struct inode * , struct dentry * , umode_t ) ; int (*set_acl)(struct inode * , struct posix_acl * , int ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_super)(struct super_block * ) ; int (*freeze_fs)(struct super_block * ) ; int (*thaw_super)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , 
int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; struct dquot **(*get_dquots)(struct inode * ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; long (*nr_cached_objects)(struct super_block * , struct shrink_control * ) ; long (*free_cached_objects)(struct super_block * , struct shrink_control * ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; struct proc_dir_entry; struct platform_device; enum chipset_type { NOT_SUPPORTED = 0, SUPPORTED = 1 } ; struct agp_version { u16 major ; u16 minor ; }; struct agp_kern_info { struct agp_version version ; struct pci_dev *device ; enum chipset_type chipset ; unsigned long mode ; unsigned long aper_base ; size_t aper_size ; int max_memory ; int current_memory ; bool cant_use_aperture ; unsigned long page_mask ; struct vm_operations_struct const *vm_ops ; }; struct agp_bridge_data; struct scatterlist; struct cdev { struct kobject kobj ; struct module *owner ; struct file_operations const *ops ; struct list_head list ; dev_t dev ; unsigned int count ; }; struct dma_attrs { unsigned long flags[1U] ; }; 
enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *cow_page ; struct page *page ; unsigned long max_pgoff ; pte_t *pte ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; void (*map_pages)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*pfn_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; char const *(*name)(struct vm_area_struct * ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; struct page *(*find_special_page)(struct vm_area_struct * , unsigned long ) ; }; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum 
dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; struct exception_table_entry { int insn ; int fixup ; }; enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; typedef enum irqreturn irqreturn_t; struct hotplug_slot; struct pci_slot { struct pci_bus *bus ; struct list_head list ; struct hotplug_slot *hotplug ; unsigned char number ; struct kobject kobj ; }; typedef int pci_power_t; typedef unsigned int pci_channel_state_t; enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; typedef unsigned short pci_dev_flags_t; typedef unsigned short pci_bus_flags_t; struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_ats; struct pci_driver; union __anonunion____missing_field_name_262 { struct pci_sriov *sriov ; struct pci_dev *physfn ; }; struct pci_dev { struct list_head bus_list ; struct pci_bus *bus ; struct pci_bus *subordinate ; void *sysdata ; struct proc_dir_entry *procent ; struct pci_slot *slot ; unsigned int devfn ; unsigned short vendor ; unsigned short device ; unsigned short subsystem_vendor ; unsigned short subsystem_device ; unsigned int class ; u8 revision ; u8 hdr_type ; u8 pcie_cap ; u8 msi_cap ; u8 msix_cap ; unsigned char pcie_mpss : 3 ; u8 rom_base_reg ; u8 pin ; u16 pcie_flags_reg ; u8 dma_alias_devfn ; 
struct pci_driver *driver ; u64 dma_mask ; struct device_dma_parameters dma_parms ; pci_power_t current_state ; u8 pm_cap ; unsigned char pme_support : 5 ; unsigned char pme_interrupt : 1 ; unsigned char pme_poll : 1 ; unsigned char d1_support : 1 ; unsigned char d2_support : 1 ; unsigned char no_d1d2 : 1 ; unsigned char no_d3cold : 1 ; unsigned char d3cold_allowed : 1 ; unsigned char mmio_always_on : 1 ; unsigned char wakeup_prepared : 1 ; unsigned char runtime_d3cold : 1 ; unsigned char ignore_hotplug : 1 ; unsigned int d3_delay ; unsigned int d3cold_delay ; struct pcie_link_state *link_state ; pci_channel_state_t error_state ; struct device dev ; int cfg_size ; unsigned int irq ; struct resource resource[17U] ; bool match_driver ; unsigned char transparent : 1 ; unsigned char multifunction : 1 ; unsigned char is_added : 1 ; unsigned char is_busmaster : 1 ; unsigned char no_msi : 1 ; unsigned char no_64bit_msi : 1 ; unsigned char block_cfg_access : 1 ; unsigned char broken_parity_status : 1 ; unsigned char irq_reroute_variant : 2 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char ari_enabled : 1 ; unsigned char is_managed : 1 ; unsigned char needs_freset : 1 ; unsigned char state_saved : 1 ; unsigned char is_physfn : 1 ; unsigned char is_virtfn : 1 ; unsigned char reset_fn : 1 ; unsigned char is_hotplug_bridge : 1 ; unsigned char __aer_firmware_first_valid : 1 ; unsigned char __aer_firmware_first : 1 ; unsigned char broken_intx_masking : 1 ; unsigned char io_window_1k : 1 ; unsigned char irq_managed : 1 ; unsigned char has_secondary_link : 1 ; pci_dev_flags_t dev_flags ; atomic_t enable_cnt ; u32 saved_config_space[16U] ; struct hlist_head saved_cap_space ; struct bin_attribute *rom_attr ; int rom_attr_enabled ; struct bin_attribute *res_attr[17U] ; struct bin_attribute *res_attr_wc[17U] ; struct list_head msi_list ; struct attribute_group const **msi_irq_groups ; struct pci_vpd *vpd ; union __anonunion____missing_field_name_262 
__annonCompField76 ; struct pci_ats *ats ; phys_addr_t rom ; size_t romlen ; char *driver_override ; }; struct pci_ops; struct msi_controller; struct pci_bus { struct list_head node ; struct pci_bus *parent ; struct list_head children ; struct list_head devices ; struct pci_dev *self ; struct list_head slots ; struct resource *resource[4U] ; struct list_head resources ; struct resource busn_res ; struct pci_ops *ops ; struct msi_controller *msi ; void *sysdata ; struct proc_dir_entry *procdir ; unsigned char number ; unsigned char primary ; unsigned char max_bus_speed ; unsigned char cur_bus_speed ; char name[48U] ; unsigned short bridge_ctl ; pci_bus_flags_t bus_flags ; struct device *bridge ; struct device dev ; struct bin_attribute *legacy_io ; struct bin_attribute *legacy_mem ; unsigned char is_added : 1 ; }; struct pci_ops { void *(*map_bus)(struct pci_bus * , unsigned int , int ) ; int (*read)(struct pci_bus * , unsigned int , int , int , u32 * ) ; int (*write)(struct pci_bus * , unsigned int , int , int , u32 ) ; }; struct pci_dynids { spinlock_t lock ; struct list_head list ; }; typedef unsigned int pci_ers_result_t; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev * , enum pci_channel_state ) ; pci_ers_result_t (*mmio_enabled)(struct pci_dev * ) ; pci_ers_result_t (*link_reset)(struct pci_dev * ) ; pci_ers_result_t (*slot_reset)(struct pci_dev * ) ; void (*reset_notify)(struct pci_dev * , bool ) ; void (*resume)(struct pci_dev * ) ; }; struct pci_driver { struct list_head node ; char const *name ; struct pci_device_id const *id_table ; int (*probe)(struct pci_dev * , struct pci_device_id const * ) ; void (*remove)(struct pci_dev * ) ; int (*suspend)(struct pci_dev * , pm_message_t ) ; int (*suspend_late)(struct pci_dev * , pm_message_t ) ; int (*resume_early)(struct pci_dev * ) ; int (*resume)(struct pci_dev * ) ; void (*shutdown)(struct pci_dev * ) ; int (*sriov_configure)(struct pci_dev * , int ) ; struct pci_error_handlers 
const *err_handler ; struct device_driver driver ; struct pci_dynids dynids ; }; struct mfd_cell; struct platform_device { char const *name ; int id ; bool id_auto ; struct device dev ; u32 num_resources ; struct resource *resource ; struct platform_device_id const *id_entry ; char *driver_override ; struct mfd_cell *mfd_cell ; struct pdev_archdata archdata ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; typedef unsigned int drm_magic_t; struct drm_clip_rect { unsigned short x1 ; unsigned short y1 ; unsigned short x2 ; unsigned short y2 ; }; struct drm_hw_lock { unsigned int volatile lock ; char padding[60U] ; }; enum drm_map_type { _DRM_FRAME_BUFFER = 0, _DRM_REGISTERS = 1, _DRM_SHM = 2, _DRM_AGP = 3, _DRM_SCATTER_GATHER = 4, _DRM_CONSISTENT = 5 } ; enum drm_map_flags { _DRM_RESTRICTED = 1, _DRM_READ_ONLY = 2, _DRM_LOCKED = 4, _DRM_KERNEL = 8, _DRM_WRITE_COMBINING = 16, _DRM_CONTAINS_LOCK = 32, _DRM_REMOVABLE = 64, _DRM_DRIVER = 128 } ; struct drm_mode_fb_cmd2 { __u32 fb_id ; __u32 width ; __u32 height ; __u32 pixel_format ; __u32 flags ; __u32 handles[4U] ; __u32 pitches[4U] ; __u32 offsets[4U] ; __u64 modifier[4U] ; }; struct drm_mode_create_dumb { uint32_t height ; uint32_t width ; uint32_t bpp ; uint32_t flags ; uint32_t handle ; uint32_t pitch ; uint64_t size ; }; struct drm_event { __u32 type ; __u32 length ; }; struct drm_event_vblank { struct drm_event base ; __u64 user_data ; __u32 tv_sec ; __u32 tv_usec ; __u32 sequence ; __u32 reserved ; }; struct drm_agp_head { struct agp_kern_info agp_info ; struct list_head memory ; unsigned long mode ; struct agp_bridge_data *bridge ; int enabled ; int acquired ; unsigned long base ; int agp_mtrr ; int cant_use_aperture ; unsigned long page_mask ; }; typedef u32 phandle; struct property { char *name ; int length ; void *value ; struct property *next ; unsigned long 
_flags ; unsigned int unique_id ; struct bin_attribute attr ; }; struct device_node { char const *name ; char const *type ; phandle phandle ; char const *full_name ; struct fwnode_handle fwnode ; struct property *properties ; struct property *deadprops ; struct device_node *parent ; struct device_node *child ; struct device_node *sibling ; struct kobject kobj ; unsigned long _flags ; void *data ; }; struct i2c_msg { __u16 addr ; __u16 flags ; __u16 len ; __u8 *buf ; }; union i2c_smbus_data { __u8 byte ; __u16 word ; __u8 block[34U] ; }; struct i2c_algorithm; struct i2c_client; enum i2c_slave_event; enum i2c_slave_event; struct i2c_client { unsigned short flags ; unsigned short addr ; char name[20U] ; struct i2c_adapter *adapter ; struct device dev ; int irq ; struct list_head detected ; int (*slave_cb)(struct i2c_client * , enum i2c_slave_event , u8 * ) ; }; enum i2c_slave_event { I2C_SLAVE_READ_REQUESTED = 0, I2C_SLAVE_WRITE_REQUESTED = 1, I2C_SLAVE_READ_PROCESSED = 2, I2C_SLAVE_WRITE_RECEIVED = 3, I2C_SLAVE_STOP = 4 } ; struct i2c_algorithm { int (*master_xfer)(struct i2c_adapter * , struct i2c_msg * , int ) ; int (*smbus_xfer)(struct i2c_adapter * , u16 , unsigned short , char , u8 , int , union i2c_smbus_data * ) ; u32 (*functionality)(struct i2c_adapter * ) ; int (*reg_slave)(struct i2c_client * ) ; int (*unreg_slave)(struct i2c_client * ) ; }; struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter * ) ; int (*get_scl)(struct i2c_adapter * ) ; void (*set_scl)(struct i2c_adapter * , int ) ; int (*get_sda)(struct i2c_adapter * ) ; void (*prepare_recovery)(struct i2c_adapter * ) ; void (*unprepare_recovery)(struct i2c_adapter * ) ; int scl_gpio ; int sda_gpio ; }; struct i2c_adapter_quirks { u64 flags ; int max_num_msgs ; u16 max_write_len ; u16 max_read_len ; u16 max_comb_1st_msg_len ; u16 max_comb_2nd_msg_len ; }; struct i2c_adapter { struct module *owner ; unsigned int class ; struct i2c_algorithm const *algo ; void *algo_data ; struct rt_mutex 
bus_lock ; int timeout ; int retries ; struct device dev ; int nr ; char name[48U] ; struct completion dev_released ; struct mutex userspace_clients_lock ; struct list_head userspace_clients ; struct i2c_bus_recovery_info *bus_recovery_info ; struct i2c_adapter_quirks const *quirks ; }; struct fb_fix_screeninfo { char id[16U] ; unsigned long smem_start ; __u32 smem_len ; __u32 type ; __u32 type_aux ; __u32 visual ; __u16 xpanstep ; __u16 ypanstep ; __u16 ywrapstep ; __u32 line_length ; unsigned long mmio_start ; __u32 mmio_len ; __u32 accel ; __u16 capabilities ; __u16 reserved[2U] ; }; struct fb_bitfield { __u32 offset ; __u32 length ; __u32 msb_right ; }; struct fb_var_screeninfo { __u32 xres ; __u32 yres ; __u32 xres_virtual ; __u32 yres_virtual ; __u32 xoffset ; __u32 yoffset ; __u32 bits_per_pixel ; __u32 grayscale ; struct fb_bitfield red ; struct fb_bitfield green ; struct fb_bitfield blue ; struct fb_bitfield transp ; __u32 nonstd ; __u32 activate ; __u32 height ; __u32 width ; __u32 accel_flags ; __u32 pixclock ; __u32 left_margin ; __u32 right_margin ; __u32 upper_margin ; __u32 lower_margin ; __u32 hsync_len ; __u32 vsync_len ; __u32 sync ; __u32 vmode ; __u32 rotate ; __u32 colorspace ; __u32 reserved[4U] ; }; struct fb_cmap { __u32 start ; __u32 len ; __u16 *red ; __u16 *green ; __u16 *blue ; __u16 *transp ; }; struct fb_copyarea { __u32 dx ; __u32 dy ; __u32 width ; __u32 height ; __u32 sx ; __u32 sy ; }; struct fb_fillrect { __u32 dx ; __u32 dy ; __u32 width ; __u32 height ; __u32 color ; __u32 rop ; }; struct fb_image { __u32 dx ; __u32 dy ; __u32 width ; __u32 height ; __u32 fg_color ; __u32 bg_color ; __u8 depth ; char const *data ; struct fb_cmap cmap ; }; struct fbcurpos { __u16 x ; __u16 y ; }; struct fb_cursor { __u16 set ; __u16 enable ; __u16 rop ; char const *mask ; struct fbcurpos hot ; struct fb_image image ; }; enum backlight_type { BACKLIGHT_RAW = 1, BACKLIGHT_PLATFORM = 2, BACKLIGHT_FIRMWARE = 3, BACKLIGHT_TYPE_MAX = 4 } ; struct 
backlight_ops { unsigned int options ; int (*update_status)(struct backlight_device * ) ; int (*get_brightness)(struct backlight_device * ) ; int (*check_fb)(struct backlight_device * , struct fb_info * ) ; }; struct backlight_properties { int brightness ; int max_brightness ; int power ; int fb_blank ; enum backlight_type type ; unsigned int state ; }; struct backlight_device { struct backlight_properties props ; struct mutex update_lock ; struct mutex ops_lock ; struct backlight_ops const *ops ; struct notifier_block fb_notif ; struct list_head entry ; struct device dev ; bool fb_bl_on[32U] ; int use_count ; }; struct fb_chroma { __u32 redx ; __u32 greenx ; __u32 bluex ; __u32 whitex ; __u32 redy ; __u32 greeny ; __u32 bluey ; __u32 whitey ; }; struct fb_videomode; struct fb_monspecs { struct fb_chroma chroma ; struct fb_videomode *modedb ; __u8 manufacturer[4U] ; __u8 monitor[14U] ; __u8 serial_no[14U] ; __u8 ascii[14U] ; __u32 modedb_len ; __u32 model ; __u32 serial ; __u32 year ; __u32 week ; __u32 hfmin ; __u32 hfmax ; __u32 dclkmin ; __u32 dclkmax ; __u16 input ; __u16 dpms ; __u16 signal ; __u16 vfmin ; __u16 vfmax ; __u16 gamma ; unsigned char gtf : 1 ; __u16 misc ; __u8 version ; __u8 revision ; __u8 max_x ; __u8 max_y ; }; struct fb_blit_caps { u32 x ; u32 y ; u32 len ; u32 flags ; }; struct fb_pixmap { u8 *addr ; u32 size ; u32 offset ; u32 buf_align ; u32 scan_align ; u32 access_align ; u32 flags ; u32 blit_x ; u32 blit_y ; void (*writeio)(struct fb_info * , void * , void * , unsigned int ) ; void (*readio)(struct fb_info * , void * , void * , unsigned int ) ; }; struct fb_deferred_io { unsigned long delay ; struct mutex lock ; struct list_head pagelist ; void (*first_io)(struct fb_info * ) ; void (*deferred_io)(struct fb_info * , struct list_head * ) ; }; struct fb_ops { struct module *owner ; int (*fb_open)(struct fb_info * , int ) ; int (*fb_release)(struct fb_info * , int ) ; ssize_t (*fb_read)(struct fb_info * , char * , size_t , loff_t * ) ; 
ssize_t (*fb_write)(struct fb_info * , char const * , size_t , loff_t * ) ; int (*fb_check_var)(struct fb_var_screeninfo * , struct fb_info * ) ; int (*fb_set_par)(struct fb_info * ) ; int (*fb_setcolreg)(unsigned int , unsigned int , unsigned int , unsigned int , unsigned int , struct fb_info * ) ; int (*fb_setcmap)(struct fb_cmap * , struct fb_info * ) ; int (*fb_blank)(int , struct fb_info * ) ; int (*fb_pan_display)(struct fb_var_screeninfo * , struct fb_info * ) ; void (*fb_fillrect)(struct fb_info * , struct fb_fillrect const * ) ; void (*fb_copyarea)(struct fb_info * , struct fb_copyarea const * ) ; void (*fb_imageblit)(struct fb_info * , struct fb_image const * ) ; int (*fb_cursor)(struct fb_info * , struct fb_cursor * ) ; void (*fb_rotate)(struct fb_info * , int ) ; int (*fb_sync)(struct fb_info * ) ; int (*fb_ioctl)(struct fb_info * , unsigned int , unsigned long ) ; int (*fb_compat_ioctl)(struct fb_info * , unsigned int , unsigned long ) ; int (*fb_mmap)(struct fb_info * , struct vm_area_struct * ) ; void (*fb_get_caps)(struct fb_info * , struct fb_blit_caps * , struct fb_var_screeninfo * ) ; void (*fb_destroy)(struct fb_info * ) ; int (*fb_debug_enter)(struct fb_info * ) ; int (*fb_debug_leave)(struct fb_info * ) ; }; struct fb_tilemap { __u32 width ; __u32 height ; __u32 depth ; __u32 length ; __u8 const *data ; }; struct fb_tilerect { __u32 sx ; __u32 sy ; __u32 width ; __u32 height ; __u32 index ; __u32 fg ; __u32 bg ; __u32 rop ; }; struct fb_tilearea { __u32 sx ; __u32 sy ; __u32 dx ; __u32 dy ; __u32 width ; __u32 height ; }; struct fb_tileblit { __u32 sx ; __u32 sy ; __u32 width ; __u32 height ; __u32 fg ; __u32 bg ; __u32 length ; __u32 *indices ; }; struct fb_tilecursor { __u32 sx ; __u32 sy ; __u32 mode ; __u32 shape ; __u32 fg ; __u32 bg ; }; struct fb_tile_ops { void (*fb_settile)(struct fb_info * , struct fb_tilemap * ) ; void (*fb_tilecopy)(struct fb_info * , struct fb_tilearea * ) ; void (*fb_tilefill)(struct fb_info * , struct 
fb_tilerect * ) ; void (*fb_tileblit)(struct fb_info * , struct fb_tileblit * ) ; void (*fb_tilecursor)(struct fb_info * , struct fb_tilecursor * ) ; int (*fb_get_tilemax)(struct fb_info * ) ; }; struct aperture { resource_size_t base ; resource_size_t size ; }; struct apertures_struct { unsigned int count ; struct aperture ranges[0U] ; }; struct fb_info { atomic_t count ; int node ; int flags ; struct mutex lock ; struct mutex mm_lock ; struct fb_var_screeninfo var ; struct fb_fix_screeninfo fix ; struct fb_monspecs monspecs ; struct work_struct queue ; struct fb_pixmap pixmap ; struct fb_pixmap sprite ; struct fb_cmap cmap ; struct list_head modelist ; struct fb_videomode *mode ; struct backlight_device *bl_dev ; struct mutex bl_curve_mutex ; u8 bl_curve[128U] ; struct delayed_work deferred_work ; struct fb_deferred_io *fbdefio ; struct fb_ops *fbops ; struct device *device ; struct device *dev ; int class_flag ; struct fb_tile_ops *tileops ; char *screen_base ; unsigned long screen_size ; void *pseudo_palette ; u32 state ; void *fbcon_par ; void *par ; struct apertures_struct *apertures ; bool skip_vt_switch ; }; struct fb_videomode { char const *name ; u32 refresh ; u32 xres ; u32 yres ; u32 pixclock ; u32 left_margin ; u32 right_margin ; u32 upper_margin ; u32 lower_margin ; u32 hsync_len ; u32 vsync_len ; u32 sync ; u32 vmode ; u32 flag ; }; enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_VENDOR = 129, HDMI_INFOFRAME_TYPE_AVI = 130, HDMI_INFOFRAME_TYPE_SPD = 131, HDMI_INFOFRAME_TYPE_AUDIO = 132 } ; enum hdmi_picture_aspect { HDMI_PICTURE_ASPECT_NONE = 0, HDMI_PICTURE_ASPECT_4_3 = 1, HDMI_PICTURE_ASPECT_16_9 = 2, HDMI_PICTURE_ASPECT_RESERVED = 3 } ; struct ww_class { atomic_long_t stamp ; struct lock_class_key acquire_key ; struct lock_class_key mutex_key ; char const *acquire_name ; char const *mutex_name ; }; struct ww_mutex; struct ww_acquire_ctx { struct task_struct *task ; unsigned long stamp ; unsigned int acquired ; unsigned int done_acquire ; struct 
ww_class *ww_class ; struct ww_mutex *contending_lock ; struct lockdep_map dep_map ; unsigned int deadlock_inject_interval ; unsigned int deadlock_inject_countdown ; }; struct ww_mutex { struct mutex base ; struct ww_acquire_ctx *ctx ; struct ww_class *ww_class ; }; struct drm_modeset_lock; struct drm_modeset_acquire_ctx { struct ww_acquire_ctx ww_ctx ; struct drm_modeset_lock *contended ; struct list_head locked ; bool trylock_only ; }; struct drm_modeset_lock { struct ww_mutex mutex ; struct list_head head ; }; struct drm_mode_set; struct drm_object_properties; struct fence; struct drm_mode_object { uint32_t id ; uint32_t type ; struct drm_object_properties *properties ; }; struct drm_object_properties { int count ; int atomic_count ; struct drm_property *properties[24U] ; uint64_t values[24U] ; }; enum drm_connector_force { DRM_FORCE_UNSPECIFIED = 0, DRM_FORCE_OFF = 1, DRM_FORCE_ON = 2, DRM_FORCE_ON_DIGITAL = 3 } ; enum drm_mode_status { MODE_OK = 0, MODE_HSYNC = 1, MODE_VSYNC = 2, MODE_H_ILLEGAL = 3, MODE_V_ILLEGAL = 4, MODE_BAD_WIDTH = 5, MODE_NOMODE = 6, MODE_NO_INTERLACE = 7, MODE_NO_DBLESCAN = 8, MODE_NO_VSCAN = 9, MODE_MEM = 10, MODE_VIRTUAL_X = 11, MODE_VIRTUAL_Y = 12, MODE_MEM_VIRT = 13, MODE_NOCLOCK = 14, MODE_CLOCK_HIGH = 15, MODE_CLOCK_LOW = 16, MODE_CLOCK_RANGE = 17, MODE_BAD_HVALUE = 18, MODE_BAD_VVALUE = 19, MODE_BAD_VSCAN = 20, MODE_HSYNC_NARROW = 21, MODE_HSYNC_WIDE = 22, MODE_HBLANK_NARROW = 23, MODE_HBLANK_WIDE = 24, MODE_VSYNC_NARROW = 25, MODE_VSYNC_WIDE = 26, MODE_VBLANK_NARROW = 27, MODE_VBLANK_WIDE = 28, MODE_PANEL = 29, MODE_INTERLACE_WIDTH = 30, MODE_ONE_WIDTH = 31, MODE_ONE_HEIGHT = 32, MODE_ONE_SIZE = 33, MODE_NO_REDUCED = 34, MODE_NO_STEREO = 35, MODE_UNVERIFIED = -3, MODE_BAD = -2, MODE_ERROR = -1 } ; struct drm_display_mode { struct list_head head ; struct drm_mode_object base ; char name[32U] ; enum drm_mode_status status ; unsigned int type ; int clock ; int hdisplay ; int hsync_start ; int hsync_end ; int htotal ; int hskew ; int 
vdisplay ; int vsync_start ; int vsync_end ; int vtotal ; int vscan ; unsigned int flags ; int width_mm ; int height_mm ; int crtc_clock ; int crtc_hdisplay ; int crtc_hblank_start ; int crtc_hblank_end ; int crtc_hsync_start ; int crtc_hsync_end ; int crtc_htotal ; int crtc_hskew ; int crtc_vdisplay ; int crtc_vblank_start ; int crtc_vblank_end ; int crtc_vsync_start ; int crtc_vsync_end ; int crtc_vtotal ; int *private ; int private_flags ; int vrefresh ; int hsync ; enum hdmi_picture_aspect picture_aspect_ratio ; }; struct drm_cmdline_mode { bool specified ; bool refresh_specified ; bool bpp_specified ; int xres ; int yres ; int bpp ; int refresh ; bool rb ; bool interlace ; bool cvt ; bool margins ; enum drm_connector_force force ; }; enum drm_connector_status { connector_status_connected = 1, connector_status_disconnected = 2, connector_status_unknown = 3 } ; enum subpixel_order { SubPixelUnknown = 0, SubPixelHorizontalRGB = 1, SubPixelHorizontalBGR = 2, SubPixelVerticalRGB = 3, SubPixelVerticalBGR = 4, SubPixelNone = 5 } ; struct drm_display_info { char name[32U] ; unsigned int width_mm ; unsigned int height_mm ; unsigned int min_vfreq ; unsigned int max_vfreq ; unsigned int min_hfreq ; unsigned int max_hfreq ; unsigned int pixel_clock ; unsigned int bpc ; enum subpixel_order subpixel_order ; u32 color_formats ; u32 const *bus_formats ; unsigned int num_bus_formats ; u8 edid_hdmi_dc_modes ; u8 cea_rev ; }; struct drm_tile_group { struct kref refcount ; struct drm_device *dev ; int id ; u8 group_data[8U] ; }; struct drm_framebuffer_funcs { void (*destroy)(struct drm_framebuffer * ) ; int (*create_handle)(struct drm_framebuffer * , struct drm_file * , unsigned int * ) ; int (*dirty)(struct drm_framebuffer * , struct drm_file * , unsigned int , unsigned int , struct drm_clip_rect * , unsigned int ) ; }; struct drm_framebuffer { struct drm_device *dev ; struct kref refcount ; struct list_head head ; struct drm_mode_object base ; struct drm_framebuffer_funcs const 
*funcs ; unsigned int pitches[4U] ; unsigned int offsets[4U] ; uint64_t modifier[4U] ; unsigned int width ; unsigned int height ; unsigned int depth ; int bits_per_pixel ; int flags ; uint32_t pixel_format ; struct list_head filp_head ; void *helper_private ; }; struct drm_property_blob { struct drm_mode_object base ; struct drm_device *dev ; struct kref refcount ; struct list_head head_global ; struct list_head head_file ; size_t length ; unsigned char data[] ; }; struct drm_property { struct list_head head ; struct drm_mode_object base ; uint32_t flags ; char name[32U] ; uint32_t num_values ; uint64_t *values ; struct drm_device *dev ; struct list_head enum_list ; }; struct drm_pending_vblank_event; struct drm_bridge; struct drm_crtc_state { struct drm_crtc *crtc ; bool enable ; bool active ; bool planes_changed ; bool mode_changed ; bool active_changed ; u32 plane_mask ; u32 last_vblank_count ; struct drm_display_mode adjusted_mode ; struct drm_display_mode mode ; struct drm_property_blob *mode_blob ; struct drm_pending_vblank_event *event ; struct drm_atomic_state *state ; }; struct drm_crtc_funcs { void (*save)(struct drm_crtc * ) ; void (*restore)(struct drm_crtc * ) ; void (*reset)(struct drm_crtc * ) ; int (*cursor_set)(struct drm_crtc * , struct drm_file * , uint32_t , uint32_t , uint32_t ) ; int (*cursor_set2)(struct drm_crtc * , struct drm_file * , uint32_t , uint32_t , uint32_t , int32_t , int32_t ) ; int (*cursor_move)(struct drm_crtc * , int , int ) ; void (*gamma_set)(struct drm_crtc * , u16 * , u16 * , u16 * , uint32_t , uint32_t ) ; void (*destroy)(struct drm_crtc * ) ; int (*set_config)(struct drm_mode_set * ) ; int (*page_flip)(struct drm_crtc * , struct drm_framebuffer * , struct drm_pending_vblank_event * , uint32_t ) ; int (*set_property)(struct drm_crtc * , struct drm_property * , uint64_t ) ; struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc * ) ; void (*atomic_destroy_state)(struct drm_crtc * , struct drm_crtc_state * ) ; int 
(*atomic_set_property)(struct drm_crtc * , struct drm_crtc_state * , struct drm_property * , uint64_t ) ; int (*atomic_get_property)(struct drm_crtc * , struct drm_crtc_state const * , struct drm_property * , uint64_t * ) ; }; struct drm_crtc { struct drm_device *dev ; struct device_node *port ; struct list_head head ; struct drm_modeset_lock mutex ; struct drm_mode_object base ; struct drm_plane *primary ; struct drm_plane *cursor ; int cursor_x ; int cursor_y ; bool enabled ; struct drm_display_mode mode ; struct drm_display_mode hwmode ; bool invert_dimensions ; int x ; int y ; struct drm_crtc_funcs const *funcs ; uint32_t gamma_size ; uint16_t *gamma_store ; int framedur_ns ; int linedur_ns ; int pixeldur_ns ; void const *helper_private ; struct drm_object_properties properties ; struct drm_crtc_state *state ; struct drm_modeset_acquire_ctx *acquire_ctx ; }; struct drm_connector_state { struct drm_connector *connector ; struct drm_crtc *crtc ; struct drm_encoder *best_encoder ; struct drm_atomic_state *state ; }; struct drm_connector_funcs { void (*dpms)(struct drm_connector * , int ) ; void (*save)(struct drm_connector * ) ; void (*restore)(struct drm_connector * ) ; void (*reset)(struct drm_connector * ) ; enum drm_connector_status (*detect)(struct drm_connector * , bool ) ; int (*fill_modes)(struct drm_connector * , uint32_t , uint32_t ) ; int (*set_property)(struct drm_connector * , struct drm_property * , uint64_t ) ; void (*destroy)(struct drm_connector * ) ; void (*force)(struct drm_connector * ) ; struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector * ) ; void (*atomic_destroy_state)(struct drm_connector * , struct drm_connector_state * ) ; int (*atomic_set_property)(struct drm_connector * , struct drm_connector_state * , struct drm_property * , uint64_t ) ; int (*atomic_get_property)(struct drm_connector * , struct drm_connector_state const * , struct drm_property * , uint64_t * ) ; }; struct drm_encoder_funcs { void 
(*reset)(struct drm_encoder * ) ; void (*destroy)(struct drm_encoder * ) ; }; struct drm_encoder { struct drm_device *dev ; struct list_head head ; struct drm_mode_object base ; char *name ; int encoder_type ; uint32_t possible_crtcs ; uint32_t possible_clones ; struct drm_crtc *crtc ; struct drm_bridge *bridge ; struct drm_encoder_funcs const *funcs ; void const *helper_private ; }; struct drm_connector { struct drm_device *dev ; struct device *kdev ; struct device_attribute *attr ; struct list_head head ; struct drm_mode_object base ; char *name ; int connector_type ; int connector_type_id ; bool interlace_allowed ; bool doublescan_allowed ; bool stereo_allowed ; struct list_head modes ; enum drm_connector_status status ; struct list_head probed_modes ; struct drm_display_info display_info ; struct drm_connector_funcs const *funcs ; struct drm_property_blob *edid_blob_ptr ; struct drm_object_properties properties ; struct drm_property_blob *path_blob_ptr ; struct drm_property_blob *tile_blob_ptr ; uint8_t polled ; int dpms ; void const *helper_private ; struct drm_cmdline_mode cmdline_mode ; enum drm_connector_force force ; bool override_edid ; uint32_t encoder_ids[3U] ; struct drm_encoder *encoder ; uint8_t eld[128U] ; bool dvi_dual ; int max_tmds_clock ; bool latency_present[2U] ; int video_latency[2U] ; int audio_latency[2U] ; int null_edid_counter ; unsigned int bad_edid_counter ; bool edid_corrupt ; struct dentry *debugfs_entry ; struct drm_connector_state *state ; bool has_tile ; struct drm_tile_group *tile_group ; bool tile_is_single_monitor ; uint8_t num_h_tile ; uint8_t num_v_tile ; uint8_t tile_h_loc ; uint8_t tile_v_loc ; uint16_t tile_h_size ; uint16_t tile_v_size ; struct list_head destroy_list ; }; struct drm_plane_state { struct drm_plane *plane ; struct drm_crtc *crtc ; struct drm_framebuffer *fb ; struct fence *fence ; int32_t crtc_x ; int32_t crtc_y ; uint32_t crtc_w ; uint32_t crtc_h ; uint32_t src_x ; uint32_t src_y ; uint32_t src_h ; uint32_t 
src_w ; unsigned int rotation ; struct drm_atomic_state *state ; }; struct drm_plane_funcs { int (*update_plane)(struct drm_plane * , struct drm_crtc * , struct drm_framebuffer * , int , int , unsigned int , unsigned int , uint32_t , uint32_t , uint32_t , uint32_t ) ; int (*disable_plane)(struct drm_plane * ) ; void (*destroy)(struct drm_plane * ) ; void (*reset)(struct drm_plane * ) ; int (*set_property)(struct drm_plane * , struct drm_property * , uint64_t ) ; struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane * ) ; void (*atomic_destroy_state)(struct drm_plane * , struct drm_plane_state * ) ; int (*atomic_set_property)(struct drm_plane * , struct drm_plane_state * , struct drm_property * , uint64_t ) ; int (*atomic_get_property)(struct drm_plane * , struct drm_plane_state const * , struct drm_property * , uint64_t * ) ; }; enum drm_plane_type { DRM_PLANE_TYPE_OVERLAY = 0, DRM_PLANE_TYPE_PRIMARY = 1, DRM_PLANE_TYPE_CURSOR = 2 } ; struct drm_plane { struct drm_device *dev ; struct list_head head ; struct drm_modeset_lock mutex ; struct drm_mode_object base ; uint32_t possible_crtcs ; uint32_t *format_types ; uint32_t format_count ; bool format_default ; struct drm_crtc *crtc ; struct drm_framebuffer *fb ; struct drm_framebuffer *old_fb ; struct drm_plane_funcs const *funcs ; struct drm_object_properties properties ; enum drm_plane_type type ; void const *helper_private ; struct drm_plane_state *state ; }; struct drm_bridge_funcs { int (*attach)(struct drm_bridge * ) ; bool (*mode_fixup)(struct drm_bridge * , struct drm_display_mode const * , struct drm_display_mode * ) ; void (*disable)(struct drm_bridge * ) ; void (*post_disable)(struct drm_bridge * ) ; void (*mode_set)(struct drm_bridge * , struct drm_display_mode * , struct drm_display_mode * ) ; void (*pre_enable)(struct drm_bridge * ) ; void (*enable)(struct drm_bridge * ) ; }; struct drm_bridge { struct drm_device *dev ; struct drm_encoder *encoder ; struct drm_bridge *next ; struct 
device_node *of_node ; struct list_head list ; struct drm_bridge_funcs const *funcs ; void *driver_private ; }; struct drm_atomic_state { struct drm_device *dev ; bool allow_modeset ; bool legacy_cursor_update ; struct drm_plane **planes ; struct drm_plane_state **plane_states ; struct drm_crtc **crtcs ; struct drm_crtc_state **crtc_states ; int num_connector ; struct drm_connector **connectors ; struct drm_connector_state **connector_states ; struct drm_modeset_acquire_ctx *acquire_ctx ; }; struct drm_mode_set { struct drm_framebuffer *fb ; struct drm_crtc *crtc ; struct drm_display_mode *mode ; uint32_t x ; uint32_t y ; struct drm_connector **connectors ; size_t num_connectors ; }; struct drm_mode_config_funcs { struct drm_framebuffer *(*fb_create)(struct drm_device * , struct drm_file * , struct drm_mode_fb_cmd2 * ) ; void (*output_poll_changed)(struct drm_device * ) ; int (*atomic_check)(struct drm_device * , struct drm_atomic_state * ) ; int (*atomic_commit)(struct drm_device * , struct drm_atomic_state * , bool ) ; struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device * ) ; void (*atomic_state_clear)(struct drm_atomic_state * ) ; void (*atomic_state_free)(struct drm_atomic_state * ) ; }; struct drm_mode_group { uint32_t num_crtcs ; uint32_t num_encoders ; uint32_t num_connectors ; uint32_t *id_list ; }; struct drm_mode_config { struct mutex mutex ; struct drm_modeset_lock connection_mutex ; struct drm_modeset_acquire_ctx *acquire_ctx ; struct mutex idr_mutex ; struct idr crtc_idr ; struct idr tile_idr ; struct mutex fb_lock ; int num_fb ; struct list_head fb_list ; int num_connector ; struct list_head connector_list ; int num_encoder ; struct list_head encoder_list ; int num_overlay_plane ; int num_total_plane ; struct list_head plane_list ; int num_crtc ; struct list_head crtc_list ; struct list_head property_list ; int min_width ; int min_height ; int max_width ; int max_height ; struct drm_mode_config_funcs const *funcs ; resource_size_t fb_base 
; bool poll_enabled ; bool poll_running ; bool delayed_event ; struct delayed_work output_poll_work ; struct mutex blob_lock ; struct list_head property_blob_list ; struct drm_property *edid_property ; struct drm_property *dpms_property ; struct drm_property *path_property ; struct drm_property *tile_property ; struct drm_property *plane_type_property ; struct drm_property *rotation_property ; struct drm_property *prop_src_x ; struct drm_property *prop_src_y ; struct drm_property *prop_src_w ; struct drm_property *prop_src_h ; struct drm_property *prop_crtc_x ; struct drm_property *prop_crtc_y ; struct drm_property *prop_crtc_w ; struct drm_property *prop_crtc_h ; struct drm_property *prop_fb_id ; struct drm_property *prop_crtc_id ; struct drm_property *prop_active ; struct drm_property *prop_mode_id ; struct drm_property *dvi_i_subconnector_property ; struct drm_property *dvi_i_select_subconnector_property ; struct drm_property *tv_subconnector_property ; struct drm_property *tv_select_subconnector_property ; struct drm_property *tv_mode_property ; struct drm_property *tv_left_margin_property ; struct drm_property *tv_right_margin_property ; struct drm_property *tv_top_margin_property ; struct drm_property *tv_bottom_margin_property ; struct drm_property *tv_brightness_property ; struct drm_property *tv_contrast_property ; struct drm_property *tv_flicker_reduction_property ; struct drm_property *tv_overscan_property ; struct drm_property *tv_saturation_property ; struct drm_property *tv_hue_property ; struct drm_property *scaling_mode_property ; struct drm_property *aspect_ratio_property ; struct drm_property *dirty_info_property ; struct drm_property *suggested_x_property ; struct drm_property *suggested_y_property ; uint32_t preferred_depth ; uint32_t prefer_shadow ; bool async_page_flip ; bool allow_fb_modifiers ; uint32_t cursor_width ; uint32_t cursor_height ; }; struct edid; struct drm_open_hash { struct hlist_head *table ; u8 order ; }; struct drm_mm; 
struct drm_mm_node { struct list_head node_list ; struct list_head hole_stack ; unsigned char hole_follows : 1 ; unsigned char scanned_block : 1 ; unsigned char scanned_prev_free : 1 ; unsigned char scanned_next_free : 1 ; unsigned char scanned_preceeds_hole : 1 ; unsigned char allocated : 1 ; unsigned long color ; u64 start ; u64 size ; struct drm_mm *mm ; }; struct drm_mm { struct list_head hole_stack ; struct drm_mm_node head_node ; unsigned char scan_check_range : 1 ; unsigned int scan_alignment ; unsigned long scan_color ; u64 scan_size ; u64 scan_hit_start ; u64 scan_hit_end ; unsigned int scanned_blocks ; u64 scan_start ; u64 scan_end ; struct drm_mm_node *prev_scanned_node ; void (*color_adjust)(struct drm_mm_node * , unsigned long , u64 * , u64 * ) ; }; struct drm_vma_offset_node { rwlock_t vm_lock ; struct drm_mm_node vm_node ; struct rb_node vm_rb ; struct rb_root vm_files ; }; struct drm_vma_offset_manager { rwlock_t vm_lock ; struct rb_root vm_addr_space_rb ; struct drm_mm vm_addr_space_mm ; }; struct drm_local_map; struct drm_device_dma; struct drm_dma_handle; struct reservation_object; typedef int drm_ioctl_t(struct drm_device * , void * , struct drm_file * ); struct drm_ioctl_desc { unsigned int cmd ; int flags ; drm_ioctl_t *func ; char const *name ; }; struct drm_pending_event { struct drm_event *event ; struct list_head link ; struct drm_file *file_priv ; pid_t pid ; void (*destroy)(struct drm_pending_event * ) ; }; struct drm_prime_file_private { struct list_head head ; struct mutex lock ; }; struct drm_master; struct drm_file { unsigned char authenticated : 1 ; unsigned char is_master : 1 ; unsigned char stereo_allowed : 1 ; unsigned char universal_planes : 1 ; unsigned char atomic : 1 ; struct pid *pid ; kuid_t uid ; drm_magic_t magic ; struct list_head lhead ; struct drm_minor *minor ; unsigned long lock_count ; struct idr object_idr ; spinlock_t table_lock ; struct file *filp ; void *driver_priv ; struct drm_master *master ; struct list_head 
fbs ; struct mutex fbs_lock ; struct list_head blobs ; wait_queue_head_t event_wait ; struct list_head event_list ; int event_space ; struct drm_prime_file_private prime ; }; struct drm_lock_data { struct drm_hw_lock *hw_lock ; struct drm_file *file_priv ; wait_queue_head_t lock_queue ; unsigned long lock_time ; spinlock_t spinlock ; uint32_t kernel_waiters ; uint32_t user_waiters ; int idle_has_lock ; }; struct drm_master { struct kref refcount ; struct drm_minor *minor ; char *unique ; int unique_len ; struct idr magic_map ; struct drm_lock_data lock ; void *driver_priv ; }; struct drm_driver { int (*load)(struct drm_device * , unsigned long ) ; int (*firstopen)(struct drm_device * ) ; int (*open)(struct drm_device * , struct drm_file * ) ; void (*preclose)(struct drm_device * , struct drm_file * ) ; void (*postclose)(struct drm_device * , struct drm_file * ) ; void (*lastclose)(struct drm_device * ) ; int (*unload)(struct drm_device * ) ; int (*suspend)(struct drm_device * , pm_message_t ) ; int (*resume)(struct drm_device * ) ; int (*dma_ioctl)(struct drm_device * , void * , struct drm_file * ) ; int (*dma_quiescent)(struct drm_device * ) ; int (*context_dtor)(struct drm_device * , int ) ; int (*set_busid)(struct drm_device * , struct drm_master * ) ; u32 (*get_vblank_counter)(struct drm_device * , int ) ; int (*enable_vblank)(struct drm_device * , int ) ; void (*disable_vblank)(struct drm_device * , int ) ; int (*device_is_agp)(struct drm_device * ) ; int (*get_scanout_position)(struct drm_device * , int , unsigned int , int * , int * , ktime_t * , ktime_t * ) ; int (*get_vblank_timestamp)(struct drm_device * , int , int * , struct timeval * , unsigned int ) ; irqreturn_t (*irq_handler)(int , void * ) ; void (*irq_preinstall)(struct drm_device * ) ; int (*irq_postinstall)(struct drm_device * ) ; void (*irq_uninstall)(struct drm_device * ) ; int (*master_create)(struct drm_device * , struct drm_master * ) ; void (*master_destroy)(struct drm_device * , struct 
drm_master * ) ; int (*master_set)(struct drm_device * , struct drm_file * , bool ) ; void (*master_drop)(struct drm_device * , struct drm_file * , bool ) ; int (*debugfs_init)(struct drm_minor * ) ; void (*debugfs_cleanup)(struct drm_minor * ) ; void (*gem_free_object)(struct drm_gem_object * ) ; int (*gem_open_object)(struct drm_gem_object * , struct drm_file * ) ; void (*gem_close_object)(struct drm_gem_object * , struct drm_file * ) ; int (*prime_handle_to_fd)(struct drm_device * , struct drm_file * , uint32_t , uint32_t , int * ) ; int (*prime_fd_to_handle)(struct drm_device * , struct drm_file * , int , uint32_t * ) ; struct dma_buf *(*gem_prime_export)(struct drm_device * , struct drm_gem_object * , int ) ; struct drm_gem_object *(*gem_prime_import)(struct drm_device * , struct dma_buf * ) ; int (*gem_prime_pin)(struct drm_gem_object * ) ; void (*gem_prime_unpin)(struct drm_gem_object * ) ; struct reservation_object *(*gem_prime_res_obj)(struct drm_gem_object * ) ; struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object * ) ; struct drm_gem_object *(*gem_prime_import_sg_table)(struct drm_device * , struct dma_buf_attachment * , struct sg_table * ) ; void *(*gem_prime_vmap)(struct drm_gem_object * ) ; void (*gem_prime_vunmap)(struct drm_gem_object * , void * ) ; int (*gem_prime_mmap)(struct drm_gem_object * , struct vm_area_struct * ) ; void (*vgaarb_irq)(struct drm_device * , bool ) ; int (*dumb_create)(struct drm_file * , struct drm_device * , struct drm_mode_create_dumb * ) ; int (*dumb_map_offset)(struct drm_file * , struct drm_device * , uint32_t , uint64_t * ) ; int (*dumb_destroy)(struct drm_file * , struct drm_device * , uint32_t ) ; struct vm_operations_struct const *gem_vm_ops ; int major ; int minor ; int patchlevel ; char *name ; char *desc ; char *date ; u32 driver_features ; int dev_priv_size ; struct drm_ioctl_desc const *ioctls ; int num_ioctls ; struct file_operations const *fops ; struct list_head legacy_dev_list ; }; struct 
drm_minor { int index ; int type ; struct device *kdev ; struct drm_device *dev ; struct dentry *debugfs_root ; struct list_head debugfs_list ; struct mutex debugfs_lock ; struct drm_master *master ; struct drm_mode_group mode_group ; }; struct drm_pending_vblank_event { struct drm_pending_event base ; int pipe ; struct drm_event_vblank event ; }; struct drm_vblank_crtc { struct drm_device *dev ; wait_queue_head_t queue ; struct timer_list disable_timer ; unsigned long count ; struct timeval time[2U] ; atomic_t refcount ; u32 last ; u32 last_wait ; unsigned int inmodeset ; int crtc ; bool enabled ; }; struct virtio_device; struct drm_sg_mem; struct __anonstruct_sigdata_266 { int context ; struct drm_hw_lock *lock ; }; struct drm_device { struct list_head legacy_dev_list ; int if_version ; struct kref ref ; struct device *dev ; struct drm_driver *driver ; void *dev_private ; struct drm_minor *control ; struct drm_minor *primary ; struct drm_minor *render ; atomic_t unplugged ; struct inode *anon_inode ; char *unique ; struct mutex struct_mutex ; struct mutex master_mutex ; int open_count ; spinlock_t buf_lock ; int buf_use ; atomic_t buf_alloc ; struct list_head filelist ; struct list_head maplist ; struct drm_open_hash map_hash ; struct list_head ctxlist ; struct mutex ctxlist_mutex ; struct idr ctx_idr ; struct list_head vmalist ; struct drm_device_dma *dma ; long volatile context_flag ; int last_context ; bool irq_enabled ; int irq ; bool vblank_disable_allowed ; bool vblank_disable_immediate ; struct drm_vblank_crtc *vblank ; spinlock_t vblank_time_lock ; spinlock_t vbl_lock ; u32 max_vblank_count ; struct list_head vblank_event_list ; spinlock_t event_lock ; struct drm_agp_head *agp ; struct pci_dev *pdev ; struct platform_device *platformdev ; struct virtio_device *virtdev ; struct drm_sg_mem *sg ; unsigned int num_crtcs ; sigset_t sigmask ; struct __anonstruct_sigdata_266 sigdata ; struct drm_local_map *agp_buffer_map ; unsigned int agp_buffer_token ; struct 
drm_mode_config mode_config ; struct mutex object_name_lock ; struct idr object_name_idr ; struct drm_vma_offset_manager *vma_offset_manager ; int switch_power_state ; }; struct drm_i915_gem_exec_object2 { __u32 handle ; __u32 relocation_count ; __u64 relocs_ptr ; __u64 alignment ; __u64 offset ; __u64 flags ; __u64 rsvd1 ; __u64 rsvd2 ; }; struct drm_i915_gem_execbuffer2 { __u64 buffers_ptr ; __u32 buffer_count ; __u32 batch_start_offset ; __u32 batch_len ; __u32 DR1 ; __u32 DR4 ; __u32 num_cliprects ; __u64 cliprects_ptr ; __u64 flags ; __u64 rsvd1 ; __u64 rsvd2 ; }; struct old_child_dev_config { u16 handle ; u16 device_type ; u8 device_id[10U] ; u16 addin_offset ; u8 dvo_port ; u8 i2c_pin ; u8 slave_addr ; u8 ddc_pin ; u16 edid_ptr ; u8 dvo_cfg ; u8 dvo2_port ; u8 i2c2_pin ; u8 slave2_addr ; u8 ddc2_pin ; u8 capabilities ; u8 dvo_wiring ; u8 dvo2_wiring ; u16 extended_type ; u8 dvo_function ; }; struct common_child_dev_config { u16 handle ; u16 device_type ; u8 not_common1[12U] ; u8 dvo_port ; u8 not_common2[2U] ; u8 ddc_pin ; u16 edid_ptr ; }; union child_device_config { u8 raw[33U] ; struct old_child_dev_config old ; struct common_child_dev_config common ; }; struct edp_power_seq { u16 t1_t3 ; u16 t8 ; u16 t9 ; u16 t10 ; u16 t11_t12 ; }; struct mipi_config { u16 panel_id ; unsigned char enable_dithering : 1 ; unsigned char rsvd1 : 1 ; unsigned char is_bridge : 1 ; unsigned char panel_arch_type : 2 ; unsigned char is_cmd_mode : 1 ; unsigned char video_transfer_mode : 2 ; unsigned char cabc_supported : 1 ; unsigned char pwm_blc : 1 ; unsigned char videomode_color_format : 4 ; unsigned char rotation : 2 ; unsigned char bta_enabled : 1 ; unsigned short rsvd2 : 15 ; unsigned char dual_link : 2 ; unsigned char lane_cnt : 2 ; unsigned char pixel_overlap : 3 ; unsigned short rsvd3 : 9 ; u16 rsvd4 ; u8 rsvd5 ; u32 target_burst_mode_freq ; u32 dsi_ddr_clk ; u32 bridge_ref_clk ; unsigned char byte_clk_sel : 2 ; unsigned char rsvd6 : 6 ; unsigned char dphy_param_valid : 1 
; unsigned char eot_pkt_disabled : 1 ; unsigned char enable_clk_stop : 1 ; unsigned short rsvd7 : 13 ; u32 hs_tx_timeout ; u32 lp_rx_timeout ; u32 turn_around_timeout ; u32 device_reset_timer ; u32 master_init_timer ; u32 dbi_bw_timer ; u32 lp_byte_clk_val ; unsigned char prepare_cnt : 6 ; unsigned char rsvd8 : 2 ; unsigned char clk_zero_cnt ; unsigned char trail_cnt : 5 ; unsigned char rsvd9 : 3 ; unsigned char exit_zero_cnt : 6 ; unsigned char rsvd10 : 2 ; u32 clk_lane_switch_cnt ; u32 hl_switch_cnt ; u32 rsvd11[6U] ; u8 tclk_miss ; u8 tclk_post ; u8 rsvd12 ; u8 tclk_pre ; u8 tclk_prepare ; u8 tclk_settle ; u8 tclk_term_enable ; u8 tclk_trail ; u16 tclk_prepare_clkzero ; u8 rsvd13 ; u8 td_term_enable ; u8 teot ; u8 ths_exit ; u8 ths_prepare ; u16 ths_prepare_hszero ; u8 rsvd14 ; u8 ths_settle ; u8 ths_skip ; u8 ths_trail ; u8 tinit ; u8 tlpx ; u8 rsvd15[3U] ; u8 panel_enable ; u8 bl_enable ; u8 pwm_enable ; u8 reset_r_n ; u8 pwr_down_r ; u8 stdby_r_n ; }; struct mipi_pps_data { u16 panel_on_delay ; u16 bl_enable_delay ; u16 bl_disable_delay ; u16 panel_off_delay ; u16 panel_power_cycle_delay ; }; struct i915_gem_batch_pool { struct drm_device *dev ; struct list_head cache_list[4U] ; }; struct intel_hw_status_page { u32 *page_addr ; unsigned int gfx_addr ; struct drm_i915_gem_object *obj ; }; enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT = 1, HANGCHECK_ACTIVE = 2, HANGCHECK_ACTIVE_LOOP = 3, HANGCHECK_KICK = 4, HANGCHECK_HUNG = 5 } ; struct intel_ring_hangcheck { u64 acthd ; u64 max_acthd ; u32 seqno ; int score ; enum intel_ring_hangcheck_action action ; int deadlock ; }; struct intel_engine_cs; struct intel_ringbuffer { struct drm_i915_gem_object *obj ; void *virtual_start ; struct intel_engine_cs *ring ; u32 head ; u32 tail ; int space ; int size ; int effective_size ; u32 last_retired_head ; }; struct intel_context; struct drm_i915_reg_descriptor; enum intel_ring_id { RCS = 0, VCS = 1, BCS = 2, VECS = 3, VCS2 = 4 } ; struct 
drm_i915_gem_request; struct __anonstruct_mbox_269 { u32 wait[5U] ; u32 signal[5U] ; }; union __anonunion____missing_field_name_268 { struct __anonstruct_mbox_269 mbox ; u64 signal_ggtt[5U] ; }; struct __anonstruct_semaphore_267 { u32 sync_seqno[4U] ; union __anonunion____missing_field_name_268 __annonCompField77 ; int (*sync_to)(struct intel_engine_cs * , struct intel_engine_cs * , u32 ) ; int (*signal)(struct intel_engine_cs * , unsigned int ) ; }; struct __anonstruct_scratch_270 { struct drm_i915_gem_object *obj ; u32 gtt_offset ; u32 volatile *cpu_page ; }; struct intel_engine_cs { char const *name ; enum intel_ring_id id ; u32 mmio_base ; struct drm_device *dev ; struct intel_ringbuffer *buffer ; struct i915_gem_batch_pool batch_pool ; struct intel_hw_status_page status_page ; unsigned int irq_refcount ; u32 irq_enable_mask ; struct drm_i915_gem_request *trace_irq_req ; bool (*irq_get)(struct intel_engine_cs * ) ; void (*irq_put)(struct intel_engine_cs * ) ; int (*init_hw)(struct intel_engine_cs * ) ; int (*init_context)(struct intel_engine_cs * , struct intel_context * ) ; void (*write_tail)(struct intel_engine_cs * , u32 ) ; int (*flush)(struct intel_engine_cs * , u32 , u32 ) ; int (*add_request)(struct intel_engine_cs * ) ; u32 (*get_seqno)(struct intel_engine_cs * , bool ) ; void (*set_seqno)(struct intel_engine_cs * , u32 ) ; int (*dispatch_execbuffer)(struct intel_engine_cs * , u64 , u32 , unsigned int ) ; void (*cleanup)(struct intel_engine_cs * ) ; struct __anonstruct_semaphore_267 semaphore ; spinlock_t execlist_lock ; struct list_head execlist_queue ; struct list_head execlist_retired_req_list ; u8 next_context_status_buffer ; u32 irq_keep_mask ; int (*emit_request)(struct intel_ringbuffer * , struct drm_i915_gem_request * ) ; int (*emit_flush)(struct intel_ringbuffer * , struct intel_context * , u32 , u32 ) ; int (*emit_bb_start)(struct intel_ringbuffer * , struct intel_context * , u64 , unsigned int ) ; struct list_head active_list ; struct 
list_head request_list ; struct drm_i915_gem_request *outstanding_lazy_request ; bool gpu_caches_dirty ; wait_queue_head_t irq_queue ; struct intel_context *default_context ; struct intel_context *last_context ; struct intel_ring_hangcheck hangcheck ; struct __anonstruct_scratch_270 scratch ; bool needs_cmd_parser ; struct hlist_head cmd_hash[512U] ; struct drm_i915_reg_descriptor const *reg_table ; int reg_count ; struct drm_i915_reg_descriptor const *master_reg_table ; int master_reg_count ; u32 (*get_cmd_length_mask)(u32 ) ; }; struct drm_i915_file_private; typedef uint32_t gen6_pte_t; enum i915_ggtt_view_type { I915_GGTT_VIEW_NORMAL = 0, I915_GGTT_VIEW_ROTATED = 1, I915_GGTT_VIEW_PARTIAL = 2 } ; struct intel_rotation_info { unsigned int height ; unsigned int pitch ; uint32_t pixel_format ; uint64_t fb_modifier ; }; struct __anonstruct_partial_272 { unsigned long offset ; unsigned int size ; }; union __anonunion_params_271 { struct __anonstruct_partial_272 partial ; }; union __anonunion____missing_field_name_273 { struct intel_rotation_info rotation_info ; }; struct i915_ggtt_view { enum i915_ggtt_view_type type ; union __anonunion_params_271 params ; struct sg_table *pages ; union __anonunion____missing_field_name_273 __annonCompField78 ; }; enum i915_cache_level; enum i915_cache_level; struct i915_address_space; struct i915_vma { struct drm_mm_node node ; struct drm_i915_gem_object *obj ; struct i915_address_space *vm ; unsigned char bound : 4 ; struct i915_ggtt_view ggtt_view ; struct list_head mm_list ; struct list_head vma_link ; struct list_head exec_list ; struct hlist_node exec_node ; unsigned long exec_handle ; struct drm_i915_gem_exec_object2 *exec_entry ; unsigned char pin_count : 4 ; }; struct i915_page_table { struct page *page ; dma_addr_t daddr ; unsigned long *used_ptes ; }; union __anonunion____missing_field_name_274 { uint32_t pd_offset ; dma_addr_t daddr ; }; struct i915_page_directory { struct page *page ; union 
__anonunion____missing_field_name_274 __annonCompField79 ; unsigned long *used_pdes ; struct i915_page_table *page_table[512U] ; }; struct i915_page_directory_pointer { unsigned long used_pdpes[1U] ; struct i915_page_directory *page_directory[4U] ; }; struct __anonstruct_scratch_275 { dma_addr_t addr ; struct page *page ; }; struct i915_address_space { struct drm_mm mm ; struct drm_device *dev ; struct list_head global_link ; unsigned long start ; size_t total ; struct __anonstruct_scratch_275 scratch ; struct list_head active_list ; struct list_head inactive_list ; gen6_pte_t (*pte_encode)(dma_addr_t , enum i915_cache_level , bool , u32 ) ; int (*allocate_va_range)(struct i915_address_space * , uint64_t , uint64_t ) ; void (*clear_range)(struct i915_address_space * , uint64_t , uint64_t , bool ) ; void (*insert_entries)(struct i915_address_space * , struct sg_table * , uint64_t , enum i915_cache_level , u32 ) ; void (*cleanup)(struct i915_address_space * ) ; void (*unbind_vma)(struct i915_vma * ) ; int (*bind_vma)(struct i915_vma * , enum i915_cache_level , u32 ) ; }; struct io_mapping; struct i915_gtt { struct i915_address_space base ; size_t stolen_size ; unsigned long mappable_end ; struct io_mapping *mappable ; phys_addr_t mappable_base ; void *gsm ; bool do_idle_maps ; int mtrr ; int (*gtt_probe)(struct drm_device * , size_t * , size_t * , phys_addr_t * , unsigned long * ) ; }; union __anonunion____missing_field_name_276 { struct i915_page_directory_pointer pdp ; struct i915_page_directory pd ; }; struct i915_hw_ppgtt { struct i915_address_space base ; struct kref ref ; struct drm_mm_node node ; unsigned long pd_dirty_rings ; union __anonunion____missing_field_name_276 __annonCompField80 ; struct i915_page_table *scratch_pt ; struct i915_page_directory *scratch_pd ; struct drm_i915_file_private *file_priv ; gen6_pte_t *pd_addr ; int (*enable)(struct i915_hw_ppgtt * ) ; int (*switch_mm)(struct i915_hw_ppgtt * , struct intel_engine_cs * ) ; void 
(*debug_dump)(struct i915_hw_ppgtt * , struct seq_file * ) ; }; struct i2c_algo_bit_data { void *data ; void (*setsda)(void * , int ) ; void (*setscl)(void * , int ) ; int (*getsda)(void * ) ; int (*getscl)(void * ) ; int (*pre_xfer)(struct i2c_adapter * ) ; void (*post_xfer)(struct i2c_adapter * ) ; int udelay ; int timeout ; }; struct drm_buf { int idx ; int total ; int order ; int used ; unsigned long offset ; void *address ; unsigned long bus_address ; struct drm_buf *next ; int volatile waiting ; int volatile pending ; struct drm_file *file_priv ; int context ; int while_locked ; int list ; int dev_priv_size ; void *dev_private ; }; struct drm_dma_handle { dma_addr_t busaddr ; void *vaddr ; size_t size ; }; struct drm_buf_entry { int buf_size ; int buf_count ; struct drm_buf *buflist ; int seg_count ; int page_order ; struct drm_dma_handle **seglist ; int low_mark ; int high_mark ; }; struct drm_device_dma { struct drm_buf_entry bufs[23U] ; int buf_count ; struct drm_buf **buflist ; int seg_count ; int page_count ; unsigned long *pagelist ; unsigned long byte_count ; int flags ; }; struct drm_sg_mem { unsigned long handle ; void *virtual ; int pages ; struct page **pagelist ; dma_addr_t *busaddr ; }; struct drm_local_map { resource_size_t offset ; unsigned long size ; enum drm_map_type type ; enum drm_map_flags flags ; void *handle ; int mtrr ; }; struct drm_gem_object { struct kref refcount ; unsigned int handle_count ; struct drm_device *dev ; struct file *filp ; struct drm_vma_offset_node vma_node ; size_t size ; int name ; uint32_t read_domains ; uint32_t write_domain ; uint32_t pending_read_domains ; uint32_t pending_write_domain ; struct dma_buf *dma_buf ; struct dma_buf_attachment *import_attach ; }; struct pm_qos_request { struct plist_node node ; int pm_qos_class ; struct delayed_work work ; }; struct pm_qos_flags_request { struct list_head node ; s32 flags ; }; enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE = 
2, DEV_PM_QOS_FLAGS = 3 } ; union __anonunion_data_277 { struct plist_node pnode ; struct pm_qos_flags_request flr ; }; struct dev_pm_qos_request { enum dev_pm_qos_req_type type ; union __anonunion_data_277 data ; struct device *dev ; }; enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2, PM_QOS_SUM = 3 } ; struct pm_qos_constraints { struct plist_head list ; s32 target_value ; s32 default_value ; s32 no_constraint_value ; enum pm_qos_type type ; struct blocking_notifier_head *notifiers ; }; struct pm_qos_flags { struct list_head list ; s32 effective_flags ; }; struct dev_pm_qos { struct pm_qos_constraints resume_latency ; struct pm_qos_constraints latency_tolerance ; struct pm_qos_flags flags ; struct dev_pm_qos_request *resume_latency_req ; struct dev_pm_qos_request *latency_tolerance_req ; struct dev_pm_qos_request *flags_req ; }; enum pipe { INVALID_PIPE = -1, PIPE_A = 0, PIPE_B = 1, PIPE_C = 2, _PIPE_EDP = 3, I915_MAX_PIPES = 3 } ; enum transcoder { TRANSCODER_A = 0, TRANSCODER_B = 1, TRANSCODER_C = 2, TRANSCODER_EDP = 3, I915_MAX_TRANSCODERS = 4 } ; enum plane { PLANE_A = 0, PLANE_B = 1, PLANE_C = 2 } ; enum port { PORT_A = 0, PORT_B = 1, PORT_C = 2, PORT_D = 3, PORT_E = 4, I915_MAX_PORTS = 5 } ; enum hpd_pin { HPD_NONE = 0, HPD_PORT_A = 0, HPD_TV = 0, HPD_CRT = 1, HPD_SDVO_B = 2, HPD_SDVO_C = 3, HPD_PORT_B = 4, HPD_PORT_C = 5, HPD_PORT_D = 6, HPD_NUM_PINS = 7 } ; struct i915_mm_struct; struct i915_mmu_object; struct intel_rps_client { struct list_head link ; unsigned int boosts ; }; struct __anonstruct_mm_278 { spinlock_t lock ; struct list_head request_list ; }; struct drm_i915_file_private { struct drm_i915_private *dev_priv ; struct drm_file *file ; struct __anonstruct_mm_278 mm ; struct idr context_idr ; struct intel_rps_client rps ; struct intel_engine_cs *bsd_ring ; }; enum intel_dpll_id { DPLL_ID_PRIVATE = -1, DPLL_ID_PCH_PLL_A = 0, DPLL_ID_PCH_PLL_B = 1, DPLL_ID_WRPLL1 = 0, DPLL_ID_WRPLL2 = 1, DPLL_ID_SKL_DPLL1 = 0, 
DPLL_ID_SKL_DPLL2 = 1, DPLL_ID_SKL_DPLL3 = 2 } ; struct intel_dpll_hw_state { uint32_t dpll ; uint32_t dpll_md ; uint32_t fp0 ; uint32_t fp1 ; uint32_t wrpll ; uint32_t ctrl1 ; uint32_t cfgcr1 ; uint32_t cfgcr2 ; uint32_t ebb0 ; uint32_t pll0 ; uint32_t pll1 ; uint32_t pll2 ; uint32_t pll3 ; uint32_t pll6 ; uint32_t pll8 ; uint32_t pll10 ; uint32_t pcsdw12 ; }; struct intel_shared_dpll_config { unsigned int crtc_mask ; struct intel_dpll_hw_state hw_state ; }; struct intel_shared_dpll { struct intel_shared_dpll_config config ; struct intel_shared_dpll_config *new_config ; int active ; bool on ; char const *name ; enum intel_dpll_id id ; void (*mode_set)(struct drm_i915_private * , struct intel_shared_dpll * ) ; void (*enable)(struct drm_i915_private * , struct intel_shared_dpll * ) ; void (*disable)(struct drm_i915_private * , struct intel_shared_dpll * ) ; bool (*get_hw_state)(struct drm_i915_private * , struct intel_shared_dpll * , struct intel_dpll_hw_state * ) ; }; struct intel_link_m_n { uint32_t tu ; uint32_t gmch_m ; uint32_t gmch_n ; uint32_t link_m ; uint32_t link_n ; }; struct opregion_header; struct opregion_acpi; struct opregion_swsci; struct opregion_asle; struct intel_opregion { struct opregion_header *header ; struct opregion_acpi *acpi ; struct opregion_swsci *swsci ; u32 swsci_gbda_sub_functions ; u32 swsci_sbcb_sub_functions ; struct opregion_asle *asle ; void *vbt ; u32 *lid_state ; struct work_struct asle_work ; }; struct intel_overlay; struct intel_overlay_error_state; struct drm_i915_fence_reg { struct list_head lru_list ; struct drm_i915_gem_object *obj ; int pin_count ; }; struct sdvo_device_mapping { u8 initialized ; u8 dvo_port ; u8 slave_addr ; u8 dvo_wiring ; u8 i2c_pin ; u8 ddc_pin ; }; struct intel_display_error_state; struct drm_i915_error_object { int page_count ; u32 gtt_offset ; u32 *pages[0U] ; }; struct drm_i915_error_request { long jiffies ; u32 seqno ; u32 tail ; }; union __anonunion____missing_field_name_280 { u64 pdp[4U] ; u32 
pp_dir_base ; }; struct __anonstruct_vm_info_279 { u32 gfx_mode ; union __anonunion____missing_field_name_280 __annonCompField81 ; }; struct drm_i915_error_ring { bool valid ; bool waiting ; int hangcheck_score ; enum intel_ring_hangcheck_action hangcheck_action ; int num_requests ; u32 cpu_ring_head ; u32 cpu_ring_tail ; u32 semaphore_seqno[4U] ; u32 start ; u32 tail ; u32 head ; u32 ctl ; u32 hws ; u32 ipeir ; u32 ipehr ; u32 instdone ; u32 bbstate ; u32 instpm ; u32 instps ; u32 seqno ; u64 bbaddr ; u64 acthd ; u32 fault_reg ; u64 faddr ; u32 rc_psmi ; u32 semaphore_mboxes[4U] ; struct drm_i915_error_object *ringbuffer ; struct drm_i915_error_object *batchbuffer ; struct drm_i915_error_object *wa_batchbuffer ; struct drm_i915_error_object *ctx ; struct drm_i915_error_object *hws_page ; struct drm_i915_error_request *requests ; struct __anonstruct_vm_info_279 vm_info ; pid_t pid ; char comm[16U] ; }; struct drm_i915_error_buffer { u32 size ; u32 name ; u32 rseqno[5U] ; u32 wseqno ; u32 gtt_offset ; u32 read_domains ; u32 write_domain ; signed char fence_reg : 6 ; signed char pinned : 2 ; unsigned char tiling : 2 ; unsigned char dirty : 1 ; unsigned char purgeable : 1 ; unsigned char userptr : 1 ; signed char ring : 4 ; unsigned char cache_level : 3 ; }; struct drm_i915_error_state { struct kref ref ; struct timeval time ; char error_msg[128U] ; u32 reset_count ; u32 suspend_count ; u32 eir ; u32 pgtbl_er ; u32 ier ; u32 gtier[4U] ; u32 ccid ; u32 derrmr ; u32 forcewake ; u32 error ; u32 err_int ; u32 fault_data0 ; u32 fault_data1 ; u32 done_reg ; u32 gac_eco ; u32 gam_ecochk ; u32 gab_ctl ; u32 gfx_mode ; u32 extra_instdone[4U] ; u64 fence[32U] ; struct intel_overlay_error_state *overlay ; struct intel_display_error_state *display ; struct drm_i915_error_object *semaphore_obj ; struct drm_i915_error_ring ring[5U] ; struct drm_i915_error_buffer **active_bo ; struct drm_i915_error_buffer **pinned_bo ; u32 *active_bo_count ; u32 *pinned_bo_count ; u32 vm_count ; }; 
struct intel_connector; struct intel_encoder; struct intel_crtc_state; struct intel_initial_plane_config; struct intel_crtc; struct intel_limit; struct dpll; struct drm_i915_display_funcs { bool (*fbc_enabled)(struct drm_device * ) ; void (*enable_fbc)(struct drm_crtc * ) ; void (*disable_fbc)(struct drm_device * ) ; int (*get_display_clock_speed)(struct drm_device * ) ; int (*get_fifo_size)(struct drm_device * , int ) ; bool (*find_dpll)(struct intel_limit const * , struct intel_crtc_state * , int , int , struct dpll * , struct dpll * ) ; void (*update_wm)(struct drm_crtc * ) ; void (*update_sprite_wm)(struct drm_plane * , struct drm_crtc * , uint32_t , uint32_t , int , bool , bool ) ; void (*modeset_global_resources)(struct drm_atomic_state * ) ; bool (*get_pipe_config)(struct intel_crtc * , struct intel_crtc_state * ) ; void (*get_initial_plane_config)(struct intel_crtc * , struct intel_initial_plane_config * ) ; int (*crtc_compute_clock)(struct intel_crtc * , struct intel_crtc_state * ) ; void (*crtc_enable)(struct drm_crtc * ) ; void (*crtc_disable)(struct drm_crtc * ) ; void (*off)(struct drm_crtc * ) ; void (*audio_codec_enable)(struct drm_connector * , struct intel_encoder * , struct drm_display_mode * ) ; void (*audio_codec_disable)(struct intel_encoder * ) ; void (*fdi_link_train)(struct drm_crtc * ) ; void (*init_clock_gating)(struct drm_device * ) ; int (*queue_flip)(struct drm_device * , struct drm_crtc * , struct drm_framebuffer * , struct drm_i915_gem_object * , struct intel_engine_cs * , uint32_t ) ; void (*update_primary_plane)(struct drm_crtc * , struct drm_framebuffer * , int , int ) ; void (*hpd_irq_setup)(struct drm_device * ) ; int (*setup_backlight)(struct intel_connector * , enum pipe ) ; uint32_t (*get_backlight)(struct intel_connector * ) ; void (*set_backlight)(struct intel_connector * , uint32_t ) ; void (*disable_backlight)(struct intel_connector * ) ; void (*enable_backlight)(struct intel_connector * ) ; }; enum forcewake_domain_id { 
FW_DOMAIN_ID_RENDER = 0, FW_DOMAIN_ID_BLITTER = 1, FW_DOMAIN_ID_MEDIA = 2, FW_DOMAIN_ID_COUNT = 3 } ; enum forcewake_domains { FORCEWAKE_RENDER = 1, FORCEWAKE_BLITTER = 2, FORCEWAKE_MEDIA = 4, FORCEWAKE_ALL = 7 } ; struct intel_uncore_funcs { void (*force_wake_get)(struct drm_i915_private * , enum forcewake_domains ) ; void (*force_wake_put)(struct drm_i915_private * , enum forcewake_domains ) ; uint8_t (*mmio_readb)(struct drm_i915_private * , off_t , bool ) ; uint16_t (*mmio_readw)(struct drm_i915_private * , off_t , bool ) ; uint32_t (*mmio_readl)(struct drm_i915_private * , off_t , bool ) ; uint64_t (*mmio_readq)(struct drm_i915_private * , off_t , bool ) ; void (*mmio_writeb)(struct drm_i915_private * , off_t , uint8_t , bool ) ; void (*mmio_writew)(struct drm_i915_private * , off_t , uint16_t , bool ) ; void (*mmio_writel)(struct drm_i915_private * , off_t , uint32_t , bool ) ; void (*mmio_writeq)(struct drm_i915_private * , off_t , uint64_t , bool ) ; }; struct intel_uncore_forcewake_domain { struct drm_i915_private *i915 ; enum forcewake_domain_id id ; unsigned int wake_count ; struct timer_list timer ; u32 reg_set ; u32 val_set ; u32 val_clear ; u32 reg_ack ; u32 reg_post ; u32 val_reset ; }; struct intel_uncore { spinlock_t lock ; struct intel_uncore_funcs funcs ; unsigned int fifo_count ; enum forcewake_domains fw_domains ; struct intel_uncore_forcewake_domain fw_domain[3U] ; }; enum csr_state { FW_UNINITIALIZED = 0, FW_LOADED = 1, FW_FAILED = 2 } ; struct intel_csr { char const *fw_path ; __be32 *dmc_payload ; uint32_t dmc_fw_size ; uint32_t mmio_count ; uint32_t mmioaddr[8U] ; uint32_t mmiodata[8U] ; enum csr_state state ; }; struct intel_device_info { u32 display_mmio_offset ; u16 device_id ; unsigned char num_pipes : 3 ; u8 num_sprites[3U] ; u8 gen ; u8 ring_mask ; unsigned char is_mobile : 1 ; unsigned char is_i85x : 1 ; unsigned char is_i915g : 1 ; unsigned char is_i945gm : 1 ; unsigned char is_g33 : 1 ; unsigned char need_gfx_hws : 1 ; unsigned 
char is_g4x : 1 ; unsigned char is_pineview : 1 ; unsigned char is_broadwater : 1 ; unsigned char is_crestline : 1 ; unsigned char is_ivybridge : 1 ; unsigned char is_valleyview : 1 ; unsigned char is_haswell : 1 ; unsigned char is_skylake : 1 ; unsigned char is_preliminary : 1 ; unsigned char has_fbc : 1 ; unsigned char has_pipe_cxsr : 1 ; unsigned char has_hotplug : 1 ; unsigned char cursor_needs_physical : 1 ; unsigned char has_overlay : 1 ; unsigned char overlay_needs_physical : 1 ; unsigned char supports_tv : 1 ; unsigned char has_llc : 1 ; unsigned char has_ddi : 1 ; unsigned char has_fpga_dbg : 1 ; int pipe_offsets[4U] ; int trans_offsets[4U] ; int palette_offsets[3U] ; int cursor_offsets[3U] ; u8 slice_total ; u8 subslice_total ; u8 subslice_per_slice ; u8 eu_total ; u8 eu_per_subslice ; u8 subslice_7eu[3U] ; unsigned char has_slice_pg : 1 ; unsigned char has_subslice_pg : 1 ; unsigned char has_eu_pg : 1 ; }; enum i915_cache_level { I915_CACHE_NONE = 0, I915_CACHE_LLC = 1, I915_CACHE_L3_LLC = 2, I915_CACHE_WT = 3 } ; struct i915_ctx_hang_stats { unsigned int batch_pending ; unsigned int batch_active ; unsigned long guilty_ts ; unsigned long ban_period_seconds ; bool banned ; }; struct __anonstruct_legacy_hw_ctx_281 { struct drm_i915_gem_object *rcs_state ; bool initialized ; }; struct __anonstruct_engine_282 { struct drm_i915_gem_object *state ; struct intel_ringbuffer *ringbuf ; int pin_count ; }; struct intel_context { struct kref ref ; int user_handle ; uint8_t remap_slice ; struct drm_i915_file_private *file_priv ; struct i915_ctx_hang_stats hang_stats ; struct i915_hw_ppgtt *ppgtt ; struct __anonstruct_legacy_hw_ctx_281 legacy_hw_ctx ; bool rcs_initialized ; struct __anonstruct_engine_282 engine[5U] ; struct list_head link ; }; struct intel_fbc_work { struct delayed_work work ; struct drm_crtc *crtc ; struct drm_framebuffer *fb ; }; enum no_fbc_reason { FBC_OK = 0, FBC_UNSUPPORTED = 1, FBC_NO_OUTPUT = 2, FBC_STOLEN_TOO_SMALL = 3, FBC_UNSUPPORTED_MODE = 
4, FBC_MODE_TOO_LARGE = 5, FBC_BAD_PLANE = 6, FBC_NOT_TILED = 7, FBC_MULTIPLE_PIPES = 8, FBC_MODULE_PARAM = 9, FBC_CHIP_DEFAULT = 10 } ; struct i915_fbc { unsigned long uncompressed_size ; unsigned int threshold ; unsigned int fb_id ; unsigned int possible_framebuffer_bits ; unsigned int busy_bits ; struct intel_crtc *crtc ; int y ; struct drm_mm_node compressed_fb ; struct drm_mm_node *compressed_llb ; bool false_color ; bool enabled ; struct intel_fbc_work *fbc_work ; enum no_fbc_reason no_fbc_reason ; }; enum drrs_refresh_rate_type { DRRS_HIGH_RR = 0, DRRS_LOW_RR = 1, DRRS_MAX_RR = 2 } ; enum drrs_support_type { DRRS_NOT_SUPPORTED = 0, STATIC_DRRS_SUPPORT = 1, SEAMLESS_DRRS_SUPPORT = 2 } ; struct intel_dp; struct i915_drrs { struct mutex mutex ; struct delayed_work work ; struct intel_dp *dp ; unsigned int busy_frontbuffer_bits ; enum drrs_refresh_rate_type refresh_rate_type ; enum drrs_support_type type ; }; struct i915_psr { struct mutex lock ; bool sink_support ; bool source_ok ; struct intel_dp *enabled ; bool active ; struct delayed_work work ; unsigned int busy_frontbuffer_bits ; bool psr2_support ; bool aux_frame_sync ; }; enum intel_pch { PCH_NONE = 0, PCH_IBX = 1, PCH_CPT = 2, PCH_LPT = 3, PCH_SPT = 4, PCH_NOP = 5 } ; struct intel_fbdev; struct intel_gmbus { struct i2c_adapter adapter ; u32 force_bit ; u32 reg0 ; u32 gpio_reg ; struct i2c_algo_bit_data bit_algo ; struct drm_i915_private *dev_priv ; }; struct i915_suspend_saved_registers { u32 saveDSPARB ; u32 saveLVDS ; u32 savePP_ON_DELAYS ; u32 savePP_OFF_DELAYS ; u32 savePP_ON ; u32 savePP_OFF ; u32 savePP_CONTROL ; u32 savePP_DIVISOR ; u32 saveFBC_CONTROL ; u32 saveCACHE_MODE_0 ; u32 saveMI_ARB_STATE ; u32 saveSWF0[16U] ; u32 saveSWF1[16U] ; u32 saveSWF2[3U] ; uint64_t saveFENCE[32U] ; u32 savePCH_PORT_HOTPLUG ; u16 saveGCDGMBUS ; }; struct vlv_s0ix_state { u32 wr_watermark ; u32 gfx_prio_ctrl ; u32 arb_mode ; u32 gfx_pend_tlb0 ; u32 gfx_pend_tlb1 ; u32 lra_limits[13U] ; u32 media_max_req_count ; 
u32 gfx_max_req_count ; u32 render_hwsp ; u32 ecochk ; u32 bsd_hwsp ; u32 blt_hwsp ; u32 tlb_rd_addr ; u32 g3dctl ; u32 gsckgctl ; u32 mbctl ; u32 ucgctl1 ; u32 ucgctl3 ; u32 rcgctl1 ; u32 rcgctl2 ; u32 rstctl ; u32 misccpctl ; u32 gfxpause ; u32 rpdeuhwtc ; u32 rpdeuc ; u32 ecobus ; u32 pwrdwnupctl ; u32 rp_down_timeout ; u32 rp_deucsw ; u32 rcubmabdtmr ; u32 rcedata ; u32 spare2gh ; u32 gt_imr ; u32 gt_ier ; u32 pm_imr ; u32 pm_ier ; u32 gt_scratch[8U] ; u32 tilectl ; u32 gt_fifoctl ; u32 gtlc_wake_ctrl ; u32 gtlc_survive ; u32 pmwgicz ; u32 gu_ctl0 ; u32 gu_ctl1 ; u32 pcbr ; u32 clock_gate_dis2 ; }; struct intel_rps_ei { u32 cz_clock ; u32 render_c0 ; u32 media_c0 ; }; struct intel_gen6_power_mgmt { struct work_struct work ; bool interrupts_enabled ; u32 pm_iir ; u8 cur_freq ; u8 min_freq_softlimit ; u8 max_freq_softlimit ; u8 max_freq ; u8 min_freq ; u8 idle_freq ; u8 efficient_freq ; u8 rp1_freq ; u8 rp0_freq ; u32 cz_freq ; u8 up_threshold ; u8 down_threshold ; int last_adj ; int power ; spinlock_t client_lock ; struct list_head clients ; bool client_boost ; bool enabled ; struct delayed_work delayed_resume_work ; unsigned int boosts ; struct intel_rps_client semaphores ; struct intel_rps_client mmioflips ; struct intel_rps_ei up_ei ; struct intel_rps_ei down_ei ; struct mutex hw_lock ; }; struct intel_ilk_power_mgmt { u8 cur_delay ; u8 min_delay ; u8 max_delay ; u8 fmax ; u8 fstart ; u64 last_count1 ; unsigned long last_time1 ; unsigned long chipset_power ; u64 last_count2 ; u64 last_time2 ; unsigned long gfx_power ; u8 corr ; int c_m ; int r_t ; }; struct i915_power_well_ops { void (*sync_hw)(struct drm_i915_private * , struct i915_power_well * ) ; void (*enable)(struct drm_i915_private * , struct i915_power_well * ) ; void (*disable)(struct drm_i915_private * , struct i915_power_well * ) ; bool (*is_enabled)(struct drm_i915_private * , struct i915_power_well * ) ; }; struct i915_power_well { char const *name ; bool always_on ; int count ; bool hw_enabled ; 
unsigned long domains ; unsigned long data ; struct i915_power_well_ops const *ops ; }; struct i915_power_domains { bool init_power_on ; bool initializing ; int power_well_count ; struct mutex lock ; int domain_use_count[29U] ; struct i915_power_well *power_wells ; }; struct intel_l3_parity { u32 *remap_info[2U] ; struct work_struct error_work ; int which_slice ; }; struct i915_gem_mm { struct drm_mm stolen ; struct list_head bound_list ; struct list_head unbound_list ; unsigned long stolen_base ; struct i915_hw_ppgtt *aliasing_ppgtt ; struct notifier_block oom_notifier ; struct shrinker shrinker ; bool shrinker_no_lock_stealing ; struct list_head fence_list ; struct delayed_work retire_work ; struct delayed_work idle_work ; bool interruptible ; bool busy ; int bsd_ring_dispatch_index ; uint32_t bit_6_swizzle_x ; uint32_t bit_6_swizzle_y ; spinlock_t object_stat_lock ; size_t object_memory ; u32 object_count ; }; struct i915_gpu_error { struct workqueue_struct *hangcheck_wq ; struct delayed_work hangcheck_work ; spinlock_t lock ; struct drm_i915_error_state *first_error ; unsigned long missed_irq_rings ; atomic_t reset_counter ; wait_queue_head_t reset_queue ; u32 stop_rings ; unsigned int test_irq_rings ; bool reload_in_reset ; }; enum modeset_restore { MODESET_ON_LID_OPEN = 0, MODESET_DONE = 1, MODESET_SUSPENDED = 2 } ; struct ddi_vbt_port_info { uint8_t hdmi_level_shift ; unsigned char supports_dvi : 1 ; unsigned char supports_hdmi : 1 ; unsigned char supports_dp : 1 ; }; enum psr_lines_to_wait { PSR_0_LINES_TO_WAIT = 0, PSR_1_LINE_TO_WAIT = 1, PSR_4_LINES_TO_WAIT = 2, PSR_8_LINES_TO_WAIT = 3 } ; struct __anonstruct_psr_283 { bool full_link ; bool require_aux_wakeup ; int idle_frames ; enum psr_lines_to_wait lines_to_wait ; int tp1_wakeup_time ; int tp2_tp3_wakeup_time ; }; struct __anonstruct_backlight_284 { u16 pwm_freq_hz ; bool present ; bool active_low_pwm ; u8 min_brightness ; }; struct __anonstruct_dsi_285 { u16 port ; u16 panel_id ; struct mipi_config 
*config ; struct mipi_pps_data *pps ; u8 seq_version ; u32 size ; u8 *data ; u8 *sequence[6U] ; }; struct intel_vbt_data { struct drm_display_mode *lfp_lvds_vbt_mode ; struct drm_display_mode *sdvo_lvds_vbt_mode ; unsigned char int_tv_support : 1 ; unsigned char lvds_dither : 1 ; unsigned char lvds_vbt : 1 ; unsigned char int_crt_support : 1 ; unsigned char lvds_use_ssc : 1 ; unsigned char display_clock_mode : 1 ; unsigned char fdi_rx_polarity_inverted : 1 ; unsigned char has_mipi : 1 ; int lvds_ssc_freq ; unsigned int bios_lvds_val ; enum drrs_support_type drrs_type ; int edp_rate ; int edp_lanes ; int edp_preemphasis ; int edp_vswing ; bool edp_initialized ; bool edp_support ; int edp_bpp ; struct edp_power_seq edp_pps ; struct __anonstruct_psr_283 psr ; struct __anonstruct_backlight_284 backlight ; struct __anonstruct_dsi_285 dsi ; int crt_ddc_pin ; int child_dev_num ; union child_device_config *child_dev ; struct ddi_vbt_port_info ddi_port_info[5U] ; }; enum intel_ddb_partitioning { INTEL_DDB_PART_1_2 = 0, INTEL_DDB_PART_5_6 = 1 } ; struct intel_wm_level { bool enable ; uint32_t pri_val ; uint32_t spr_val ; uint32_t cur_val ; uint32_t fbc_val ; }; struct ilk_wm_values { uint32_t wm_pipe[3U] ; uint32_t wm_lp[3U] ; uint32_t wm_lp_spr[3U] ; uint32_t wm_linetime[3U] ; bool enable_fbc_wm ; enum intel_ddb_partitioning partitioning ; }; struct __anonstruct_pipe_286 { uint16_t primary ; uint16_t sprite[2U] ; uint8_t cursor ; }; struct __anonstruct_sr_287 { uint16_t plane ; uint8_t cursor ; }; struct __anonstruct_ddl_288 { uint8_t cursor ; uint8_t sprite[2U] ; uint8_t primary ; }; struct vlv_wm_values { struct __anonstruct_pipe_286 pipe[3U] ; struct __anonstruct_sr_287 sr ; struct __anonstruct_ddl_288 ddl[3U] ; }; struct skl_ddb_entry { uint16_t start ; uint16_t end ; }; struct skl_ddb_allocation { struct skl_ddb_entry pipe[3U] ; struct skl_ddb_entry plane[3U][4U] ; struct skl_ddb_entry y_plane[3U][4U] ; struct skl_ddb_entry cursor[3U] ; }; struct skl_wm_values { bool 
dirty[3U] ; struct skl_ddb_allocation ddb ; uint32_t wm_linetime[3U] ; uint32_t plane[3U][4U][8U] ; uint32_t cursor[3U][8U] ; uint32_t plane_trans[3U][4U] ; uint32_t cursor_trans[3U] ; }; struct skl_wm_level { bool plane_en[4U] ; bool cursor_en ; uint16_t plane_res_b[4U] ; uint8_t plane_res_l[4U] ; uint16_t cursor_res_b ; uint8_t cursor_res_l ; }; struct i915_runtime_pm { bool suspended ; bool irqs_enabled ; }; enum intel_pipe_crc_source { INTEL_PIPE_CRC_SOURCE_NONE = 0, INTEL_PIPE_CRC_SOURCE_PLANE1 = 1, INTEL_PIPE_CRC_SOURCE_PLANE2 = 2, INTEL_PIPE_CRC_SOURCE_PF = 3, INTEL_PIPE_CRC_SOURCE_PIPE = 4, INTEL_PIPE_CRC_SOURCE_TV = 5, INTEL_PIPE_CRC_SOURCE_DP_B = 6, INTEL_PIPE_CRC_SOURCE_DP_C = 7, INTEL_PIPE_CRC_SOURCE_DP_D = 8, INTEL_PIPE_CRC_SOURCE_AUTO = 9, INTEL_PIPE_CRC_SOURCE_MAX = 10 } ; struct intel_pipe_crc_entry { uint32_t frame ; uint32_t crc[5U] ; }; struct intel_pipe_crc { spinlock_t lock ; bool opened ; struct intel_pipe_crc_entry *entries ; enum intel_pipe_crc_source source ; int head ; int tail ; wait_queue_head_t wq ; }; struct i915_frontbuffer_tracking { struct mutex lock ; unsigned int busy_bits ; unsigned int flip_bits ; }; struct i915_wa_reg { u32 addr ; u32 value ; u32 mask ; }; struct i915_workarounds { struct i915_wa_reg reg[16U] ; u32 count ; }; struct i915_virtual_gpu { bool active ; }; union __anonunion____missing_field_name_289 { u32 irq_mask ; u32 de_irq_mask[3U] ; }; struct __anonstruct_hpd_stats_290 { unsigned long hpd_last_jiffies ; int hpd_cnt ; int hpd_mark ; }; union __anonunion____missing_field_name_292 { struct ilk_wm_values hw ; struct skl_wm_values skl_hw ; struct vlv_wm_values vlv ; }; struct __anonstruct_wm_291 { uint16_t pri_latency[5U] ; uint16_t spr_latency[5U] ; uint16_t cur_latency[5U] ; uint16_t skl_latency[8U] ; struct skl_wm_values skl_results ; union __anonunion____missing_field_name_292 __annonCompField83 ; }; struct intel_digital_port; struct __anonstruct_gt_293 { int (*execbuf_submit)(struct drm_device * , struct 
drm_file * , struct intel_engine_cs * , struct intel_context * , struct drm_i915_gem_execbuffer2 * , struct list_head * , struct drm_i915_gem_object * , u64 , u32 ) ; int (*init_rings)(struct drm_device * ) ; void (*cleanup_ring)(struct intel_engine_cs * ) ; void (*stop_ring)(struct intel_engine_cs * ) ; }; struct drm_i915_private { struct drm_device *dev ; struct kmem_cache *objects ; struct kmem_cache *vmas ; struct kmem_cache *requests ; struct intel_device_info const info ; int relative_constants_mode ; void *regs ; struct intel_uncore uncore ; struct i915_virtual_gpu vgpu ; struct intel_csr csr ; struct mutex csr_lock ; struct intel_gmbus gmbus[7U] ; struct mutex gmbus_mutex ; uint32_t gpio_mmio_base ; uint32_t mipi_mmio_base ; wait_queue_head_t gmbus_wait_queue ; struct pci_dev *bridge_dev ; struct intel_engine_cs ring[5U] ; struct drm_i915_gem_object *semaphore_obj ; uint32_t last_seqno ; uint32_t next_seqno ; struct drm_dma_handle *status_page_dmah ; struct resource mch_res ; spinlock_t irq_lock ; spinlock_t mmio_flip_lock ; bool display_irqs_enabled ; struct pm_qos_request pm_qos ; struct mutex sb_lock ; union __anonunion____missing_field_name_289 __annonCompField82 ; u32 gt_irq_mask ; u32 pm_irq_mask ; u32 pm_rps_events ; u32 pipestat_irq_mask[3U] ; struct work_struct hotplug_work ; struct __anonstruct_hpd_stats_290 hpd_stats[7U] ; u32 hpd_event_bits ; struct delayed_work hotplug_reenable_work ; struct i915_fbc fbc ; struct i915_drrs drrs ; struct intel_opregion opregion ; struct intel_vbt_data vbt ; bool preserve_bios_swizzle ; struct intel_overlay *overlay ; struct mutex backlight_lock ; bool no_aux_handshake ; struct mutex pps_mutex ; struct drm_i915_fence_reg fence_regs[32U] ; int fence_reg_start ; int num_fence_regs ; unsigned int fsb_freq ; unsigned int mem_freq ; unsigned int is_ddr3 ; unsigned int skl_boot_cdclk ; unsigned int cdclk_freq ; unsigned int hpll_freq ; struct workqueue_struct *wq ; struct drm_i915_display_funcs display ; enum intel_pch 
pch_type ; unsigned short pch_id ; unsigned long quirks ; enum modeset_restore modeset_restore ; struct mutex modeset_restore_lock ; struct list_head vm_list ; struct i915_gtt gtt ; struct i915_gem_mm mm ; struct hlist_head mm_structs[128U] ; struct mutex mm_lock ; struct sdvo_device_mapping sdvo_mappings[2U] ; struct drm_crtc *plane_to_crtc_mapping[3U] ; struct drm_crtc *pipe_to_crtc_mapping[3U] ; wait_queue_head_t pending_flip_queue ; struct intel_pipe_crc pipe_crc[3U] ; int num_shared_dpll ; struct intel_shared_dpll shared_dplls[3U] ; int dpio_phy_iosf_port[2U] ; struct i915_workarounds workarounds ; bool render_reclock_avail ; bool lvds_downclock_avail ; int lvds_downclock ; struct i915_frontbuffer_tracking fb_tracking ; u16 orig_clock ; bool mchbar_need_disable ; struct intel_l3_parity l3_parity ; size_t ellc_size ; struct intel_gen6_power_mgmt rps ; struct intel_ilk_power_mgmt ips ; struct i915_power_domains power_domains ; struct i915_psr psr ; struct i915_gpu_error gpu_error ; struct drm_i915_gem_object *vlv_pctx ; struct intel_fbdev *fbdev ; struct work_struct fbdev_suspend_work ; struct drm_property *broadcast_rgb_property ; struct drm_property *force_audio_property ; bool audio_component_registered ; uint32_t hw_context_size ; struct list_head context_list ; u32 fdi_rx_config ; u32 chv_phy_control ; u32 suspend_count ; struct i915_suspend_saved_registers regfile ; struct vlv_s0ix_state vlv_s0ix_state ; struct __anonstruct_wm_291 wm ; struct i915_runtime_pm pm ; struct intel_digital_port *hpd_irq_port[5U] ; u32 long_hpd_port_mask ; u32 short_hpd_port_mask ; struct work_struct dig_port_work ; struct workqueue_struct *dp_wq ; struct __anonstruct_gt_293 gt ; bool edp_low_vswing ; }; enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, HDMI_AUDIO_OFF = -1, HDMI_AUDIO_AUTO = 0, HDMI_AUDIO_ON = 1 } ; struct drm_i915_gem_object_ops { int (*get_pages)(struct drm_i915_gem_object * ) ; void (*put_pages)(struct drm_i915_gem_object * ) ; int (*dmabuf_export)(struct 
drm_i915_gem_object * ) ; void (*release)(struct drm_i915_gem_object * ) ; }; struct get_page { struct scatterlist *sg ; int last ; }; struct i915_gem_userptr { uintptr_t ptr ; unsigned char read_only : 1 ; unsigned char workers : 4 ; struct i915_mm_struct *mm ; struct i915_mmu_object *mmu_object ; struct work_struct *work ; }; union __anonunion____missing_field_name_294 { struct drm_dma_handle *phys_handle ; struct i915_gem_userptr userptr ; }; struct drm_i915_gem_object { struct drm_gem_object base ; struct drm_i915_gem_object_ops const *ops ; struct list_head vma_list ; struct drm_mm_node *stolen ; struct list_head global_list ; struct list_head ring_list[5U] ; struct list_head obj_exec_link ; struct list_head batch_pool_link ; unsigned char active : 5 ; unsigned char dirty : 1 ; signed char fence_reg : 6 ; unsigned char madv : 2 ; unsigned char tiling_mode : 2 ; unsigned char fence_dirty : 1 ; unsigned char map_and_fenceable : 1 ; unsigned char fault_mappable : 1 ; unsigned char gt_ro : 1 ; unsigned char cache_level : 3 ; unsigned char cache_dirty : 1 ; unsigned char has_dma_mapping : 1 ; unsigned short frontbuffer_bits : 12 ; unsigned int pin_display ; struct sg_table *pages ; int pages_pin_count ; struct get_page get_page ; void *dma_buf_vmapping ; int vmapping_count ; struct drm_i915_gem_request *last_read_req[5U] ; struct drm_i915_gem_request *last_write_req ; struct drm_i915_gem_request *last_fenced_req ; uint32_t stride ; unsigned long framebuffer_references ; unsigned long *bit_17 ; union __anonunion____missing_field_name_294 __annonCompField84 ; }; struct drm_i915_gem_request { struct kref ref ; struct drm_i915_private *i915 ; struct intel_engine_cs *ring ; uint32_t seqno ; u32 head ; u32 postfix ; u32 tail ; struct intel_context *ctx ; struct intel_ringbuffer *ringbuf ; struct drm_i915_gem_object *batch_obj ; unsigned long emitted_jiffies ; struct list_head list ; struct drm_i915_file_private *file_priv ; struct list_head client_list ; struct pid *pid 
; struct list_head execlist_link ; int elsp_submitted ; }; struct tracepoint_func { void *func ; void *data ; }; struct tracepoint { char const *name ; struct static_key key ; void (*regfunc)(void) ; void (*unregfunc)(void) ; struct tracepoint_func *funcs ; }; struct trace_enum_map { char const *system ; char const *enum_string ; unsigned long enum_value ; }; struct drm_fb_offset { int x ; int y ; }; struct drm_fb_helper_crtc { struct drm_mode_set mode_set ; struct drm_display_mode *desired_mode ; int x ; int y ; }; struct drm_fb_helper_surface_size { u32 fb_width ; u32 fb_height ; u32 surface_width ; u32 surface_height ; u32 surface_bpp ; u32 surface_depth ; }; struct drm_fb_helper_funcs { void (*gamma_set)(struct drm_crtc * , u16 , u16 , u16 , int ) ; void (*gamma_get)(struct drm_crtc * , u16 * , u16 * , u16 * , int ) ; int (*fb_probe)(struct drm_fb_helper * , struct drm_fb_helper_surface_size * ) ; bool (*initial_config)(struct drm_fb_helper * , struct drm_fb_helper_crtc ** , struct drm_display_mode ** , struct drm_fb_offset * , bool * , int , int ) ; }; struct drm_fb_helper_connector { struct drm_connector *connector ; }; struct drm_fb_helper { struct drm_framebuffer *fb ; struct drm_device *dev ; int crtc_count ; struct drm_fb_helper_crtc *crtc_info ; int connector_count ; int connector_info_alloc_count ; struct drm_fb_helper_connector **connector_info ; struct drm_fb_helper_funcs const *funcs ; struct fb_info *fbdev ; u32 pseudo_palette[17U] ; struct list_head kernel_fb_list ; bool delayed_hotplug ; }; struct drm_dp_aux_msg { unsigned int address ; u8 request ; u8 reply ; void *buffer ; size_t size ; }; struct drm_dp_aux { char const *name ; struct i2c_adapter ddc ; struct device *dev ; struct mutex hw_mutex ; ssize_t (*transfer)(struct drm_dp_aux * , struct drm_dp_aux_msg * ) ; unsigned int i2c_nack_count ; unsigned int i2c_defer_count ; }; struct drm_dp_mst_branch; struct drm_dp_vcpi { int vcpi ; int pbn ; int aligned_pbn ; int num_slots ; }; struct 
drm_dp_mst_port { struct kref kref ; bool guid_valid ; u8 guid[16U] ; u8 port_num ; bool input ; bool mcs ; bool ddps ; u8 pdt ; bool ldps ; u8 dpcd_rev ; u8 num_sdp_streams ; u8 num_sdp_stream_sinks ; uint16_t available_pbn ; struct list_head next ; struct drm_dp_mst_branch *mstb ; struct drm_dp_aux aux ; struct drm_dp_mst_branch *parent ; struct drm_dp_vcpi vcpi ; struct drm_connector *connector ; struct drm_dp_mst_topology_mgr *mgr ; struct edid *cached_edid ; }; struct drm_dp_sideband_msg_tx; struct drm_dp_mst_branch { struct kref kref ; u8 rad[8U] ; u8 lct ; int num_ports ; int msg_slots ; struct list_head ports ; struct drm_dp_mst_port *port_parent ; struct drm_dp_mst_topology_mgr *mgr ; struct drm_dp_sideband_msg_tx *tx_slots[2U] ; int last_seqno ; bool link_address_sent ; }; struct drm_dp_sideband_msg_hdr { u8 lct ; u8 lcr ; u8 rad[8U] ; bool broadcast ; bool path_msg ; u8 msg_len ; bool somt ; bool eomt ; bool seqno ; }; struct drm_dp_nak_reply { u8 guid[16U] ; u8 reason ; u8 nak_data ; }; struct drm_dp_link_addr_reply_port { bool input_port ; u8 peer_device_type ; u8 port_number ; bool mcs ; bool ddps ; bool legacy_device_plug_status ; u8 dpcd_revision ; u8 peer_guid[16U] ; u8 num_sdp_streams ; u8 num_sdp_stream_sinks ; }; struct drm_dp_link_address_ack_reply { u8 guid[16U] ; u8 nports ; struct drm_dp_link_addr_reply_port ports[16U] ; }; struct drm_dp_remote_dpcd_read_ack_reply { u8 port_number ; u8 num_bytes ; u8 bytes[255U] ; }; struct drm_dp_remote_dpcd_write_ack_reply { u8 port_number ; }; struct drm_dp_remote_dpcd_write_nak_reply { u8 port_number ; u8 reason ; u8 bytes_written_before_failure ; }; struct drm_dp_remote_i2c_read_ack_reply { u8 port_number ; u8 num_bytes ; u8 bytes[255U] ; }; struct drm_dp_remote_i2c_read_nak_reply { u8 port_number ; u8 nak_reason ; u8 i2c_nak_transaction ; }; struct drm_dp_remote_i2c_write_ack_reply { u8 port_number ; }; struct drm_dp_sideband_msg_rx { u8 chunk[48U] ; u8 msg[256U] ; u8 curchunk_len ; u8 curchunk_idx ; 
u8 curchunk_hdrlen ; u8 curlen ; bool have_somt ; bool have_eomt ; struct drm_dp_sideband_msg_hdr initial_hdr ; }; struct drm_dp_allocate_payload_ack_reply { u8 port_number ; u8 vcpi ; u16 allocated_pbn ; }; struct drm_dp_enum_path_resources_ack_reply { u8 port_number ; u16 full_payload_bw_number ; u16 avail_payload_bw_number ; }; struct drm_dp_port_number_rep { u8 port_number ; }; struct drm_dp_query_payload_ack_reply { u8 port_number ; u8 allocated_pbn ; }; union ack_replies { struct drm_dp_nak_reply nak ; struct drm_dp_link_address_ack_reply link_addr ; struct drm_dp_port_number_rep port_number ; struct drm_dp_enum_path_resources_ack_reply path_resources ; struct drm_dp_allocate_payload_ack_reply allocate_payload ; struct drm_dp_query_payload_ack_reply query_payload ; struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack ; struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack ; struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack ; struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack ; struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack ; struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack ; }; struct drm_dp_sideband_msg_reply_body { u8 reply_type ; u8 req_type ; union ack_replies u ; }; struct drm_dp_sideband_msg_tx { u8 msg[256U] ; u8 chunk[48U] ; u8 cur_offset ; u8 cur_len ; struct drm_dp_mst_branch *dst ; struct list_head next ; int seqno ; int state ; bool path_msg ; struct drm_dp_sideband_msg_reply_body reply ; }; struct drm_dp_mst_topology_cbs { struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr * , struct drm_dp_mst_port * , char const * ) ; void (*destroy_connector)(struct drm_dp_mst_topology_mgr * , struct drm_connector * ) ; void (*hotplug)(struct drm_dp_mst_topology_mgr * ) ; }; struct drm_dp_payload { int payload_state ; int start_slot ; int num_slots ; int vcpi ; }; struct drm_dp_mst_topology_mgr { struct device *dev ; struct drm_dp_mst_topology_cbs *cbs ; int 
max_dpcd_transaction_bytes ; struct drm_dp_aux *aux ; int max_payloads ; int conn_base_id ; struct drm_dp_sideband_msg_rx down_rep_recv ; struct drm_dp_sideband_msg_rx up_req_recv ; struct mutex lock ; bool mst_state ; struct drm_dp_mst_branch *mst_primary ; bool guid_valid ; u8 guid[16U] ; u8 dpcd[15U] ; u8 sink_count ; int pbn_div ; int total_slots ; int avail_slots ; int total_pbn ; struct mutex qlock ; struct list_head tx_msg_downq ; struct list_head tx_msg_upq ; bool tx_down_in_progress ; bool tx_up_in_progress ; struct mutex payload_lock ; struct drm_dp_vcpi **proposed_vcpis ; struct drm_dp_payload *payloads ; unsigned long payload_mask ; unsigned long vcpi_mask ; wait_queue_head_t tx_waitq ; struct work_struct work ; struct work_struct tx_work ; struct list_head destroy_connector_list ; struct mutex destroy_connector_lock ; struct work_struct destroy_connector_work ; }; enum intel_output_type { INTEL_OUTPUT_UNUSED = 0, INTEL_OUTPUT_ANALOG = 1, INTEL_OUTPUT_DVO = 2, INTEL_OUTPUT_SDVO = 3, INTEL_OUTPUT_LVDS = 4, INTEL_OUTPUT_TVOUT = 5, INTEL_OUTPUT_HDMI = 6, INTEL_OUTPUT_DISPLAYPORT = 7, INTEL_OUTPUT_EDP = 8, INTEL_OUTPUT_DSI = 9, INTEL_OUTPUT_UNKNOWN = 10, INTEL_OUTPUT_DP_MST = 11 } ; struct intel_framebuffer { struct drm_framebuffer base ; struct drm_i915_gem_object *obj ; }; struct intel_fbdev { struct drm_fb_helper helper ; struct intel_framebuffer *fb ; struct list_head fbdev_list ; struct drm_display_mode *our_mode ; int preferred_bpp ; }; struct intel_encoder { struct drm_encoder base ; struct intel_crtc *new_crtc ; enum intel_output_type type ; unsigned int cloneable ; bool connectors_active ; void (*hot_plug)(struct intel_encoder * ) ; bool (*compute_config)(struct intel_encoder * , struct intel_crtc_state * ) ; void (*pre_pll_enable)(struct intel_encoder * ) ; void (*pre_enable)(struct intel_encoder * ) ; void (*enable)(struct intel_encoder * ) ; void (*mode_set)(struct intel_encoder * ) ; void (*disable)(struct intel_encoder * ) ; void 
(*post_disable)(struct intel_encoder * ) ; bool (*get_hw_state)(struct intel_encoder * , enum pipe * ) ; void (*get_config)(struct intel_encoder * , struct intel_crtc_state * ) ; void (*suspend)(struct intel_encoder * ) ; int crtc_mask ; enum hpd_pin hpd_pin ; }; struct __anonstruct_backlight_300 { bool present ; u32 level ; u32 min ; u32 max ; bool enabled ; bool combination_mode ; bool active_low_pwm ; struct backlight_device *device ; }; struct intel_panel { struct drm_display_mode *fixed_mode ; struct drm_display_mode *downclock_mode ; int fitting_mode ; struct __anonstruct_backlight_300 backlight ; void (*backlight_power)(struct intel_connector * , bool ) ; }; struct intel_connector { struct drm_connector base ; struct intel_encoder *encoder ; struct intel_encoder *new_encoder ; bool (*get_hw_state)(struct intel_connector * ) ; void (*unregister)(struct intel_connector * ) ; struct intel_panel panel ; struct edid *edid ; struct edid *detect_edid ; u8 polled ; void *port ; struct intel_dp *mst_port ; }; struct dpll { int n ; int m1 ; int m2 ; int p1 ; int p2 ; int dot ; int vco ; int m ; int p ; }; struct intel_initial_plane_config { struct intel_framebuffer *fb ; unsigned int tiling ; int size ; u32 base ; }; struct intel_scaler { int id ; int in_use ; uint32_t mode ; }; struct intel_crtc_scaler_state { struct intel_scaler scalers[2U] ; unsigned int scaler_users ; int scaler_id ; }; struct __anonstruct_gmch_pfit_301 { u32 control ; u32 pgm_ratios ; u32 lvds_border_bits ; }; struct __anonstruct_pch_pfit_302 { u32 pos ; u32 size ; bool enabled ; bool force_thru ; }; struct intel_crtc_state { struct drm_crtc_state base ; unsigned long quirks ; int pipe_src_w ; int pipe_src_h ; bool has_pch_encoder ; bool has_infoframe ; enum transcoder cpu_transcoder ; bool limited_color_range ; bool has_dp_encoder ; bool has_hdmi_sink ; bool has_audio ; bool dither ; bool clock_set ; bool sdvo_tv_clock ; bool bw_constrained ; struct dpll dpll ; enum intel_dpll_id shared_dpll ; 
uint32_t ddi_pll_sel ; struct intel_dpll_hw_state dpll_hw_state ; int pipe_bpp ; struct intel_link_m_n dp_m_n ; struct intel_link_m_n dp_m2_n2 ; bool has_drrs ; int port_clock ; unsigned int pixel_multiplier ; struct __anonstruct_gmch_pfit_301 gmch_pfit ; struct __anonstruct_pch_pfit_302 pch_pfit ; int fdi_lanes ; struct intel_link_m_n fdi_m_n ; bool ips_enabled ; bool double_wide ; bool dp_encoder_is_mst ; int pbn ; struct intel_crtc_scaler_state scaler_state ; }; struct intel_pipe_wm { struct intel_wm_level wm[5U] ; uint32_t linetime ; bool fbc_wm_enabled ; bool pipe_enabled ; bool sprites_enabled ; bool sprites_scaled ; }; struct skl_pipe_wm { struct skl_wm_level wm[8U] ; struct skl_wm_level trans_wm ; uint32_t linetime ; }; struct intel_crtc_atomic_commit { bool evade ; unsigned int start_vbl_count ; bool wait_for_flips ; bool disable_fbc ; bool disable_ips ; bool pre_disable_primary ; bool update_wm ; unsigned int disabled_planes ; unsigned int fb_bits ; bool wait_vblank ; bool update_fbc ; bool post_enable_primary ; unsigned int update_sprite_watermarks ; }; struct intel_unpin_work; struct __anonstruct_wm_303 { struct intel_pipe_wm active ; struct skl_pipe_wm skl_active ; }; struct intel_crtc { struct drm_crtc base ; enum pipe pipe ; enum plane plane ; u8 lut_r[256U] ; u8 lut_g[256U] ; u8 lut_b[256U] ; bool active ; unsigned long enabled_power_domains ; bool lowfreq_avail ; struct intel_overlay *overlay ; struct intel_unpin_work *unpin_work ; atomic_t unpin_work_count ; unsigned long dspaddr_offset ; struct drm_i915_gem_object *cursor_bo ; uint32_t cursor_addr ; uint32_t cursor_cntl ; uint32_t cursor_size ; uint32_t cursor_base ; struct intel_initial_plane_config plane_config ; struct intel_crtc_state *config ; bool new_enabled ; unsigned int reset_counter ; bool cpu_fifo_underrun_disabled ; bool pch_fifo_underrun_disabled ; struct __anonstruct_wm_303 wm ; int scanline_offset ; struct intel_crtc_atomic_commit atomic ; int num_scalers ; }; struct intel_hdmi { 
u32 hdmi_reg ; int ddc_bus ; uint32_t color_range ; bool color_range_auto ; bool has_hdmi_sink ; bool has_audio ; enum hdmi_force_audio force_audio ; bool rgb_quant_range_selectable ; enum hdmi_picture_aspect aspect_ratio ; void (*write_infoframe)(struct drm_encoder * , enum hdmi_infoframe_type , void const * , ssize_t ) ; void (*set_infoframes)(struct drm_encoder * , bool , struct drm_display_mode * ) ; bool (*infoframe_enabled)(struct drm_encoder * ) ; }; struct intel_dp_mst_encoder; struct intel_dp { uint32_t output_reg ; uint32_t aux_ch_ctl_reg ; uint32_t DP ; bool has_audio ; enum hdmi_force_audio force_audio ; uint32_t color_range ; bool color_range_auto ; uint8_t link_bw ; uint8_t rate_select ; uint8_t lane_count ; uint8_t dpcd[15U] ; uint8_t psr_dpcd[2U] ; uint8_t downstream_ports[16U] ; uint8_t num_sink_rates ; int sink_rates[8U] ; struct drm_dp_aux aux ; uint8_t train_set[4U] ; int panel_power_up_delay ; int panel_power_down_delay ; int panel_power_cycle_delay ; int backlight_on_delay ; int backlight_off_delay ; struct delayed_work panel_vdd_work ; bool want_panel_vdd ; unsigned long last_power_cycle ; unsigned long last_power_on ; unsigned long last_backlight_off ; struct notifier_block edp_notifier ; enum pipe pps_pipe ; struct edp_power_seq pps_delays ; bool use_tps3 ; bool can_mst ; bool is_mst ; int active_mst_links ; struct intel_connector *attached_connector ; struct intel_dp_mst_encoder *mst_encoders[3U] ; struct drm_dp_mst_topology_mgr mst_mgr ; uint32_t (*get_aux_clock_divider)(struct intel_dp * , int ) ; uint32_t (*get_aux_send_ctl)(struct intel_dp * , bool , int , uint32_t ) ; bool train_set_valid ; unsigned long compliance_test_type ; unsigned long compliance_test_data ; bool compliance_test_active ; }; struct intel_digital_port { struct intel_encoder base ; enum port port ; u32 saved_port_bits ; struct intel_dp dp ; struct intel_hdmi hdmi ; enum irqreturn (*hpd_pulse)(struct intel_digital_port * , bool ) ; }; struct intel_dp_mst_encoder { 
struct intel_encoder base ; enum pipe pipe ; struct intel_digital_port *primary ; void *port ; }; struct intel_unpin_work { struct work_struct work ; struct drm_crtc *crtc ; struct drm_framebuffer *old_fb ; struct drm_i915_gem_object *pending_flip_obj ; struct drm_pending_vblank_event *event ; atomic_t pending ; u32 flip_count ; u32 gtt_offset ; struct drm_i915_gem_request *flip_queued_req ; int flip_queued_vblank ; int flip_ready_vblank ; bool enable_stall_check ; }; struct i915_params { int modeset ; int panel_ignore_lid ; int semaphores ; unsigned int lvds_downclock ; int lvds_channel_mode ; int panel_use_ssc ; int vbt_sdvo_panel_type ; int enable_rc6 ; int enable_fbc ; int enable_ppgtt ; int enable_execlists ; int enable_psr ; unsigned int preliminary_hw_support ; int disable_power_well ; int enable_ips ; int invert_brightness ; int enable_cmd_parser ; bool enable_hangcheck ; bool fastboot ; bool prefault_disable ; bool load_detect_test ; bool reset ; bool disable_display ; bool disable_vtd_wa ; int use_mmio_flip ; int mmio_debug ; bool verbose_state_checks ; bool nuclear_pageflip ; int edp_vswing ; }; typedef bool ldv_func_ret_type; typedef bool ldv_func_ret_type___0; typedef bool ldv_func_ret_type___1; typedef bool ldv_func_ret_type___2; typedef bool ldv_func_ret_type___3; typedef bool ldv_func_ret_type___4; typedef bool ldv_func_ret_type___5; typedef bool ldv_func_ret_type___6; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct drm_i915_error_state_buf { struct drm_i915_private *i915 ; unsigned int bytes ; unsigned int size ; int err ; u8 *buf ; loff_t start ; loff_t pos ; }; struct i915_error_state_file_priv { struct drm_device *dev ; struct drm_i915_error_state *error ; }; typedef bool ldv_func_ret_type___7; typedef bool ldv_func_ret_type___8; typedef int pao_T__; typedef int 
pao_T_____0; enum hrtimer_restart; struct clk; struct cpufreq_governor; struct cpufreq_cpuinfo { unsigned int max_freq ; unsigned int min_freq ; unsigned int transition_latency ; }; struct cpufreq_real_policy { unsigned int min ; unsigned int max ; unsigned int policy ; struct cpufreq_governor *governor ; }; struct cpufreq_frequency_table; struct cpufreq_stats; struct cpufreq_policy { cpumask_var_t cpus ; cpumask_var_t related_cpus ; unsigned int shared_type ; unsigned int cpu ; unsigned int kobj_cpu ; struct clk *clk ; struct cpufreq_cpuinfo cpuinfo ; unsigned int min ; unsigned int max ; unsigned int cur ; unsigned int restore_freq ; unsigned int suspend_freq ; unsigned int policy ; struct cpufreq_governor *governor ; void *governor_data ; bool governor_enabled ; char last_governor[16U] ; struct work_struct update ; struct cpufreq_real_policy user_policy ; struct cpufreq_frequency_table *freq_table ; struct list_head policy_list ; struct kobject kobj ; struct completion kobj_unregister ; struct rw_semaphore rwsem ; bool transition_ongoing ; spinlock_t transition_lock ; wait_queue_head_t transition_wait ; struct task_struct *transition_task ; struct cpufreq_stats *stats ; void *driver_data ; }; struct cpufreq_governor { char name[16U] ; int initialized ; int (*governor)(struct cpufreq_policy * , unsigned int ) ; ssize_t (*show_setspeed)(struct cpufreq_policy * , char * ) ; int (*store_setspeed)(struct cpufreq_policy * , unsigned int ) ; unsigned int max_transition_latency ; struct list_head governor_list ; struct module *owner ; }; struct cpufreq_frequency_table { unsigned int flags ; unsigned int driver_data ; unsigned int frequency ; }; struct drm_intel_sprite_colorkey { __u32 plane_id ; __u32 min_value ; __u32 channel_mask ; __u32 max_value ; __u32 flags ; }; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct drm_rect { int x1 ; int y1 ; int x2 ; int y2 ; }; struct intel_plane_state { struct drm_plane_state base ; 
struct drm_rect src ; struct drm_rect dst ; struct drm_rect clip ; bool visible ; int scaler_id ; }; struct intel_plane_wm_parameters { uint32_t horiz_pixels ; uint32_t vert_pixels ; uint8_t bytes_per_pixel ; uint8_t y_bytes_per_pixel ; bool enabled ; bool scaled ; u64 tiling ; unsigned int rotation ; }; struct intel_plane { struct drm_plane base ; int plane ; enum pipe pipe ; bool can_scale ; int max_downscale ; struct drm_intel_sprite_colorkey ckey ; struct intel_plane_wm_parameters wm ; void (*update_plane)(struct drm_plane * , struct drm_crtc * , struct drm_framebuffer * , int , int , unsigned int , unsigned int , uint32_t , uint32_t , uint32_t , uint32_t ) ; void (*disable_plane)(struct drm_plane * , struct drm_crtc * , bool ) ; int (*check_plane)(struct drm_plane * , struct intel_plane_state * ) ; void (*commit_plane)(struct drm_plane * , struct intel_plane_state * ) ; }; struct intel_watermark_params { unsigned long fifo_size ; unsigned long max_wm ; unsigned long default_wm ; unsigned long guard_size ; unsigned long cacheline_size ; }; struct cxsr_latency { int is_desktop ; int is_ddr3 ; unsigned long fsb_freq ; unsigned long mem_freq ; unsigned long display_sr ; unsigned long display_hpll_disable ; unsigned long cursor_sr ; unsigned long cursor_hpll_disable ; }; union __anonunion___u_416 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_418 { struct tracepoint_func *__val ; char __c[1U] ; }; struct skl_pipe_wm_parameters { bool active ; uint32_t pipe_htotal ; uint32_t pixel_rate ; struct intel_plane_wm_parameters plane[4U] ; struct intel_plane_wm_parameters cursor ; }; struct ilk_pipe_wm_parameters { bool active ; uint32_t pipe_htotal ; uint32_t pixel_rate ; struct intel_plane_wm_parameters pri ; struct intel_plane_wm_parameters spr ; struct intel_plane_wm_parameters cur ; }; struct ilk_wm_maximums { uint16_t pri ; uint16_t spr ; uint16_t cur ; uint16_t fbc ; }; struct intel_wm_config { unsigned int num_pipes_active ; bool 
sprites_enabled ; bool sprites_scaled ; }; struct cparams { u16 i ; u16 t ; u16 m ; u16 c ; }; struct request_boost { struct work_struct work ; struct drm_i915_gem_request *req ; }; enum hrtimer_restart; enum punit_power_well { PUNIT_POWER_WELL_RENDER = 0, PUNIT_POWER_WELL_MEDIA = 1, PUNIT_POWER_WELL_DISP2D = 3, PUNIT_POWER_WELL_DPIO_CMN_BC = 5, PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6, PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7, PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8, PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, PUNIT_POWER_WELL_DPIO_RX0 = 10, PUNIT_POWER_WELL_DPIO_RX1 = 11, PUNIT_POWER_WELL_DPIO_CMN_D = 12, PUNIT_POWER_WELL_NUM = 13 } ; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum dpio_phy { DPIO_PHY0 = 0, DPIO_PHY1 = 1 } ; enum intel_display_power_domain { POWER_DOMAIN_PIPE_A = 0, POWER_DOMAIN_PIPE_B = 1, POWER_DOMAIN_PIPE_C = 2, POWER_DOMAIN_PIPE_A_PANEL_FITTER = 3, POWER_DOMAIN_PIPE_B_PANEL_FITTER = 4, POWER_DOMAIN_PIPE_C_PANEL_FITTER = 5, POWER_DOMAIN_TRANSCODER_A = 6, POWER_DOMAIN_TRANSCODER_B = 7, POWER_DOMAIN_TRANSCODER_C = 8, POWER_DOMAIN_TRANSCODER_EDP = 9, POWER_DOMAIN_PORT_DDI_A_2_LANES = 10, POWER_DOMAIN_PORT_DDI_A_4_LANES = 11, POWER_DOMAIN_PORT_DDI_B_2_LANES = 12, POWER_DOMAIN_PORT_DDI_B_4_LANES = 13, POWER_DOMAIN_PORT_DDI_C_2_LANES = 14, POWER_DOMAIN_PORT_DDI_C_4_LANES = 15, POWER_DOMAIN_PORT_DDI_D_2_LANES = 16, POWER_DOMAIN_PORT_DDI_D_4_LANES = 17, POWER_DOMAIN_PORT_DSI = 18, POWER_DOMAIN_PORT_CRT = 19, POWER_DOMAIN_PORT_OTHER = 20, POWER_DOMAIN_VGA = 21, POWER_DOMAIN_AUDIO = 22, POWER_DOMAIN_PLLS = 23, POWER_DOMAIN_AUX_A = 24, POWER_DOMAIN_AUX_B = 25, POWER_DOMAIN_AUX_C = 26, POWER_DOMAIN_AUX_D = 27, POWER_DOMAIN_INIT = 28, POWER_DOMAIN_NUM = 29 } ; enum hrtimer_restart; struct firmware { size_t size ; u8 const *data ; struct page **pages ; void *priv ; }; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct intel_css_header { uint32_t module_type ; uint32_t 
header_len ; uint32_t header_ver ; uint32_t module_id ; uint32_t module_vendor ; uint32_t date ; uint32_t size ; uint32_t key_size ; uint32_t modulus_size ; uint32_t exponent_size ; uint32_t reserved1[12U] ; uint32_t version ; uint32_t reserved2[8U] ; uint32_t kernel_header_info ; }; struct intel_fw_info { uint16_t reserved1 ; char stepping ; char substepping ; uint32_t offset ; uint32_t reserved2 ; }; struct intel_package_header { unsigned char header_len ; unsigned char header_ver ; unsigned char reserved[10U] ; uint32_t num_entries ; struct intel_fw_info fw_info[20U] ; }; struct intel_dmc_header { uint32_t signature ; unsigned char header_len ; unsigned char header_ver ; uint16_t dmcc_ver ; uint32_t project ; uint32_t fw_size ; uint32_t fw_version ; uint32_t mmio_count ; uint32_t mmioaddr[8U] ; uint32_t mmiodata[8U] ; unsigned char dfile[32U] ; uint32_t reserved1[2U] ; }; struct stepping_info { char stepping ; char substepping ; }; struct tss_struct; struct x86_hw_tss { u32 reserved1 ; u64 sp0 ; u64 sp1 ; u64 sp2 ; u64 reserved2 ; u64 ist[7U] ; u32 reserved3 ; u32 reserved4 ; u16 reserved5 ; u16 io_bitmap_base ; }; struct tss_struct { struct x86_hw_tss x86_tss ; unsigned long io_bitmap[1025U] ; unsigned long SYSENTER_stack[64U] ; }; struct __anonstruct_mm_segment_t_33 { unsigned long seg ; }; typedef struct __anonstruct_mm_segment_t_33 mm_segment_t; struct thread_info { struct task_struct *task ; __u32 flags ; __u32 status ; __u32 cpu ; int saved_preempt_count ; mm_segment_t addr_limit ; void *sysenter_return ; unsigned char sig_on_uaccess_error : 1 ; unsigned char uaccess_err : 1 ; }; enum hrtimer_restart; struct bio_vec; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kvec { void *iov_base ; size_t iov_len ; }; union __anonunion____missing_field_name_217 { struct iovec const *iov ; struct kvec const *kvec ; struct bio_vec const *bvec ; }; struct iov_iter { int type ; size_t iov_offset ; size_t count ; union 
__anonunion____missing_field_name_217 __annonCompField58 ; unsigned long nr_segs ; }; struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; struct __large_struct { unsigned long buf[100U] ; }; typedef int drm_ioctl_compat_t(struct file * , unsigned int , unsigned long ); struct drm_i915_batchbuffer { int start ; int used ; int DR1 ; int DR4 ; int num_cliprects ; struct drm_clip_rect *cliprects ; }; typedef struct drm_i915_batchbuffer drm_i915_batchbuffer_t; struct _drm_i915_cmdbuffer { char *buf ; int sz ; int DR1 ; int DR4 ; int num_cliprects ; struct drm_clip_rect *cliprects ; }; typedef struct _drm_i915_cmdbuffer drm_i915_cmdbuffer_t; struct drm_i915_irq_emit { int *irq_seq ; }; typedef struct drm_i915_irq_emit drm_i915_irq_emit_t; struct drm_i915_getparam { int param ; int *value ; }; typedef struct drm_i915_getparam drm_i915_getparam_t; struct drm_i915_mem_alloc { int region ; int alignment ; int size ; int *region_offset ; }; typedef struct drm_i915_mem_alloc drm_i915_mem_alloc_t; struct _drm_i915_batchbuffer32 { int start ; int used ; int DR1 ; int DR4 ; int num_cliprects ; u32 cliprects ; }; typedef struct _drm_i915_batchbuffer32 drm_i915_batchbuffer32_t; struct _drm_i915_cmdbuffer32 { u32 buf ; int sz ; int DR1 ; int DR4 ; int num_cliprects ; u32 cliprects ; }; typedef struct _drm_i915_cmdbuffer32 drm_i915_cmdbuffer32_t; struct drm_i915_irq_emit32 { u32 irq_seq ; }; typedef struct drm_i915_irq_emit32 drm_i915_irq_emit32_t; struct drm_i915_getparam32 { int param ; u32 value ; }; typedef struct drm_i915_getparam32 drm_i915_getparam32_t; struct drm_i915_mem_alloc32 { int region ; int alignment ; int 
size ; u32 region_offset ; }; typedef struct drm_i915_mem_alloc32 drm_i915_mem_alloc32_t; struct gate_struct64 { u16 offset_low ; u16 segment ; unsigned char ist : 3 ; unsigned char zero0 : 5 ; unsigned char type : 5 ; unsigned char dpl : 2 ; unsigned char p : 1 ; u16 offset_middle ; u32 offset_high ; u32 zero1 ; }; typedef struct gate_struct64 gate_desc; struct desc_ptr { unsigned short size ; unsigned long address ; }; struct pv_cpu_ops { unsigned long (*get_debugreg)(int ) ; void (*set_debugreg)(int , unsigned long ) ; void (*clts)(void) ; unsigned long (*read_cr0)(void) ; void (*write_cr0)(unsigned long ) ; unsigned long (*read_cr4_safe)(void) ; unsigned long (*read_cr4)(void) ; void (*write_cr4)(unsigned long ) ; unsigned long (*read_cr8)(void) ; void (*write_cr8)(unsigned long ) ; void (*load_tr_desc)(void) ; void (*load_gdt)(struct desc_ptr const * ) ; void (*load_idt)(struct desc_ptr const * ) ; void (*store_idt)(struct desc_ptr * ) ; void (*set_ldt)(void const * , unsigned int ) ; unsigned long (*store_tr)(void) ; void (*load_tls)(struct thread_struct * , unsigned int ) ; void (*load_gs_index)(unsigned int ) ; void (*write_ldt_entry)(struct desc_struct * , int , void const * ) ; void (*write_gdt_entry)(struct desc_struct * , int , void const * , int ) ; void (*write_idt_entry)(gate_desc * , int , gate_desc const * ) ; void (*alloc_ldt)(struct desc_struct * , unsigned int ) ; void (*free_ldt)(struct desc_struct * , unsigned int ) ; void (*load_sp0)(struct tss_struct * , struct thread_struct * ) ; void (*set_iopl_mask)(unsigned int ) ; void (*wbinvd)(void) ; void (*io_delay)(void) ; void (*cpuid)(unsigned int * , unsigned int * , unsigned int * , unsigned int * ) ; u64 (*read_msr)(unsigned int , int * ) ; int (*write_msr)(unsigned int , unsigned int , unsigned int ) ; u64 (*read_tsc)(void) ; u64 (*read_pmc)(int ) ; unsigned long long (*read_tscp)(unsigned int * ) ; void (*usergs_sysret64)(void) ; void (*usergs_sysret32)(void) ; void (*iret)(void) ; void 
(*swapgs)(void) ; void (*start_context_switch)(struct task_struct * ) ; void (*end_context_switch)(struct task_struct * ) ; }; struct __wait_queue; typedef struct __wait_queue wait_queue_t; struct __wait_queue { unsigned int flags ; void *private ; int (*func)(wait_queue_t * , unsigned int , int , void * ) ; struct list_head task_list ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_info_list { char const *name ; int (*show)(struct seq_file * , void * ) ; u32 driver_features ; void *data ; }; struct drm_info_node { struct list_head list ; struct drm_minor *minor ; struct drm_info_list const *info_ent ; struct dentry *dent ; }; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_407 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_409 { struct tracepoint_func *__val ; char __c[1U] ; }; struct file_stats { struct drm_i915_file_private *file_priv ; int count ; size_t total ; size_t unbound ; size_t global ; size_t shared ; size_t active ; size_t inactive ; }; struct pipe_crc_info { char const *name ; struct drm_device *dev ; enum pipe pipe ; }; enum intel_pipe_crc_object { PIPE_CRC_OBJECT_PIPE = 0 } ; struct sseu_dev_status { unsigned int slice_total ; unsigned int subslice_total ; unsigned int subslice_per_slice ; unsigned int eu_total ; unsigned int eu_per_subslice ; }; struct i915_debugfs_files { char const *name ; struct file_operations const *fops ; }; struct dpcd_block { unsigned int offset ; unsigned int end ; size_t size ; bool edp ; }; enum hrtimer_restart; struct sg_page_iter { struct scatterlist *sg ; unsigned int sg_pgoffset ; unsigned int __nents ; int __pg_advance ; }; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct __anonstruct_cmd_286 { u32 value ; u32 mask ; }; union __anonunion_length_287 { u32 fixed ; u32 mask ; }; struct __anonstruct_reg_288 { u32 offset ; u32 mask ; u32 step ; }; struct __anonstruct_bits_289 { u32 offset ; u32 
mask ; u32 expected ; u32 condition_offset ; u32 condition_mask ; }; struct drm_i915_cmd_descriptor { u32 flags ; struct __anonstruct_cmd_286 cmd ; union __anonunion_length_287 length ; struct __anonstruct_reg_288 reg ; struct __anonstruct_bits_289 bits[3U] ; }; struct drm_i915_cmd_table { struct drm_i915_cmd_descriptor const *table ; int count ; }; struct drm_i915_reg_descriptor { u32 addr ; u32 mask ; u32 value ; }; struct cmd_node { struct drm_i915_cmd_descriptor const *desc ; struct hlist_node node ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; union __anonunion___u_168 { struct idr_layer *__val ; char __c[1U] ; }; union __anonunion___u_170 { struct idr_layer *__val ; char __c[1U] ; }; enum i2c_slave_event; enum i2c_slave_event; struct drm_i915_gem_context_create { __u32 ctx_id ; __u32 pad ; }; struct drm_i915_gem_context_destroy { __u32 ctx_id ; __u32 pad ; }; struct drm_i915_gem_context_param { __u32 ctx_id ; __u32 size ; __u64 param ; __u64 value ; }; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_428 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_430 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_432 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_434 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_436 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_438 { struct tracepoint_func *__val ; char __c[1U] ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct intel_renderstate_rodata { u32 const *reloc ; u32 const *batch ; u32 const batch_items ; }; struct render_state { struct intel_renderstate_rodata const *rodata ; struct drm_i915_gem_object *obj ; u64 ggtt_offset ; int gen ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum 
i915_cache_level; enum i915_cache_level; struct fence_ops; struct fence_cb; struct fence { struct kref refcount ; struct fence_ops const *ops ; struct callback_head rcu ; struct list_head cb_list ; spinlock_t *lock ; unsigned int context ; unsigned int seqno ; unsigned long flags ; ktime_t timestamp ; int status ; }; struct fence_cb { struct list_head node ; void (*func)(struct fence * , struct fence_cb * ) ; }; struct fence_ops { char const *(*get_driver_name)(struct fence * ) ; char const *(*get_timeline_name)(struct fence * ) ; bool (*enable_signaling)(struct fence * ) ; bool (*signaled)(struct fence * ) ; long (*wait)(struct fence * , bool , long ) ; void (*release)(struct fence * ) ; int (*fill_driver_data)(struct fence * , void * , int ) ; void (*fence_value_str)(struct fence * , char * , int ) ; void (*timeline_value_str)(struct fence * , char * , int ) ; }; struct dma_buf_ops { int (*attach)(struct dma_buf * , struct device * , struct dma_buf_attachment * ) ; void (*detach)(struct dma_buf * , struct dma_buf_attachment * ) ; struct sg_table *(*map_dma_buf)(struct dma_buf_attachment * , enum dma_data_direction ) ; void (*unmap_dma_buf)(struct dma_buf_attachment * , struct sg_table * , enum dma_data_direction ) ; void (*release)(struct dma_buf * ) ; int (*begin_cpu_access)(struct dma_buf * , size_t , size_t , enum dma_data_direction ) ; void (*end_cpu_access)(struct dma_buf * , size_t , size_t , enum dma_data_direction ) ; void *(*kmap_atomic)(struct dma_buf * , unsigned long ) ; void (*kunmap_atomic)(struct dma_buf * , unsigned long , void * ) ; void *(*kmap)(struct dma_buf * , unsigned long ) ; void (*kunmap)(struct dma_buf * , unsigned long , void * ) ; int (*mmap)(struct dma_buf * , struct vm_area_struct * ) ; void *(*vmap)(struct dma_buf * ) ; void (*vunmap)(struct dma_buf * , void * ) ; }; struct dma_buf_poll_cb_t { struct fence_cb cb ; wait_queue_head_t *poll ; unsigned long active ; }; struct dma_buf { size_t size ; struct file *file ; struct list_head 
attachments ; struct dma_buf_ops const *ops ; struct mutex lock ; unsigned int vmapping_counter ; void *vmap_ptr ; char const *exp_name ; struct module *owner ; struct list_head list_node ; void *priv ; struct reservation_object *resv ; wait_queue_head_t poll ; struct dma_buf_poll_cb_t cb_excl ; struct dma_buf_poll_cb_t cb_shared ; }; struct dma_buf_attachment { struct dma_buf *dmabuf ; struct device *dev ; struct list_head node ; void *priv ; }; struct dma_buf_export_info { char const *exp_name ; struct module *owner ; struct dma_buf_ops const *ops ; size_t size ; int flags ; struct reservation_object *resv ; void *priv ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_356 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_358 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_360 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_362 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_364 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_366 { struct tracepoint_func *__val ; char __c[1U] ; }; struct cpuinfo_x86; struct cpuinfo_x86 { __u8 x86 ; __u8 x86_vendor ; __u8 x86_model ; __u8 x86_mask ; int x86_tlbsize ; __u8 x86_virt_bits ; __u8 x86_phys_bits ; __u8 x86_coreid_bits ; __u32 extended_cpuid_level ; int cpuid_level ; __u32 x86_capability[14U] ; char x86_vendor_id[16U] ; char x86_model_id[64U] ; int x86_cache_size ; int x86_cache_alignment ; int x86_cache_max_rmid ; int x86_cache_occ_scale ; int x86_power ; unsigned long loops_per_jiffy ; u16 x86_max_cores ; u16 apicid ; u16 initial_apicid ; u16 x86_clflush_size ; u16 booted_cores ; u16 phys_proc_id ; u16 cpu_core_id ; u8 compute_unit_id ; u16 cpu_index ; u32 microcode ; }; enum hrtimer_restart; union __anonunion___u_168___0 { struct idr_layer *__val ; char __c[1U] ; }; union __anonunion___u_170___0 { struct 
idr_layer *__val ; char __c[1U] ; }; enum i2c_slave_event; enum i2c_slave_event; struct drm_i915_gem_relocation_entry { __u32 target_handle ; __u32 delta ; __u64 offset ; __u64 presumed_offset ; __u32 read_domains ; __u32 write_domain ; }; struct drm_i915_gem_exec_object { __u32 handle ; __u32 relocation_count ; __u64 relocs_ptr ; __u64 alignment ; __u64 offset ; }; struct drm_i915_gem_execbuffer { __u64 buffers_ptr ; __u32 buffer_count ; __u32 batch_start_offset ; __u32 batch_len ; __u32 DR1 ; __u32 DR4 ; __u32 num_cliprects ; __u64 cliprects_ptr ; }; enum i915_cache_level; enum i915_cache_level; enum fb_op_origin { ORIGIN_GTT = 0, ORIGIN_CPU = 1, ORIGIN_CS = 2, ORIGIN_FLIP = 3 } ; union __anonunion___u_332 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_334 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_372 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_374 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion____missing_field_name_439 { struct i915_vma *lut[0U] ; struct hlist_head buckets[0U] ; }; struct eb_vmas { struct list_head vmas ; int and ; union __anonunion____missing_field_name_439 __annonCompField85 ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum drm_mm_search_flags { DRM_MM_SEARCH_DEFAULT = 0, DRM_MM_SEARCH_BEST = 1, DRM_MM_SEARCH_BELOW = 2 } ; enum drm_mm_allocator_flags { DRM_MM_CREATE_DEFAULT = 0, DRM_MM_CREATE_TOP = 1 } ; typedef uint64_t gen8_pte_t; typedef uint64_t gen8_pde_t; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_320 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_322 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_324 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_326 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_328 { struct tracepoint_func *__val ; char __c[1U] ; }; union 
__anonunion___u_330 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_420 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_422 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_424 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_426 { struct tracepoint_func *__val ; char __c[1U] ; }; typedef long long __s64; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_i915_gem_create { __u64 size ; __u32 handle ; __u32 pad ; }; struct drm_i915_gem_pread { __u32 handle ; __u32 pad ; __u64 offset ; __u64 size ; __u64 data_ptr ; }; struct drm_i915_gem_pwrite { __u32 handle ; __u32 pad ; __u64 offset ; __u64 size ; __u64 data_ptr ; }; struct drm_i915_gem_mmap { __u32 handle ; __u32 pad ; __u64 offset ; __u64 size ; __u64 addr_ptr ; __u64 flags ; }; struct drm_i915_gem_mmap_gtt { __u32 handle ; __u32 pad ; __u64 offset ; }; struct drm_i915_gem_set_domain { __u32 handle ; __u32 read_domains ; __u32 write_domain ; }; struct drm_i915_gem_sw_finish { __u32 handle ; }; struct drm_i915_gem_busy { __u32 handle ; __u32 busy ; }; struct drm_i915_gem_caching { __u32 handle ; __u32 caching ; }; struct drm_i915_gem_get_aperture { __u64 aper_size ; __u64 aper_available_size ; }; struct drm_i915_gem_madvise { __u32 handle ; __u32 madv ; __u32 retained ; }; struct drm_i915_gem_wait { __u32 bo_handle ; __u32 flags ; __s64 timeout_ns ; }; enum i915_cache_level; enum i915_cache_level; typedef struct drm_dma_handle drm_dma_handle_t; union __anonunion___u_308 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_310 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_312 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_314 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_316 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_318 { struct tracepoint_func *__val ; 
char __c[1U] ; }; union __anonunion___u_332___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_334___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_336 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_338 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_340 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_342 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_344 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_346 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_348 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_350 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_352 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_354 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_368 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_370 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_380 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_382 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_388 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_390 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_396 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_398 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_400 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_402 { struct tracepoint_func *__val ; char __c[1U] ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 
cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct reclaim_state { unsigned long reclaimed_slab ; }; struct swap_extent { struct list_head list ; unsigned long start_page ; unsigned long nr_pages ; sector_t start_block ; }; struct swap_cluster_info { unsigned int data : 24 ; unsigned char flags ; }; struct percpu_cluster { struct swap_cluster_info index ; unsigned int next ; }; struct swap_info_struct { unsigned long flags ; short prio ; struct plist_node list ; struct plist_node avail_list ; signed char type ; unsigned int max ; unsigned char *swap_map ; struct swap_cluster_info *cluster_info ; struct swap_cluster_info free_cluster_head ; struct swap_cluster_info free_cluster_tail ; unsigned int lowest_bit ; unsigned int highest_bit ; unsigned int pages ; unsigned int inuse_pages ; unsigned int cluster_next ; unsigned int cluster_nr ; struct percpu_cluster *percpu_cluster ; struct swap_extent *curr_swap_extent ; struct swap_extent first_swap_extent ; struct block_device *bdev ; struct file *swap_file ; unsigned int old_block_size ; unsigned long *frontswap_map ; atomic_t frontswap_pages ; spinlock_t lock ; struct work_struct discard_work ; struct swap_cluster_info discard_cluster_head ; struct swap_cluster_info discard_cluster_tail ; }; union __anonunion_v_452 { short preferred_node ; nodemask_t nodes ; }; union __anonunion_w_453 { nodemask_t cpuset_mems_allowed ; nodemask_t user_nodemask 
; }; struct mempolicy { atomic_t refcnt ; unsigned short mode ; unsigned short flags ; union __anonunion_v_452 v ; union __anonunion_w_453 w ; }; struct xattr_handler { char const *prefix ; int flags ; size_t (*list)(struct dentry * , char * , size_t , char const * , size_t , int ) ; int (*get)(struct dentry * , char const * , void * , size_t , int ) ; int (*set)(struct dentry * , char const * , void const * , size_t , int , int ) ; }; typedef int ldv_func_ret_type___9; typedef int ldv_func_ret_type___10; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct __anonstruct_phases_453 { struct list_head *list ; unsigned int bit ; }; struct __anonstruct_phase_454 { struct list_head *list ; unsigned int bit ; }; struct __anonstruct_456 { struct list_head *list ; unsigned int bit ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct __anonstruct_stolen_440 { u32 start ; u32 end ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_i915_gem_set_tiling { __u32 handle ; __u32 tiling_mode ; __u32 stride ; __u32 swizzle_mode ; }; struct drm_i915_gem_get_tiling { __u32 handle ; __u32 tiling_mode ; __u32 swizzle_mode ; __u32 phys_swizzle_mode ; }; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_i915_gem_userptr { __u64 user_ptr ; __u64 user_size ; __u32 flags ; __u32 handle ; }; enum i915_cache_level; enum i915_cache_level; struct mmu_notifier; struct mmu_notifier_ops; struct mmu_notifier_mm { struct hlist_head list ; spinlock_t lock ; }; struct mmu_notifier_ops { void (*release)(struct mmu_notifier * , struct mm_struct * ) ; int (*clear_flush_young)(struct mmu_notifier * , struct mm_struct * , unsigned long , unsigned long ) ; int (*test_young)(struct mmu_notifier * , struct mm_struct * , unsigned long ) ; void (*change_pte)(struct 
mmu_notifier * , struct mm_struct * , unsigned long , pte_t ) ; void (*invalidate_page)(struct mmu_notifier * , struct mm_struct * , unsigned long ) ; void (*invalidate_range_start)(struct mmu_notifier * , struct mm_struct * , unsigned long , unsigned long ) ; void (*invalidate_range_end)(struct mmu_notifier * , struct mm_struct * , unsigned long , unsigned long ) ; void (*invalidate_range)(struct mmu_notifier * , struct mm_struct * , unsigned long , unsigned long ) ; }; struct mmu_notifier { struct hlist_node hlist ; struct mmu_notifier_ops const *ops ; }; struct i915_mmu_notifier; struct i915_mm_struct { struct mm_struct *mm ; struct drm_device *dev ; struct i915_mmu_notifier *mn ; struct hlist_node node ; struct kref kref ; struct work_struct work ; }; struct interval_tree_node { struct rb_node rb ; unsigned long start ; unsigned long last ; unsigned long __subtree_last ; }; struct i915_mmu_notifier { spinlock_t lock ; struct hlist_node node ; struct mmu_notifier mn ; struct rb_root objects ; struct list_head linear ; unsigned long serial ; bool has_linear ; }; struct i915_mmu_object { struct i915_mmu_notifier *mn ; struct interval_tree_node it ; struct list_head link ; struct drm_i915_gem_object *obj ; bool is_linear ; }; struct get_pages_work { struct work_struct work ; struct drm_i915_gem_object *obj ; struct task_struct *task ; }; struct va_list; typedef __builtin_va_list __gnuc_va_list; typedef __gnuc_va_list va_list; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum kobject_action { KOBJ_ADD = 0, KOBJ_REMOVE = 1, KOBJ_CHANGE = 2, KOBJ_MOVE = 3, KOBJ_ONLINE = 4, KOBJ_OFFLINE = 5, KOBJ_MAX = 6 } ; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_384 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_386 { struct tracepoint_func *__val ; char __c[1U] ; }; enum hrtimer_restart; struct 
uts_namespace; struct net; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct ring_buffer; struct ring_buffer_iter; struct trace_seq; struct seq_buf { char *buffer ; size_t size ; size_t len ; loff_t readpos ; }; struct trace_seq { unsigned char buffer[4096U] ; struct seq_buf seq ; int full ; }; union __anonunion____missing_field_name_439___0 { __u64 sample_period ; __u64 sample_freq ; }; union __anonunion____missing_field_name_440 { __u32 wakeup_events ; __u32 wakeup_watermark ; }; union __anonunion____missing_field_name_441 { __u64 bp_addr ; __u64 config1 ; }; union __anonunion____missing_field_name_442 { __u64 bp_len ; __u64 config2 ; }; struct perf_event_attr { __u32 type ; __u32 size ; __u64 config ; union __anonunion____missing_field_name_439___0 __annonCompField85 ; __u64 sample_type ; __u64 read_format ; unsigned char disabled : 1 ; unsigned char inherit : 1 ; unsigned char pinned : 1 ; unsigned char exclusive : 1 ; unsigned char exclude_user : 1 ; unsigned char exclude_kernel : 1 ; unsigned char exclude_hv : 1 ; unsigned char exclude_idle : 1 ; unsigned char mmap : 1 ; unsigned char comm : 1 ; unsigned char freq : 1 ; unsigned char inherit_stat : 1 ; unsigned char enable_on_exec : 1 ; unsigned char task : 1 ; unsigned char watermark : 1 ; unsigned char precise_ip : 2 ; unsigned char mmap_data : 1 ; unsigned char sample_id_all : 1 ; unsigned char exclude_host : 1 ; unsigned char exclude_guest : 1 ; unsigned char exclude_callchain_kernel : 1 ; unsigned char exclude_callchain_user : 1 ; unsigned char mmap2 : 1 ; unsigned char comm_exec : 1 ; unsigned char use_clockid : 1 ; unsigned long __reserved_1 : 38 ; union __anonunion____missing_field_name_440 __annonCompField86 ; __u32 bp_type ; union __anonunion____missing_field_name_441 __annonCompField87 ; union __anonunion____missing_field_name_442 __annonCompField88 ; __u64 branch_sample_type ; __u64 sample_regs_user ; __u32 sample_stack_user ; __s32 clockid ; __u64 
sample_regs_intr ; __u32 aux_watermark ; __u32 __reserved_2 ; }; struct __anonstruct____missing_field_name_445 { unsigned char mem_op : 5 ; unsigned short mem_lvl : 14 ; unsigned char mem_snoop : 5 ; unsigned char mem_lock : 2 ; unsigned char mem_dtlb : 7 ; unsigned int mem_rsvd : 31 ; }; union perf_mem_data_src { __u64 val ; struct __anonstruct____missing_field_name_445 __annonCompField91 ; }; struct perf_branch_entry { __u64 from ; __u64 to ; unsigned char mispred : 1 ; unsigned char predicted : 1 ; unsigned char in_tx : 1 ; unsigned char abort : 1 ; unsigned long reserved : 60 ; }; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns_for_children ; struct net *net_ns ; }; struct proc_ns_operations; struct ns_common { atomic_long_t stashed ; struct proc_ns_operations const *ops ; unsigned int inum ; }; struct pidmap { atomic_t nr_free ; void *page ; }; struct fs_pin; struct pid_namespace { struct kref kref ; struct pidmap pidmap[128U] ; struct callback_head rcu ; int last_pid ; unsigned int nr_hashed ; struct task_struct *child_reaper ; struct kmem_cache *pid_cachep ; unsigned int level ; struct pid_namespace *parent ; struct vfsmount *proc_mnt ; struct dentry *proc_self ; struct dentry *proc_thread_self ; struct fs_pin *bacct ; struct user_namespace *user_ns ; struct work_struct proc_work ; kgid_t pid_gid ; int hide_pid ; int reboot ; struct ns_common ns ; }; struct __anonstruct_local_t_453 { atomic_long_t a ; }; typedef struct __anonstruct_local_t_453 local_t; struct __anonstruct_local64_t_454 { local_t a ; }; typedef struct __anonstruct_local64_t_454 local64_t; struct arch_hw_breakpoint { unsigned long address ; unsigned long mask ; u8 len ; u8 type ; }; struct pmu; struct ftrace_hash; struct ftrace_ops; struct ftrace_ops_hash { struct ftrace_hash *notrace_hash ; struct ftrace_hash *filter_hash ; struct mutex 
regex_lock ; }; struct ftrace_ops { void (*func)(unsigned long , unsigned long , struct ftrace_ops * , struct pt_regs * ) ; struct ftrace_ops *next ; unsigned long flags ; void *private ; int *disabled ; int nr_trampolines ; struct ftrace_ops_hash local_hash ; struct ftrace_ops_hash *func_hash ; struct ftrace_ops_hash old_hash ; unsigned long trampoline ; unsigned long trampoline_size ; }; struct ftrace_ret_stack { unsigned long ret ; unsigned long func ; unsigned long long calltime ; unsigned long long subtime ; unsigned long fp ; }; struct irq_work { unsigned long flags ; struct llist_node llnode ; void (*func)(struct irq_work * ) ; }; struct perf_regs { __u64 abi ; struct pt_regs *regs ; }; struct perf_callchain_entry { __u64 nr ; __u64 ip[127U] ; }; struct perf_raw_record { u32 size ; void *data ; }; struct perf_branch_stack { __u64 nr ; struct perf_branch_entry entries[0U] ; }; struct hw_perf_event_extra { u64 config ; unsigned int reg ; int alloc ; int idx ; }; struct __anonstruct____missing_field_name_471 { u64 config ; u64 last_tag ; unsigned long config_base ; unsigned long event_base ; int event_base_rdpmc ; int idx ; int last_cpu ; int flags ; struct hw_perf_event_extra extra_reg ; struct hw_perf_event_extra branch_reg ; }; struct __anonstruct____missing_field_name_472 { struct hrtimer hrtimer ; }; struct __anonstruct____missing_field_name_473 { struct list_head tp_list ; }; struct __anonstruct____missing_field_name_474 { int cqm_state ; u32 cqm_rmid ; struct list_head cqm_events_entry ; struct list_head cqm_groups_entry ; struct list_head cqm_group_entry ; }; struct __anonstruct____missing_field_name_475 { int itrace_started ; }; struct __anonstruct____missing_field_name_476 { struct arch_hw_breakpoint info ; struct list_head bp_list ; }; union __anonunion____missing_field_name_470 { struct __anonstruct____missing_field_name_471 __annonCompField92 ; struct __anonstruct____missing_field_name_472 __annonCompField93 ; struct 
__anonstruct____missing_field_name_473 __annonCompField94 ; struct __anonstruct____missing_field_name_474 __annonCompField95 ; struct __anonstruct____missing_field_name_475 __annonCompField96 ; struct __anonstruct____missing_field_name_476 __annonCompField97 ; }; struct hw_perf_event { union __anonunion____missing_field_name_470 __annonCompField98 ; struct task_struct *target ; int state ; local64_t prev_count ; u64 sample_period ; u64 last_period ; local64_t period_left ; u64 interrupts_seq ; u64 interrupts ; u64 freq_time_stamp ; u64 freq_count_stamp ; }; struct perf_cpu_context; struct pmu { struct list_head entry ; struct module *module ; struct device *dev ; struct attribute_group const **attr_groups ; char const *name ; int type ; int capabilities ; int *pmu_disable_count ; struct perf_cpu_context *pmu_cpu_context ; atomic_t exclusive_cnt ; int task_ctx_nr ; int hrtimer_interval_ms ; void (*pmu_enable)(struct pmu * ) ; void (*pmu_disable)(struct pmu * ) ; int (*event_init)(struct perf_event * ) ; void (*event_mapped)(struct perf_event * ) ; void (*event_unmapped)(struct perf_event * ) ; int (*add)(struct perf_event * , int ) ; void (*del)(struct perf_event * , int ) ; void (*start)(struct perf_event * , int ) ; void (*stop)(struct perf_event * , int ) ; void (*read)(struct perf_event * ) ; void (*start_txn)(struct pmu * ) ; int (*commit_txn)(struct pmu * ) ; void (*cancel_txn)(struct pmu * ) ; int (*event_idx)(struct perf_event * ) ; void (*sched_task)(struct perf_event_context * , bool ) ; size_t task_ctx_size ; u64 (*count)(struct perf_event * ) ; void *(*setup_aux)(int , void ** , int , bool ) ; void (*free_aux)(void * ) ; int (*filter_match)(struct perf_event * ) ; }; enum perf_event_active_state { PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1 } ; struct perf_sample_data; struct perf_cgroup; struct event_filter; struct perf_event { struct list_head event_entry 
; struct list_head group_entry ; struct list_head sibling_list ; struct list_head migrate_entry ; struct hlist_node hlist_entry ; struct list_head active_entry ; int nr_siblings ; int group_flags ; struct perf_event *group_leader ; struct pmu *pmu ; enum perf_event_active_state state ; unsigned int attach_state ; local64_t count ; atomic64_t child_count ; u64 total_time_enabled ; u64 total_time_running ; u64 tstamp_enabled ; u64 tstamp_running ; u64 tstamp_stopped ; u64 shadow_ctx_time ; struct perf_event_attr attr ; u16 header_size ; u16 id_header_size ; u16 read_size ; struct hw_perf_event hw ; struct perf_event_context *ctx ; atomic_long_t refcount ; atomic64_t child_total_time_enabled ; atomic64_t child_total_time_running ; struct mutex child_mutex ; struct list_head child_list ; struct perf_event *parent ; int oncpu ; int cpu ; struct list_head owner_entry ; struct task_struct *owner ; struct mutex mmap_mutex ; atomic_t mmap_count ; struct ring_buffer *rb ; struct list_head rb_entry ; unsigned long rcu_batches ; int rcu_pending ; wait_queue_head_t waitq ; struct fasync_struct *fasync ; int pending_wakeup ; int pending_kill ; int pending_disable ; struct irq_work pending ; atomic_t event_limit ; void (*destroy)(struct perf_event * ) ; struct callback_head callback_head ; struct pid_namespace *ns ; u64 id ; u64 (*clock)(void) ; void (*overflow_handler)(struct perf_event * , struct perf_sample_data * , struct pt_regs * ) ; void *overflow_handler_context ; struct trace_event_call *tp_event ; struct event_filter *filter ; struct ftrace_ops ftrace_ops ; struct perf_cgroup *cgrp ; int cgrp_defer_enabled ; }; struct perf_event_context { struct pmu *pmu ; raw_spinlock_t lock ; struct mutex mutex ; struct list_head active_ctx_list ; struct list_head pinned_groups ; struct list_head flexible_groups ; struct list_head event_list ; int nr_events ; int nr_active ; int is_active ; int nr_stat ; int nr_freq ; int rotate_disable ; atomic_t refcount ; struct task_struct *task ; 
u64 time ; u64 timestamp ; struct perf_event_context *parent_ctx ; u64 parent_gen ; u64 generation ; int pin_count ; int nr_cgroups ; void *task_ctx_data ; struct callback_head callback_head ; struct delayed_work orphans_remove ; bool orphans_remove_sched ; }; struct perf_cpu_context { struct perf_event_context ctx ; struct perf_event_context *task_ctx ; int active_oncpu ; int exclusive ; raw_spinlock_t hrtimer_lock ; struct hrtimer hrtimer ; ktime_t hrtimer_interval ; unsigned int hrtimer_active ; struct pmu *unique_pmu ; struct perf_cgroup *cgrp ; }; struct perf_cgroup_info { u64 time ; u64 timestamp ; }; struct perf_cgroup { struct cgroup_subsys_state css ; struct perf_cgroup_info *info ; }; struct __anonstruct_tid_entry_478 { u32 pid ; u32 tid ; }; struct __anonstruct_cpu_entry_479 { u32 cpu ; u32 reserved ; }; struct perf_sample_data { u64 addr ; struct perf_raw_record *raw ; struct perf_branch_stack *br_stack ; u64 period ; u64 weight ; u64 txn ; union perf_mem_data_src data_src ; u64 type ; u64 ip ; struct __anonstruct_tid_entry_478 tid_entry ; u64 time ; u64 id ; u64 stream_id ; struct __anonstruct_cpu_entry_479 cpu_entry ; struct perf_callchain_entry *callchain ; struct perf_regs regs_user ; struct pt_regs regs_user_copy ; struct perf_regs regs_intr ; u64 stack_user_size ; }; struct trace_array; struct trace_buffer; struct tracer; struct bpf_prog; struct trace_iterator; struct trace_event; struct trace_entry { unsigned short type ; unsigned char flags ; unsigned char preempt_count ; int pid ; }; struct trace_iterator { struct trace_array *tr ; struct tracer *trace ; struct trace_buffer *trace_buffer ; void *private ; int cpu_file ; struct mutex mutex ; struct ring_buffer_iter **buffer_iter ; unsigned long iter_flags ; struct trace_seq tmp_seq ; cpumask_var_t started ; bool snapshot ; struct trace_seq seq ; struct trace_entry *ent ; unsigned long lost_events ; int leftover ; int ent_size ; int cpu ; u64 ts ; loff_t pos ; long idx ; }; enum print_line_t; 
struct trace_event_functions { enum print_line_t (*trace)(struct trace_iterator * , int , struct trace_event * ) ; enum print_line_t (*raw)(struct trace_iterator * , int , struct trace_event * ) ; enum print_line_t (*hex)(struct trace_iterator * , int , struct trace_event * ) ; enum print_line_t (*binary)(struct trace_iterator * , int , struct trace_event * ) ; }; struct trace_event { struct hlist_node node ; struct list_head list ; int type ; struct trace_event_functions *funcs ; }; enum print_line_t { TRACE_TYPE_PARTIAL_LINE = 0, TRACE_TYPE_HANDLED = 1, TRACE_TYPE_UNHANDLED = 2, TRACE_TYPE_NO_CONSUME = 3 } ; enum trace_reg { TRACE_REG_REGISTER = 0, TRACE_REG_UNREGISTER = 1, TRACE_REG_PERF_REGISTER = 2, TRACE_REG_PERF_UNREGISTER = 3, TRACE_REG_PERF_OPEN = 4, TRACE_REG_PERF_CLOSE = 5, TRACE_REG_PERF_ADD = 6, TRACE_REG_PERF_DEL = 7 } ; struct trace_event_class { char const *system ; void *probe ; void *perf_probe ; int (*reg)(struct trace_event_call * , enum trace_reg , void * ) ; int (*define_fields)(struct trace_event_call * ) ; struct list_head *(*get_fields)(struct trace_event_call * ) ; struct list_head fields ; int (*raw_init)(struct trace_event_call * ) ; }; union __anonunion____missing_field_name_480 { char *name ; struct tracepoint *tp ; }; struct trace_event_call { struct list_head list ; struct trace_event_class *class ; union __anonunion____missing_field_name_480 __annonCompField100 ; struct trace_event event ; char *print_fmt ; struct event_filter *filter ; void *mod ; void *data ; int flags ; int perf_refcount ; struct hlist_head *perf_events ; struct bpf_prog *prog ; int (*perf_perm)(struct trace_event_call * , struct perf_event * ) ; }; struct trace_event_raw_i915_pipe_update_start { struct trace_entry ent ; enum pipe pipe ; u32 frame ; u32 scanline ; u32 min ; u32 max ; char __data[0U] ; }; struct trace_event_raw_i915_pipe_update_vblank_evaded { struct trace_entry ent ; enum pipe pipe ; u32 frame ; u32 scanline ; u32 min ; u32 max ; char __data[0U] 
; }; struct trace_event_raw_i915_pipe_update_end { struct trace_entry ent ; enum pipe pipe ; u32 frame ; u32 scanline ; char __data[0U] ; }; struct trace_event_raw_i915_gem_object_create { struct trace_entry ent ; struct drm_i915_gem_object *obj ; u32 size ; char __data[0U] ; }; struct trace_event_raw_i915_vma_bind { struct trace_entry ent ; struct drm_i915_gem_object *obj ; struct i915_address_space *vm ; u64 offset ; u32 size ; unsigned int flags ; char __data[0U] ; }; struct trace_event_raw_i915_vma_unbind { struct trace_entry ent ; struct drm_i915_gem_object *obj ; struct i915_address_space *vm ; u64 offset ; u32 size ; char __data[0U] ; }; struct trace_event_raw_i915_va { struct trace_entry ent ; struct i915_address_space *vm ; u64 start ; u64 end ; u32 __data_loc_name ; char __data[0U] ; }; struct trace_event_raw_i915_page_table_entry { struct trace_entry ent ; struct i915_address_space *vm ; u32 pde ; u64 start ; u64 end ; char __data[0U] ; }; struct trace_event_raw_i915_page_table_entry_update { struct trace_entry ent ; struct i915_address_space *vm ; u32 pde ; u32 first ; u32 last ; u32 __data_loc_cur_ptes ; char __data[0U] ; }; struct trace_event_raw_i915_gem_object_change_domain { struct trace_entry ent ; struct drm_i915_gem_object *obj ; u32 read_domains ; u32 write_domain ; char __data[0U] ; }; struct trace_event_raw_i915_gem_object_pwrite { struct trace_entry ent ; struct drm_i915_gem_object *obj ; u32 offset ; u32 len ; char __data[0U] ; }; struct trace_event_raw_i915_gem_object_pread { struct trace_entry ent ; struct drm_i915_gem_object *obj ; u32 offset ; u32 len ; char __data[0U] ; }; struct trace_event_raw_i915_gem_object_fault { struct trace_entry ent ; struct drm_i915_gem_object *obj ; u32 index ; bool gtt ; bool write ; char __data[0U] ; }; struct trace_event_raw_i915_gem_object { struct trace_entry ent ; struct drm_i915_gem_object *obj ; char __data[0U] ; }; struct trace_event_raw_i915_gem_evict { struct trace_entry ent ; u32 dev ; u32 size ; 
u32 align ; unsigned int flags ; char __data[0U] ; }; struct trace_event_raw_i915_gem_evict_everything { struct trace_entry ent ; u32 dev ; char __data[0U] ; }; struct trace_event_raw_i915_gem_evict_vm { struct trace_entry ent ; u32 dev ; struct i915_address_space *vm ; char __data[0U] ; }; struct trace_event_raw_i915_gem_ring_sync_to { struct trace_entry ent ; u32 dev ; u32 sync_from ; u32 sync_to ; u32 seqno ; char __data[0U] ; }; struct trace_event_raw_i915_gem_ring_dispatch { struct trace_entry ent ; u32 dev ; u32 ring ; u32 seqno ; u32 flags ; char __data[0U] ; }; struct trace_event_raw_i915_gem_ring_flush { struct trace_entry ent ; u32 dev ; u32 ring ; u32 invalidate ; u32 flush ; char __data[0U] ; }; struct trace_event_raw_i915_gem_request { struct trace_entry ent ; u32 dev ; u32 ring ; u32 seqno ; char __data[0U] ; }; struct trace_event_raw_i915_gem_request_notify { struct trace_entry ent ; u32 dev ; u32 ring ; u32 seqno ; char __data[0U] ; }; struct trace_event_raw_i915_gem_request_wait_begin { struct trace_entry ent ; u32 dev ; u32 ring ; u32 seqno ; bool blocking ; char __data[0U] ; }; struct trace_event_raw_i915_flip_request { struct trace_entry ent ; int plane ; struct drm_i915_gem_object *obj ; char __data[0U] ; }; struct trace_event_raw_i915_flip_complete { struct trace_entry ent ; int plane ; struct drm_i915_gem_object *obj ; char __data[0U] ; }; struct trace_event_raw_i915_reg_rw { struct trace_entry ent ; u64 val ; u32 reg ; u16 write ; u16 len ; char __data[0U] ; }; struct trace_event_raw_intel_gpu_freq_change { struct trace_entry ent ; u32 freq ; char __data[0U] ; }; struct trace_event_raw_i915_ppgtt { struct trace_entry ent ; struct i915_address_space *vm ; u32 dev ; char __data[0U] ; }; struct trace_event_raw_i915_context { struct trace_entry ent ; u32 dev ; struct intel_context *ctx ; struct i915_address_space *vm ; char __data[0U] ; }; struct trace_event_raw_switch_mm { struct trace_entry ent ; u32 ring ; struct intel_context *to ; struct 
i915_address_space *vm ; u32 dev ; char __data[0U] ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_372___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_374___0 { struct tracepoint_func *__val ; char __c[1U] ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_376 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_378 { struct tracepoint_func *__val ; char __c[1U] ; }; enum hrtimer_restart; struct drm_i915_reg_read { __u64 offset ; __u64 val ; }; struct drm_i915_reset_stats { __u32 ctx_id ; __u32 flags ; __u32 reset_count ; __u32 batch_active ; __u32 batch_pending ; __u32 pad ; }; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; union __anonunion___u_412 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_414 { struct tracepoint_func *__val ; char __c[1U] ; }; struct register_whitelist { uint64_t offset ; uint32_t size ; uint32_t gen_bitmask ; }; typedef __u16 __le16; enum hrtimer_restart; struct component_ops { int (*bind)(struct device * , struct device * , void * ) ; void (*unbind)(struct device * , struct device * , void * ) ; }; struct i915_audio_component_ops { struct module *owner ; void (*get_power)(struct device * ) ; void (*put_power)(struct device * ) ; void (*codec_wake_override)(struct device * , bool ) ; int (*get_cdclk_freq)(struct device * ) ; }; struct i915_audio_component { struct device *dev ; struct i915_audio_component_ops const *ops ; }; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct est_timings { u8 t1 ; u8 t2 ; u8 mfg_rsvd ; }; struct std_timing { u8 hsize ; u8 vfreq_aspect ; }; struct detailed_pixel_timing { u8 hactive_lo ; u8 hblank_lo ; u8 hactive_hblank_hi ; u8 vactive_lo ; u8 vblank_lo ; u8 
vactive_vblank_hi ; u8 hsync_offset_lo ; u8 hsync_pulse_width_lo ; u8 vsync_offset_pulse_width_lo ; u8 hsync_vsync_offset_pulse_width_hi ; u8 width_mm_lo ; u8 height_mm_lo ; u8 width_height_mm_hi ; u8 hborder ; u8 vborder ; u8 misc ; }; struct detailed_data_string { u8 str[13U] ; }; struct __anonstruct_gtf2_440 { u8 reserved ; u8 hfreq_start_khz ; u8 c ; __le16 m ; u8 k ; u8 j ; }; struct __anonstruct_cvt_441 { u8 version ; u8 data1 ; u8 data2 ; u8 supported_aspects ; u8 flags ; u8 supported_scalings ; u8 preferred_refresh ; }; union __anonunion_formula_439 { struct __anonstruct_gtf2_440 gtf2 ; struct __anonstruct_cvt_441 cvt ; }; struct detailed_data_monitor_range { u8 min_vfreq ; u8 max_vfreq ; u8 min_hfreq_khz ; u8 max_hfreq_khz ; u8 pixel_clock_mhz ; u8 flags ; union __anonunion_formula_439 formula ; }; struct detailed_data_wpindex { u8 white_yx_lo ; u8 white_x_hi ; u8 white_y_hi ; u8 gamma ; }; struct cvt_timing { u8 code[3U] ; }; union __anonunion_data_442 { struct detailed_data_string str ; struct detailed_data_monitor_range range ; struct detailed_data_wpindex color ; struct std_timing timings[6U] ; struct cvt_timing cvt[4U] ; }; struct detailed_non_pixel { u8 pad1 ; u8 type ; u8 pad2 ; union __anonunion_data_442 data ; }; union __anonunion_data_443 { struct detailed_pixel_timing pixel_data ; struct detailed_non_pixel other_data ; }; struct detailed_timing { __le16 pixel_clock ; union __anonunion_data_443 data ; }; struct edid { u8 header[8U] ; u8 mfg_id[2U] ; u8 prod_code[2U] ; u32 serial ; u8 mfg_week ; u8 mfg_year ; u8 version ; u8 revision ; u8 input ; u8 width_cm ; u8 height_cm ; u8 gamma ; u8 features ; u8 red_green_lo ; u8 black_white_lo ; u8 red_x ; u8 red_y ; u8 green_x ; u8 green_y ; u8 blue_x ; u8 blue_y ; u8 white_x ; u8 white_y ; struct est_timings established_timings ; struct std_timing standard_timings[8U] ; struct detailed_timing detailed_timings[4U] ; u8 extensions ; u8 checksum ; }; struct __anonstruct_hdmi_audio_clock_444 { int clock ; 
u32 config ; }; enum hrtimer_restart; struct dmi_strmatch { unsigned char slot : 7 ; unsigned char exact_match : 1 ; char substr[79U] ; }; struct dmi_system_id { int (*callback)(struct dmi_system_id const * ) ; char const *ident ; struct dmi_strmatch matches[4U] ; void *driver_data ; }; enum i2c_slave_event; enum i2c_slave_event; struct vbt_header { u8 signature[20U] ; u16 version ; u16 header_size ; u16 vbt_size ; u8 vbt_checksum ; u8 reserved0 ; u32 bdb_offset ; u32 aim_offset[4U] ; }; struct bdb_header { u8 signature[16U] ; u16 version ; u16 header_size ; u16 bdb_size ; }; struct bdb_general_features { unsigned char panel_fitting : 2 ; unsigned char flexaim : 1 ; unsigned char msg_enable : 1 ; unsigned char clear_screen : 3 ; unsigned char color_flip : 1 ; unsigned char download_ext_vbt : 1 ; unsigned char enable_ssc : 1 ; unsigned char ssc_freq : 1 ; unsigned char enable_lfp_on_override : 1 ; unsigned char disable_ssc_ddt : 1 ; unsigned char rsvd7 : 1 ; unsigned char display_clock_mode : 1 ; unsigned char rsvd8 : 1 ; unsigned char disable_smooth_vision : 1 ; unsigned char single_dvi : 1 ; unsigned char rsvd9 : 1 ; unsigned char fdi_rx_polarity_inverted : 1 ; unsigned char rsvd10 : 4 ; u8 legacy_monitor_detect ; unsigned char int_crt_support : 1 ; unsigned char int_tv_support : 1 ; unsigned char int_efp_support : 1 ; unsigned char dp_ssc_enb : 1 ; unsigned char dp_ssc_freq : 1 ; unsigned char rsvd11 : 3 ; }; struct bdb_general_definitions { u8 crt_ddc_gmbus_pin ; unsigned char dpms_acpi : 1 ; unsigned char skip_boot_crt_detect : 1 ; unsigned char dpms_aim : 1 ; unsigned char rsvd1 : 5 ; u8 boot_display[2U] ; u8 child_dev_size ; uint8_t devices[0U] ; }; struct bdb_lvds_options { u8 panel_type ; u8 rsvd1 ; unsigned char pfit_mode : 2 ; unsigned char pfit_text_mode_enhanced : 1 ; unsigned char pfit_gfx_mode_enhanced : 1 ; unsigned char pfit_ratio_auto : 1 ; unsigned char pixel_dither : 1 ; unsigned char lvds_edid : 1 ; unsigned char rsvd2 : 1 ; u8 rsvd4 ; u32 
lvds_panel_channel_bits ; u16 ssc_bits ; u16 ssc_freq ; u16 ssc_ddt ; u16 panel_color_depth ; u32 dps_panel_type_bits ; u32 blt_control_type_bits ; }; struct bdb_lvds_lfp_data_ptr { u16 fp_timing_offset ; u8 fp_table_size ; u16 dvo_timing_offset ; u8 dvo_table_size ; u16 panel_pnp_id_offset ; u8 pnp_table_size ; }; struct bdb_lvds_lfp_data_ptrs { u8 lvds_entries ; struct bdb_lvds_lfp_data_ptr ptr[16U] ; }; struct lvds_fp_timing { u16 x_res ; u16 y_res ; u32 lvds_reg ; u32 lvds_reg_val ; u32 pp_on_reg ; u32 pp_on_reg_val ; u32 pp_off_reg ; u32 pp_off_reg_val ; u32 pp_cycle_reg ; u32 pp_cycle_reg_val ; u32 pfit_reg ; u32 pfit_reg_val ; u16 terminator ; }; struct lvds_dvo_timing { u16 clock ; u8 hactive_lo ; u8 hblank_lo ; unsigned char hblank_hi : 4 ; unsigned char hactive_hi : 4 ; u8 vactive_lo ; u8 vblank_lo ; unsigned char vblank_hi : 4 ; unsigned char vactive_hi : 4 ; u8 hsync_off_lo ; u8 hsync_pulse_width ; unsigned char vsync_pulse_width : 4 ; unsigned char vsync_off : 4 ; unsigned char rsvd0 : 6 ; unsigned char hsync_off_hi : 2 ; u8 h_image ; u8 v_image ; u8 max_hv ; u8 h_border ; u8 v_border ; unsigned char rsvd1 : 3 ; unsigned char digital : 2 ; unsigned char vsync_positive : 1 ; unsigned char hsync_positive : 1 ; unsigned char rsvd2 : 1 ; }; struct lvds_pnp_id { u16 mfg_name ; u16 product_code ; u32 serial ; u8 mfg_week ; u8 mfg_year ; }; struct bdb_lvds_lfp_data_entry { struct lvds_fp_timing fp_timing ; struct lvds_dvo_timing dvo_timing ; struct lvds_pnp_id pnp_id ; }; struct bdb_lvds_lfp_data { struct bdb_lvds_lfp_data_entry data[16U] ; }; struct bdb_lfp_backlight_data_entry { unsigned char type : 2 ; unsigned char active_low_pwm : 1 ; unsigned char obsolete1 : 5 ; u16 pwm_freq_hz ; u8 min_brightness ; u8 obsolete2 ; u8 obsolete3 ; }; struct bdb_lfp_backlight_data { u8 entry_size ; struct bdb_lfp_backlight_data_entry data[16U] ; u8 level[16U] ; }; struct bdb_sdvo_lvds_options { u8 panel_backlight ; u8 h40_set_panel_type ; u8 panel_type ; u8 ssc_clk_freq ; 
u16 als_low_trip ; u16 als_high_trip ; u8 sclalarcoeff_tab_row_num ; u8 sclalarcoeff_tab_row_size ; u8 coefficient[8U] ; u8 panel_misc_bits_1 ; u8 panel_misc_bits_2 ; u8 panel_misc_bits_3 ; u8 panel_misc_bits_4 ; }; struct bdb_driver_features { unsigned char boot_dev_algorithm : 1 ; unsigned char block_display_switch : 1 ; unsigned char allow_display_switch : 1 ; unsigned char hotplug_dvo : 1 ; unsigned char dual_view_zoom : 1 ; unsigned char int15h_hook : 1 ; unsigned char sprite_in_clone : 1 ; unsigned char primary_lfp_id : 1 ; u16 boot_mode_x ; u16 boot_mode_y ; u8 boot_mode_bpp ; u8 boot_mode_refresh ; unsigned char enable_lfp_primary : 1 ; unsigned char selective_mode_pruning : 1 ; unsigned char dual_frequency : 1 ; unsigned char render_clock_freq : 1 ; unsigned char nt_clone_support : 1 ; unsigned char power_scheme_ui : 1 ; unsigned char sprite_display_assign : 1 ; unsigned char cui_aspect_scaling : 1 ; unsigned char preserve_aspect_ratio : 1 ; unsigned char sdvo_device_power_down : 1 ; unsigned char crt_hotplug : 1 ; unsigned char lvds_config : 2 ; unsigned char tv_hotplug : 1 ; unsigned char hdmi_config : 2 ; unsigned char static_display : 1 ; unsigned char reserved2 : 7 ; u16 legacy_crt_max_x ; u16 legacy_crt_max_y ; u8 legacy_crt_max_refresh ; u8 hdmi_termination ; u8 custom_vbt_version ; unsigned char rmpm_enabled : 1 ; unsigned char s2ddt_enabled : 1 ; unsigned char dpst_enabled : 1 ; unsigned char bltclt_enabled : 1 ; unsigned char adb_enabled : 1 ; unsigned char drrs_enabled : 1 ; unsigned char grs_enabled : 1 ; unsigned char gpmt_enabled : 1 ; unsigned char tbt_enabled : 1 ; unsigned char psr_enabled : 1 ; unsigned char ips_enabled : 1 ; unsigned char reserved3 : 4 ; unsigned char pc_feature_valid : 1 ; }; struct edp_link_params { unsigned char rate : 4 ; unsigned char lanes : 4 ; unsigned char preemphasis : 4 ; unsigned char vswing : 4 ; }; struct bdb_edp { struct edp_power_seq power_seqs[16U] ; u32 color_depth ; struct edp_link_params 
link_params[16U] ; u32 sdrrs_msa_timing_delay ; u16 edp_s3d_feature ; u16 edp_t3_optimization ; u64 edp_vswing_preemph ; }; struct psr_table { unsigned char full_link : 1 ; unsigned char require_aux_to_wakeup : 1 ; unsigned char feature_bits_rsvd : 6 ; unsigned char idle_frames : 4 ; unsigned char lines_to_wait : 3 ; unsigned char wait_times_rsvd : 1 ; u16 tp1_wakeup_time ; u16 tp2_tp3_wakeup_time ; }; struct bdb_psr { struct psr_table psr_table[16U] ; }; struct bdb_mipi_config { struct mipi_config config[6U] ; struct mipi_pps_data pps[6U] ; }; struct bdb_mipi_sequence { u8 version ; u8 data[0U] ; }; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_i915_get_pipe_from_crtc_id { __u32 crtc_id ; __u32 pipe ; }; enum i915_cache_level; enum i915_cache_level; enum dpio_channel { DPIO_CH0 = 0, DPIO_CH1 = 1 } ; enum intel_sbi_destination { SBI_ICLK = 0, SBI_MPHY = 1 } ; union __anonunion___u_405 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_407___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_409___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_411 { struct tracepoint_func *__val ; char __c[1U] ; }; enum mode_set_atomic { LEAVE_ATOMIC_MODE_SET = 0, ENTER_ATOMIC_MODE_SET = 1 } ; struct drm_crtc_helper_funcs { void (*dpms)(struct drm_crtc * , int ) ; void (*prepare)(struct drm_crtc * ) ; void (*commit)(struct drm_crtc * ) ; bool (*mode_fixup)(struct drm_crtc * , struct drm_display_mode const * , struct drm_display_mode * ) ; int (*mode_set)(struct drm_crtc * , struct drm_display_mode * , struct drm_display_mode * , int , int , struct drm_framebuffer * ) ; void (*mode_set_nofb)(struct drm_crtc * ) ; int (*mode_set_base)(struct drm_crtc * , int , int , struct drm_framebuffer * ) ; int (*mode_set_base_atomic)(struct drm_crtc * , struct drm_framebuffer * , int , int , enum mode_set_atomic ) ; void (*load_lut)(struct 
drm_crtc * ) ; void (*disable)(struct drm_crtc * ) ; void (*enable)(struct drm_crtc * ) ; int (*atomic_check)(struct drm_crtc * , struct drm_crtc_state * ) ; void (*atomic_begin)(struct drm_crtc * ) ; void (*atomic_flush)(struct drm_crtc * ) ; }; typedef struct dpll intel_clock_t; struct intel_mmio_flip { struct work_struct work ; struct drm_i915_private *i915 ; struct drm_i915_gem_request *req ; struct intel_crtc *crtc ; }; enum link_m_n_set { M1_N1 = 0, M2_N2 = 1 } ; struct intel_load_detect_pipe { struct drm_framebuffer *release_fb ; bool load_detect_temp ; int dpms_mode ; }; struct drm_plane_helper_funcs; struct drm_plane_helper_funcs { int (*prepare_fb)(struct drm_plane * , struct drm_framebuffer * , struct drm_plane_state const * ) ; void (*cleanup_fb)(struct drm_plane * , struct drm_framebuffer * , struct drm_plane_state const * ) ; int (*atomic_check)(struct drm_plane * , struct drm_plane_state * ) ; void (*atomic_update)(struct drm_plane * , struct drm_plane_state * ) ; void (*atomic_disable)(struct drm_plane * , struct drm_plane_state * ) ; }; struct __anonstruct_intel_range_t_445 { int min ; int max ; }; typedef struct __anonstruct_intel_range_t_445 intel_range_t; struct __anonstruct_intel_p2_t_446 { int dot_limit ; int p2_slow ; int p2_fast ; }; typedef struct __anonstruct_intel_p2_t_446 intel_p2_t; typedef struct intel_limit intel_limit_t; struct intel_limit { intel_range_t dot ; intel_range_t vco ; intel_range_t n ; intel_range_t m ; intel_range_t m1 ; intel_range_t m2 ; intel_range_t p ; intel_range_t p1 ; intel_p2_t p2 ; }; struct skl_cdclk_entry { unsigned int freq ; unsigned int vco ; }; struct intel_quirk { int device ; int subsystem_vendor ; int subsystem_device ; void (*hook)(struct drm_device * ) ; }; struct intel_dmi_quirk { void (*hook)(struct drm_device * ) ; struct dmi_system_id const (*dmi_id_list)[] ; }; struct intel_cursor_error_state { u32 control ; u32 position ; u32 base ; u32 size ; }; struct intel_pipe_error_state { bool 
power_domain_on ; u32 source ; u32 stat ; }; struct intel_plane_error_state { u32 control ; u32 stride ; u32 size ; u32 pos ; u32 addr ; u32 surface ; u32 tile_offset ; }; struct intel_transcoder_error_state { bool power_domain_on ; enum transcoder cpu_transcoder ; u32 conf ; u32 htotal ; u32 hblank ; u32 hsync ; u32 vtotal ; u32 vblank ; u32 vsync ; }; struct intel_display_error_state { u32 power_well_driver ; int num_transcoders ; struct intel_cursor_error_state cursor[3U] ; struct intel_pipe_error_state pipe[3U] ; struct intel_plane_error_state plane[3U] ; struct intel_transcoder_error_state transcoder[4U] ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_prop_enum_list { int type ; char *name ; }; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_intel_overlay_put_image { __u32 flags ; __u32 bo_handle ; __u16 stride_Y ; __u16 stride_UV ; __u32 offset_Y ; __u32 offset_U ; __u32 offset_V ; __u16 src_width ; __u16 src_height ; __u16 src_scan_width ; __u16 src_scan_height ; __u32 crtc_id ; __u16 dst_x ; __u16 dst_y ; __u16 dst_width ; __u16 dst_height ; }; struct drm_intel_overlay_attrs { __u32 flags ; __u32 color_key ; __s32 brightness ; __u32 contrast ; __u32 saturation ; __u32 gamma0 ; __u32 gamma1 ; __u32 gamma2 ; __u32 gamma3 ; __u32 gamma4 ; __u32 gamma5 ; }; enum i915_cache_level; enum i915_cache_level; struct overlay_registers { u32 OBUF_0Y ; u32 OBUF_1Y ; u32 OBUF_0U ; u32 OBUF_0V ; u32 OBUF_1U ; u32 OBUF_1V ; u32 OSTRIDE ; u32 YRGB_VPH ; u32 UV_VPH ; u32 HORZ_PH ; u32 INIT_PHS ; u32 DWINPOS ; u32 DWINSZ ; u32 SWIDTH ; u32 
SWIDTHSW ; u32 SHEIGHT ; u32 YRGBSCALE ; u32 UVSCALE ; u32 OCLRC0 ; u32 OCLRC1 ; u32 DCLRKV ; u32 DCLRKM ; u32 SCLRKVH ; u32 SCLRKVL ; u32 SCLRKEN ; u32 OCONFIG ; u32 OCMD ; u32 RESERVED1 ; u32 OSTART_0Y ; u32 OSTART_1Y ; u32 OSTART_0U ; u32 OSTART_0V ; u32 OSTART_1U ; u32 OSTART_1V ; u32 OTILEOFF_0Y ; u32 OTILEOFF_1Y ; u32 OTILEOFF_0U ; u32 OTILEOFF_0V ; u32 OTILEOFF_1U ; u32 OTILEOFF_1V ; u32 FASTHSCALE ; u32 UVSCALEV ; u32 RESERVEDC[86U] ; u16 Y_VCOEFS[51U] ; u16 RESERVEDD[77U] ; u16 Y_HCOEFS[85U] ; u16 RESERVEDE[171U] ; u16 UV_VCOEFS[51U] ; u16 RESERVEDF[77U] ; u16 UV_HCOEFS[51U] ; u16 RESERVEDG[77U] ; }; struct intel_overlay { struct drm_device *dev ; struct intel_crtc *crtc ; struct drm_i915_gem_object *vid_bo ; struct drm_i915_gem_object *old_vid_bo ; bool active ; bool pfit_active ; u32 pfit_vscale_ratio ; unsigned int color_key : 24 ; unsigned char color_key_enabled : 1 ; u32 brightness ; u32 contrast ; u32 saturation ; u32 old_xscale ; u32 old_yscale ; u32 flip_addr ; struct drm_i915_gem_object *reg_bo ; struct drm_i915_gem_request *last_flip_req ; void (*flip_tail)(struct intel_overlay * ) ; }; struct put_image_params { int format ; short dst_x ; short dst_y ; short dst_w ; short dst_h ; short src_w ; short src_scan_h ; short src_scan_w ; short src_h ; short stride_Y ; short stride_UV ; int offset_Y ; int offset_U ; int offset_V ; }; struct intel_overlay_error_state { struct overlay_registers regs ; unsigned long base ; u32 dovsta ; u32 isr ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct edp_sdp_header { u8 HB0 ; u8 HB1 ; u8 HB2 ; u8 HB3 ; }; struct edp_vsc_psr { struct edp_sdp_header sdp_header ; u8 DB0 ; u8 DB1 ; u8 DB2 ; u8 DB3 ; u8 DB4 ; u8 DB5 ; u8 DB6 ; u8 DB7 ; u8 DB8_31[24U] ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum 
i915_cache_level; enum i915_cache_level; union __anonunion___u_291 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_293 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_295 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_297 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_299 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_301 { struct tracepoint_func *__val ; char __c[1U] ; }; enum hrtimer_restart; struct acpi_device; typedef u64 acpi_size; typedef u64 acpi_io_address; typedef u32 acpi_status; typedef void *acpi_handle; typedef u32 acpi_object_type; struct __anonstruct_integer_222 { acpi_object_type type ; u64 value ; }; struct __anonstruct_string_223 { acpi_object_type type ; u32 length ; char *pointer ; }; struct __anonstruct_buffer_224 { acpi_object_type type ; u32 length ; u8 *pointer ; }; struct __anonstruct_package_225 { acpi_object_type type ; u32 count ; union acpi_object *elements ; }; struct __anonstruct_reference_226 { acpi_object_type type ; acpi_object_type actual_type ; acpi_handle handle ; }; struct __anonstruct_processor_227 { acpi_object_type type ; u32 proc_id ; acpi_io_address pblk_address ; u32 pblk_length ; }; struct __anonstruct_power_resource_228 { acpi_object_type type ; u32 system_level ; u32 resource_order ; }; union acpi_object { acpi_object_type type ; struct __anonstruct_integer_222 integer ; struct __anonstruct_string_223 string ; struct __anonstruct_buffer_224 buffer ; struct __anonstruct_package_225 package ; struct __anonstruct_reference_226 reference ; struct __anonstruct_processor_227 processor ; struct __anonstruct_power_resource_228 power_resource ; }; struct acpi_buffer { acpi_size length ; void *pointer ; }; struct acpi_driver; struct acpi_hotplug_profile { struct kobject kobj ; int (*scan_dependent)(struct acpi_device * ) ; void (*notify_online)(struct acpi_device * ) ; bool enabled ; bool 
demand_offline ; }; struct acpi_scan_handler { struct acpi_device_id const *ids ; struct list_head list_node ; bool (*match)(char * , struct acpi_device_id const ** ) ; int (*attach)(struct acpi_device * , struct acpi_device_id const * ) ; void (*detach)(struct acpi_device * ) ; void (*bind)(struct device * ) ; void (*unbind)(struct device * ) ; struct acpi_hotplug_profile hotplug ; }; struct acpi_hotplug_context { struct acpi_device *self ; int (*notify)(struct acpi_device * , u32 ) ; void (*uevent)(struct acpi_device * , u32 ) ; void (*fixup)(struct acpi_device * ) ; }; struct acpi_device_ops { int (*add)(struct acpi_device * ) ; int (*remove)(struct acpi_device * ) ; void (*notify)(struct acpi_device * , u32 ) ; }; struct acpi_driver { char name[80U] ; char class[80U] ; struct acpi_device_id const *ids ; unsigned int flags ; struct acpi_device_ops ops ; struct device_driver drv ; struct module *owner ; }; struct acpi_device_status { unsigned char present : 1 ; unsigned char enabled : 1 ; unsigned char show_in_ui : 1 ; unsigned char functional : 1 ; unsigned char battery_present : 1 ; unsigned int reserved : 27 ; }; struct acpi_device_flags { unsigned char dynamic_status : 1 ; unsigned char removable : 1 ; unsigned char ejectable : 1 ; unsigned char power_manageable : 1 ; unsigned char match_driver : 1 ; unsigned char initialized : 1 ; unsigned char visited : 1 ; unsigned char hotplug_notify : 1 ; unsigned char is_dock_station : 1 ; unsigned char of_compatible_ok : 1 ; unsigned char coherent_dma : 1 ; unsigned char cca_seen : 1 ; unsigned int reserved : 20 ; }; struct acpi_device_dir { struct proc_dir_entry *entry ; }; typedef char acpi_bus_id[8U]; typedef unsigned long acpi_bus_address; typedef char acpi_device_name[40U]; typedef char acpi_device_class[20U]; struct acpi_pnp_type { unsigned char hardware_id : 1 ; unsigned char bus_address : 1 ; unsigned char platform_id : 1 ; unsigned int reserved : 29 ; }; struct acpi_device_pnp { acpi_bus_id bus_id ; struct 
acpi_pnp_type type ; acpi_bus_address bus_address ; char *unique_id ; struct list_head ids ; acpi_device_name device_name ; acpi_device_class device_class ; union acpi_object *str_obj ; }; struct acpi_device_power_flags { unsigned char explicit_get : 1 ; unsigned char power_resources : 1 ; unsigned char inrush_current : 1 ; unsigned char power_removed : 1 ; unsigned char ignore_parent : 1 ; unsigned char dsw_present : 1 ; unsigned int reserved : 26 ; }; struct __anonstruct_flags_253 { unsigned char valid : 1 ; unsigned char explicit_set : 1 ; unsigned char reserved : 6 ; }; struct acpi_device_power_state { struct __anonstruct_flags_253 flags ; int power ; int latency ; struct list_head resources ; }; struct acpi_device_power { int state ; struct acpi_device_power_flags flags ; struct acpi_device_power_state states[5U] ; }; struct acpi_device_perf_flags { u8 reserved ; }; struct __anonstruct_flags_254 { unsigned char valid : 1 ; unsigned char reserved : 7 ; }; struct acpi_device_perf_state { struct __anonstruct_flags_254 flags ; u8 power ; u8 performance ; int latency ; }; struct acpi_device_perf { int state ; struct acpi_device_perf_flags flags ; int state_count ; struct acpi_device_perf_state *states ; }; struct acpi_device_wakeup_flags { unsigned char valid : 1 ; unsigned char run_wake : 1 ; unsigned char notifier_present : 1 ; unsigned char enabled : 1 ; }; struct acpi_device_wakeup_context { struct work_struct work ; struct device *dev ; }; struct acpi_device_wakeup { acpi_handle gpe_device ; u64 gpe_number ; u64 sleep_state ; struct list_head resources ; struct acpi_device_wakeup_flags flags ; struct acpi_device_wakeup_context context ; struct wakeup_source *ws ; int prepare_count ; }; struct acpi_device_data { union acpi_object const *pointer ; union acpi_object const *properties ; union acpi_object const *of_compatible ; }; struct acpi_gpio_mapping; struct acpi_device { int device_type ; acpi_handle handle ; struct fwnode_handle fwnode ; struct acpi_device 
*parent ; struct list_head children ; struct list_head node ; struct list_head wakeup_list ; struct list_head del_list ; struct acpi_device_status status ; struct acpi_device_flags flags ; struct acpi_device_pnp pnp ; struct acpi_device_power power ; struct acpi_device_wakeup wakeup ; struct acpi_device_perf performance ; struct acpi_device_dir dir ; struct acpi_device_data data ; struct acpi_scan_handler *handler ; struct acpi_hotplug_context *hp ; struct acpi_driver *driver ; struct acpi_gpio_mapping const *driver_gpios ; void *driver_data ; struct device dev ; unsigned int physical_node_count ; unsigned int dep_unmet ; struct list_head physical_node_list ; struct mutex physical_node_lock ; void (*remove)(struct acpi_device * ) ; }; struct acpi_gpio_params { unsigned int crs_entry_index ; unsigned int line_index ; bool active_low ; }; struct acpi_gpio_mapping { char const *name ; struct acpi_gpio_params const *data ; unsigned int size ; }; struct intel_dsm_priv { acpi_handle dhandle ; }; struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; enum hrtimer_restart; typedef u64 acpi_physical_address; typedef char *acpi_string; struct acpi_object_list { u32 count ; union acpi_object *pointer ; }; struct acpi_bus_event { struct list_head node ; acpi_device_class device_class ; acpi_bus_id bus_id ; u32 type ; u32 data ; }; enum acpi_backlight_type { acpi_backlight_undef = -1, acpi_backlight_none = 0, acpi_backlight_video = 1, acpi_backlight_vendor = 2, acpi_backlight_native = 3 } ; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct opregion_header { u8 signature[16U] ; u32 size ; u32 opregion_ver ; u8 bios_ver[32U] ; u8 vbios_ver[16U] ; u8 driver_ver[16U] ; u32 mboxes ; u8 reserved[164U] ; }; struct opregion_acpi { u32 drdy ; u32 csts ; u32 cevt ; u8 rsvd1[20U] ; u32 didl[8U] ; u32 cpdl[8U] ; u32 cadl[8U] ; u32 nadl[8U] ; 
u32 aslp ; u32 tidx ; u32 chpd ; u32 clid ; u32 cdck ; u32 sxsw ; u32 evts ; u32 cnot ; u32 nrdy ; u8 rsvd2[60U] ; }; struct opregion_swsci { u32 scic ; u32 parm ; u32 dslp ; u8 rsvd[244U] ; }; struct opregion_asle { u32 ardy ; u32 aslc ; u32 tche ; u32 alsi ; u32 bclp ; u32 pfit ; u32 cblv ; u16 bclm[20U] ; u32 cpfm ; u32 epfm ; u8 plut[74U] ; u32 pfmb ; u32 cddv ; u32 pcft ; u32 srot ; u32 iuer ; u8 rsvd[86U] ; }; struct __anonstruct_power_state_map_448 { pci_power_t pci_power_state ; u32 parm ; }; enum hrtimer_restart; struct llist_head { struct llist_node *first ; }; typedef u64 async_cookie_t; struct tty_driver; typedef unsigned char cc_t; typedef unsigned int speed_t; typedef unsigned int tcflag_t; struct ktermios { tcflag_t c_iflag ; tcflag_t c_oflag ; tcflag_t c_cflag ; tcflag_t c_lflag ; cc_t c_line ; cc_t c_cc[19U] ; speed_t c_ispeed ; speed_t c_ospeed ; }; struct winsize { unsigned short ws_row ; unsigned short ws_col ; unsigned short ws_xpixel ; unsigned short ws_ypixel ; }; struct termiox { __u16 x_hflag ; __u16 x_cflag ; __u16 x_rflag[5U] ; __u16 x_sflag ; }; struct serial_icounter_struct; struct tty_operations { struct tty_struct *(*lookup)(struct tty_driver * , struct inode * , int ) ; int (*install)(struct tty_driver * , struct tty_struct * ) ; void (*remove)(struct tty_driver * , struct tty_struct * ) ; int (*open)(struct tty_struct * , struct file * ) ; void (*close)(struct tty_struct * , struct file * ) ; void (*shutdown)(struct tty_struct * ) ; void (*cleanup)(struct tty_struct * ) ; int (*write)(struct tty_struct * , unsigned char const * , int ) ; int (*put_char)(struct tty_struct * , unsigned char ) ; void (*flush_chars)(struct tty_struct * ) ; int (*write_room)(struct tty_struct * ) ; int (*chars_in_buffer)(struct tty_struct * ) ; int (*ioctl)(struct tty_struct * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct tty_struct * , unsigned int , unsigned long ) ; void (*set_termios)(struct tty_struct * , struct ktermios * ) ; void 
(*throttle)(struct tty_struct * ) ; void (*unthrottle)(struct tty_struct * ) ; void (*stop)(struct tty_struct * ) ; void (*start)(struct tty_struct * ) ; void (*hangup)(struct tty_struct * ) ; int (*break_ctl)(struct tty_struct * , int ) ; void (*flush_buffer)(struct tty_struct * ) ; void (*set_ldisc)(struct tty_struct * ) ; void (*wait_until_sent)(struct tty_struct * , int ) ; void (*send_xchar)(struct tty_struct * , char ) ; int (*tiocmget)(struct tty_struct * ) ; int (*tiocmset)(struct tty_struct * , unsigned int , unsigned int ) ; int (*resize)(struct tty_struct * , struct winsize * ) ; int (*set_termiox)(struct tty_struct * , struct termiox * ) ; int (*get_icount)(struct tty_struct * , struct serial_icounter_struct * ) ; int (*poll_init)(struct tty_driver * , int , char * ) ; int (*poll_get_char)(struct tty_driver * , int ) ; void (*poll_put_char)(struct tty_driver * , int , char ) ; struct file_operations const *proc_fops ; }; struct tty_port; struct tty_driver { int magic ; struct kref kref ; struct cdev *cdevs ; struct module *owner ; char const *driver_name ; char const *name ; int name_base ; int major ; int minor_start ; unsigned int num ; short type ; short subtype ; struct ktermios init_termios ; unsigned long flags ; struct proc_dir_entry *proc_entry ; struct tty_driver *other ; struct tty_struct **ttys ; struct tty_port **ports ; struct ktermios **termios ; void *driver_state ; struct tty_operations const *ops ; struct list_head tty_drivers ; }; struct ld_semaphore { long count ; raw_spinlock_t wait_lock ; unsigned int wait_readers ; struct list_head read_wait ; struct list_head write_wait ; struct lockdep_map dep_map ; }; struct tty_ldisc_ops { int magic ; char *name ; int num ; int flags ; int (*open)(struct tty_struct * ) ; void (*close)(struct tty_struct * ) ; void (*flush_buffer)(struct tty_struct * ) ; ssize_t (*chars_in_buffer)(struct tty_struct * ) ; ssize_t (*read)(struct tty_struct * , struct file * , unsigned char * , size_t ) ; ssize_t 
(*write)(struct tty_struct * , struct file * , unsigned char const * , size_t ) ; int (*ioctl)(struct tty_struct * , struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct tty_struct * , struct file * , unsigned int , unsigned long ) ; void (*set_termios)(struct tty_struct * , struct ktermios * ) ; unsigned int (*poll)(struct tty_struct * , struct file * , struct poll_table_struct * ) ; int (*hangup)(struct tty_struct * ) ; void (*receive_buf)(struct tty_struct * , unsigned char const * , char * , int ) ; void (*write_wakeup)(struct tty_struct * ) ; void (*dcd_change)(struct tty_struct * , unsigned int ) ; void (*fasync)(struct tty_struct * , int ) ; int (*receive_buf2)(struct tty_struct * , unsigned char const * , char * , int ) ; struct module *owner ; int refcount ; }; struct tty_ldisc { struct tty_ldisc_ops *ops ; struct tty_struct *tty ; }; union __anonunion____missing_field_name_241 { struct tty_buffer *next ; struct llist_node free ; }; struct tty_buffer { union __anonunion____missing_field_name_241 __annonCompField69 ; int used ; int size ; int commit ; int read ; int flags ; unsigned long data[0U] ; }; struct tty_bufhead { struct tty_buffer *head ; struct work_struct work ; struct mutex lock ; atomic_t priority ; struct tty_buffer sentinel ; struct llist_head free ; atomic_t mem_used ; int mem_limit ; struct tty_buffer *tail ; }; struct tty_port_operations { int (*carrier_raised)(struct tty_port * ) ; void (*dtr_rts)(struct tty_port * , int ) ; void (*shutdown)(struct tty_port * ) ; int (*activate)(struct tty_port * , struct tty_struct * ) ; void (*destruct)(struct tty_port * ) ; }; struct tty_port { struct tty_bufhead buf ; struct tty_struct *tty ; struct tty_struct *itty ; struct tty_port_operations const *ops ; spinlock_t lock ; int blocked_open ; int count ; wait_queue_head_t open_wait ; wait_queue_head_t close_wait ; wait_queue_head_t delta_msr_wait ; unsigned long flags ; unsigned char console : 1 ; unsigned char low_latency : 1 
/*
 * CIL-flattened kernel type declarations (generated by CIL v1.5.1 for an
 * LDV/SV-COMP verification harness of the i915 driver).  This span is
 * declarations only -- struct/enum/typedef definitions and extern
 * prototypes concatenated from many kernel headers; the leading fields
 * belong to a struct whose header lies before this chunk, and the final
 * declaration continues past it.  NOTE(review): generated code -- only
 * this comment was added, all tokens are unchanged.
 */
; struct mutex mutex ; struct mutex buf_mutex ; unsigned char *xmit_buf ; unsigned int close_delay ; unsigned int closing_wait ; int drain_delay ; struct kref kref ; }; struct tty_struct { int magic ; struct kref kref ; struct device *dev ; struct tty_driver *driver ; struct tty_operations const *ops ; int index ; struct ld_semaphore ldisc_sem ; struct tty_ldisc *ldisc ; struct mutex atomic_write_lock ; struct mutex legacy_mutex ; struct mutex throttle_mutex ; struct rw_semaphore termios_rwsem ; struct mutex winsize_mutex ; spinlock_t ctrl_lock ; spinlock_t flow_lock ; struct ktermios termios ; struct ktermios termios_locked ; struct termiox *termiox ; char name[64U] ; struct pid *pgrp ; struct pid *session ; unsigned long flags ; int count ; struct winsize winsize ; unsigned char stopped : 1 ; unsigned char flow_stopped : 1 ; unsigned long unused : 62 ; int hw_stopped ; unsigned char ctrl_status ; unsigned char packet : 1 ; unsigned long unused_ctrl : 55 ; unsigned int receive_room ; int flow_change ; struct tty_struct *link ; struct fasync_struct *fasync ; int alt_speed ; wait_queue_head_t write_wait ; wait_queue_head_t read_wait ; struct work_struct hangup_work ; void *disc_data ; void *driver_data ; struct list_head tty_files ; int closing ; unsigned char *write_buf ; int write_cnt ; struct work_struct SAK_work ; struct tty_port *port ; }; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct intel_dvo_dev_ops; struct intel_dvo_device { char const *name ; int type ; u32 dvo_reg ; u32 gpio ; int slave_addr ; struct intel_dvo_dev_ops const *dev_ops ; void *dev_priv ; struct i2c_adapter *i2c_bus ; }; struct intel_dvo_dev_ops { bool (*init)(struct intel_dvo_device * , struct i2c_adapter * ) ; void (*create_resources)(struct intel_dvo_device * ) ; void (*dpms)(struct intel_dvo_device * , bool ) ; int (*mode_valid)(struct intel_dvo_device * , struct 
drm_display_mode * ) ; void (*prepare)(struct intel_dvo_device * ) ; void (*commit)(struct intel_dvo_device * ) ; void (*mode_set)(struct intel_dvo_device * , struct drm_display_mode * , struct drm_display_mode * ) ; enum drm_connector_status (*detect)(struct intel_dvo_device * ) ; bool (*get_hw_state)(struct intel_dvo_device * ) ; struct drm_display_mode *(*get_modes)(struct intel_dvo_device * ) ; void (*destroy)(struct intel_dvo_device * ) ; void (*dump_regs)(struct intel_dvo_device * ) ; }; struct ch7017_priv { uint8_t dummy ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct ch7xxx_id_struct { uint8_t vid ; char *name ; }; struct ch7xxx_did_struct { uint8_t did ; char *name ; }; struct ch7xxx_priv { bool quiet ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct ivch_priv { bool quiet ; uint16_t width ; uint16_t height ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct ns2501_reg { uint8_t offset ; uint8_t value ; }; struct ns2501_configuration { uint8_t sync ; uint8_t conf ; uint8_t syncb ; uint8_t dither ; uint8_t pll_a ; uint16_t pll_b ; uint16_t hstart ; uint16_t hstop ; uint16_t vstart ; uint16_t vstop ; uint16_t vsync ; uint16_t vtotal ; uint16_t hpos ; uint16_t vpos ; uint16_t voffs ; uint16_t hscale ; uint16_t vscale ; }; struct ns2501_priv { bool quiet ; struct ns2501_configuration const *conf ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct sil164_priv { bool quiet ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct tfp410_priv { bool quiet ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_connector_helper_funcs { int (*get_modes)(struct drm_connector * ) ; enum 
drm_mode_status (*mode_valid)(struct drm_connector * , struct drm_display_mode * ) ; struct drm_encoder *(*best_encoder)(struct drm_connector * ) ; }; enum i915_cache_level; enum i915_cache_level; struct intel_crt { struct intel_encoder base ; struct intel_connector *connector ; bool force_hotplug_required ; u32 adpa_reg ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct ddi_buf_trans { u32 trans1 ; u32 trans2 ; }; struct bxt_ddi_buf_trans { u32 margin ; u32 scale ; u32 enable ; u32 deemphasis ; bool default_index ; }; struct wrpll_rnp { unsigned int p ; unsigned int n2 ; unsigned int r2 ; }; struct skl_wrpll_params { uint32_t dco_fraction ; uint32_t dco_integer ; uint32_t qdiv_ratio ; uint32_t qdiv_mode ; uint32_t kdiv ; uint32_t pdiv ; uint32_t central_freq ; }; struct bxt_clk_div { uint32_t p1 ; uint32_t p2 ; uint32_t m2_int ; uint32_t m2_frac ; bool m2_frac_en ; uint32_t n ; }; struct skl_dpll_regs { u32 ctl ; u32 cfgcr1 ; u32 cfgcr2 ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct dp_link_dpll { int link_bw ; struct dpll dpll ; }; typedef bool ldv_func_ret_type___11; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct display_timing; struct drm_panel_funcs { int (*disable)(struct drm_panel * ) ; int (*unprepare)(struct drm_panel * ) ; int (*prepare)(struct drm_panel * ) ; int (*enable)(struct drm_panel * ) ; int (*get_modes)(struct drm_panel * ) ; int (*get_timings)(struct drm_panel * , unsigned int , struct display_timing * ) ; }; struct drm_panel { struct drm_device *drm ; struct drm_connector *connector ; struct device *dev ; struct drm_panel_funcs const *funcs ; struct list_head list ; }; struct mipi_dsi_msg { u8 channel ; u8 type ; u16 flags ; size_t tx_len ; void const *tx_buf ; 
size_t rx_len ; void *rx_buf ; }; struct mipi_dsi_packet { size_t size ; u8 header[4U] ; size_t payload_length ; u8 const *payload ; }; struct mipi_dsi_host_ops { int (*attach)(struct mipi_dsi_host * , struct mipi_dsi_device * ) ; int (*detach)(struct mipi_dsi_host * , struct mipi_dsi_device * ) ; ssize_t (*transfer)(struct mipi_dsi_host * , struct mipi_dsi_msg const * ) ; }; struct mipi_dsi_host { struct device *dev ; struct mipi_dsi_host_ops const *ops ; }; enum mipi_dsi_pixel_format { MIPI_DSI_FMT_RGB888 = 0, MIPI_DSI_FMT_RGB666 = 1, MIPI_DSI_FMT_RGB666_PACKED = 2, MIPI_DSI_FMT_RGB565 = 3 } ; struct mipi_dsi_device { struct mipi_dsi_host *host ; struct device dev ; unsigned int channel ; unsigned int lanes ; enum mipi_dsi_pixel_format format ; unsigned long mode_flags ; }; enum i915_cache_level; enum i915_cache_level; struct intel_dsi_host; struct intel_dsi { struct intel_encoder base ; struct drm_panel *panel ; struct intel_dsi_host *dsi_hosts[5U] ; struct intel_connector *attached_connector ; u16 ports ; bool hs ; int channel ; u16 operation_mode ; unsigned int lane_count ; u32 pixel_format ; u32 video_mode_format ; u8 eotp_pkt ; u8 clock_stop ; u8 escape_clk_div ; u8 dual_link ; u8 pixel_overlap ; u32 port_bits ; u32 bw_timer ; u32 dphy_reg ; u32 video_frmt_cfg_bits ; u16 lp_byte_clk ; u16 lp_rx_timeout ; u16 turn_arnd_val ; u16 rst_timer_val ; u16 hs_to_lp_count ; u16 clk_lp_to_hs_count ; u16 clk_hs_to_lp_count ; u16 init_count ; u32 pclk ; u16 burst_mode_ratio ; u16 backlight_off_delay ; u16 backlight_on_delay ; u16 panel_on_delay ; u16 panel_off_delay ; u16 panel_pwr_cycle_delay ; }; struct intel_dsi_host { struct mipi_dsi_host base ; struct intel_dsi *intel_dsi ; enum port port ; struct mipi_dsi_device *device ; }; struct __anonstruct_intel_dsi_drivers_444 { u16 panel_id ; struct drm_panel *(*init)(struct intel_dsi * , u16 ) ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct dsi_mnp 
{ u32 dsi_pll_ctrl ; u32 dsi_pll_div ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct vbt_panel { struct drm_panel panel ; struct intel_dsi *intel_dsi ; }; struct gpio_table { u16 function_reg ; u16 pad_reg ; u8 init ; }; typedef u8 const *(*fn_mipi_elem_exec)(struct intel_dsi * , u8 const * ); enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct intel_dvo { struct intel_encoder base ; struct intel_dvo_device dev ; struct drm_display_mode *panel_fixed_mode ; bool panel_wants_dither ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct hdmi_any_infoframe { enum hdmi_infoframe_type type ; unsigned char version ; unsigned char length ; }; enum hdmi_colorspace { HDMI_COLORSPACE_RGB = 0, HDMI_COLORSPACE_YUV422 = 1, HDMI_COLORSPACE_YUV444 = 2, HDMI_COLORSPACE_YUV420 = 3, HDMI_COLORSPACE_RESERVED4 = 4, HDMI_COLORSPACE_RESERVED5 = 5, HDMI_COLORSPACE_RESERVED6 = 6, HDMI_COLORSPACE_IDO_DEFINED = 7 } ; enum hdmi_scan_mode { HDMI_SCAN_MODE_NONE = 0, HDMI_SCAN_MODE_OVERSCAN = 1, HDMI_SCAN_MODE_UNDERSCAN = 2, HDMI_SCAN_MODE_RESERVED = 3 } ; enum hdmi_colorimetry { HDMI_COLORIMETRY_NONE = 0, HDMI_COLORIMETRY_ITU_601 = 1, HDMI_COLORIMETRY_ITU_709 = 2, HDMI_COLORIMETRY_EXTENDED = 3 } ; enum hdmi_active_aspect { HDMI_ACTIVE_ASPECT_16_9_TOP = 2, HDMI_ACTIVE_ASPECT_14_9_TOP = 3, HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, HDMI_ACTIVE_ASPECT_PICTURE = 8, HDMI_ACTIVE_ASPECT_4_3 = 9, HDMI_ACTIVE_ASPECT_16_9 = 10, HDMI_ACTIVE_ASPECT_14_9 = 11, HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13, HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15 } ; enum hdmi_extended_colorimetry { HDMI_EXTENDED_COLORIMETRY_XV_YCC_601 = 0, HDMI_EXTENDED_COLORIMETRY_XV_YCC_709 = 1, HDMI_EXTENDED_COLORIMETRY_S_YCC_601 = 2, HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601 = 3, HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB = 4, 
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM = 5, HDMI_EXTENDED_COLORIMETRY_BT2020 = 6, HDMI_EXTENDED_COLORIMETRY_RESERVED = 7 } ; enum hdmi_quantization_range { HDMI_QUANTIZATION_RANGE_DEFAULT = 0, HDMI_QUANTIZATION_RANGE_LIMITED = 1, HDMI_QUANTIZATION_RANGE_FULL = 2, HDMI_QUANTIZATION_RANGE_RESERVED = 3 } ; enum hdmi_nups { HDMI_NUPS_UNKNOWN = 0, HDMI_NUPS_HORIZONTAL = 1, HDMI_NUPS_VERTICAL = 2, HDMI_NUPS_BOTH = 3 } ; enum hdmi_ycc_quantization_range { HDMI_YCC_QUANTIZATION_RANGE_LIMITED = 0, HDMI_YCC_QUANTIZATION_RANGE_FULL = 1 } ; enum hdmi_content_type { HDMI_CONTENT_TYPE_GRAPHICS = 0, HDMI_CONTENT_TYPE_PHOTO = 1, HDMI_CONTENT_TYPE_CINEMA = 2, HDMI_CONTENT_TYPE_GAME = 3 } ; struct hdmi_avi_infoframe { enum hdmi_infoframe_type type ; unsigned char version ; unsigned char length ; enum hdmi_colorspace colorspace ; enum hdmi_scan_mode scan_mode ; enum hdmi_colorimetry colorimetry ; enum hdmi_picture_aspect picture_aspect ; enum hdmi_active_aspect active_aspect ; bool itc ; enum hdmi_extended_colorimetry extended_colorimetry ; enum hdmi_quantization_range quantization_range ; enum hdmi_nups nups ; unsigned char video_code ; enum hdmi_ycc_quantization_range ycc_quantization_range ; enum hdmi_content_type content_type ; unsigned char pixel_repeat ; unsigned short top_bar ; unsigned short bottom_bar ; unsigned short left_bar ; unsigned short right_bar ; }; enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN = 0, HDMI_SPD_SDI_DSTB = 1, HDMI_SPD_SDI_DVDP = 2, HDMI_SPD_SDI_DVHS = 3, HDMI_SPD_SDI_HDDVR = 4, HDMI_SPD_SDI_DVC = 5, HDMI_SPD_SDI_DSC = 6, HDMI_SPD_SDI_VCD = 7, HDMI_SPD_SDI_GAME = 8, HDMI_SPD_SDI_PC = 9, HDMI_SPD_SDI_BD = 10, HDMI_SPD_SDI_SACD = 11, HDMI_SPD_SDI_HDDVD = 12, HDMI_SPD_SDI_PMP = 13 } ; struct hdmi_spd_infoframe { enum hdmi_infoframe_type type ; unsigned char version ; unsigned char length ; char vendor[8U] ; char product[16U] ; enum hdmi_spd_sdi sdi ; }; enum hdmi_audio_coding_type { HDMI_AUDIO_CODING_TYPE_STREAM = 0, HDMI_AUDIO_CODING_TYPE_PCM = 1, 
HDMI_AUDIO_CODING_TYPE_AC3 = 2, HDMI_AUDIO_CODING_TYPE_MPEG1 = 3, HDMI_AUDIO_CODING_TYPE_MP3 = 4, HDMI_AUDIO_CODING_TYPE_MPEG2 = 5, HDMI_AUDIO_CODING_TYPE_AAC_LC = 6, HDMI_AUDIO_CODING_TYPE_DTS = 7, HDMI_AUDIO_CODING_TYPE_ATRAC = 8, HDMI_AUDIO_CODING_TYPE_DSD = 9, HDMI_AUDIO_CODING_TYPE_EAC3 = 10, HDMI_AUDIO_CODING_TYPE_DTS_HD = 11, HDMI_AUDIO_CODING_TYPE_MLP = 12, HDMI_AUDIO_CODING_TYPE_DST = 13, HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14, HDMI_AUDIO_CODING_TYPE_CXT = 15 } ; enum hdmi_audio_sample_size { HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0, HDMI_AUDIO_SAMPLE_SIZE_16 = 1, HDMI_AUDIO_SAMPLE_SIZE_20 = 2, HDMI_AUDIO_SAMPLE_SIZE_24 = 3 } ; enum hdmi_audio_sample_frequency { HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0, HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1, HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2, HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3, HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4, HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5, HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6, HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7 } ; enum hdmi_audio_coding_type_ext { HDMI_AUDIO_CODING_TYPE_EXT_CT = 0, HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1, HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2, HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC = 4, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2 = 5, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC = 6, HDMI_AUDIO_CODING_TYPE_EXT_DRA = 7, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND = 8, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10 } ; struct hdmi_audio_infoframe { enum hdmi_infoframe_type type ; unsigned char version ; unsigned char length ; unsigned char channels ; enum hdmi_audio_coding_type coding_type ; enum hdmi_audio_sample_size sample_size ; enum hdmi_audio_sample_frequency sample_frequency ; enum hdmi_audio_coding_type_ext coding_type_ext ; unsigned char channel_allocation ; unsigned char level_shift_value ; bool downmix_inhibit ; }; enum hdmi_3d_structure { HDMI_3D_STRUCTURE_INVALID = -1, HDMI_3D_STRUCTURE_FRAME_PACKING = 0, 
HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE = 1, HDMI_3D_STRUCTURE_LINE_ALTERNATIVE = 2, HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL = 3, HDMI_3D_STRUCTURE_L_DEPTH = 4, HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH = 5, HDMI_3D_STRUCTURE_TOP_AND_BOTTOM = 6, HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8 } ; struct hdmi_vendor_infoframe { enum hdmi_infoframe_type type ; unsigned char version ; unsigned char length ; unsigned int oui ; u8 vic ; enum hdmi_3d_structure s3d_struct ; unsigned int s3d_ext_data ; }; struct __anonstruct_any_221 { enum hdmi_infoframe_type type ; unsigned char version ; unsigned char length ; unsigned int oui ; }; union hdmi_vendor_any_infoframe { struct __anonstruct_any_221 any ; struct hdmi_vendor_infoframe hdmi ; }; union hdmi_infoframe { struct hdmi_any_infoframe any ; struct hdmi_avi_infoframe avi ; struct hdmi_spd_infoframe spd ; union hdmi_vendor_any_infoframe vendor ; struct hdmi_audio_infoframe audio ; }; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct gmbus_pin { char const *name ; int reg ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct intel_lvds_connector { struct intel_connector base ; struct notifier_block lid_notifier ; }; struct intel_lvds_encoder { struct intel_encoder base ; bool is_dual_link ; u32 reg ; u32 a3_power ; struct intel_lvds_connector *attached_connector ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct intel_sdvo_caps { u8 vendor_id ; u8 device_id ; u8 device_rev_id ; u8 sdvo_version_major ; u8 sdvo_version_minor ; unsigned char sdvo_inputs_mask : 2 ; unsigned char smooth_scaling : 1 ; unsigned char sharp_scaling : 1 ; unsigned char up_scaling : 1 ; unsigned char 
down_scaling : 1 ; unsigned char stall_support : 1 ; unsigned char pad : 1 ; u16 output_flags ; }; struct __anonstruct_part1_444 { u16 clock ; u8 h_active ; u8 h_blank ; u8 h_high ; u8 v_active ; u8 v_blank ; u8 v_high ; }; struct __anonstruct_part2_445 { u8 h_sync_off ; u8 h_sync_width ; u8 v_sync_off_width ; u8 sync_off_width_high ; u8 dtd_flags ; u8 sdvo_flags ; u8 v_sync_off_high ; u8 reserved ; }; struct intel_sdvo_dtd { struct __anonstruct_part1_444 part1 ; struct __anonstruct_part2_445 part2 ; }; struct intel_sdvo_pixel_clock_range { u16 min ; u16 max ; }; struct intel_sdvo_preferred_input_timing_args { u16 clock ; u16 width ; u16 height ; unsigned char interlace : 1 ; unsigned char scaled : 1 ; unsigned char pad : 6 ; }; struct intel_sdvo_get_trained_inputs_response { unsigned char input0_trained : 1 ; unsigned char input1_trained : 1 ; unsigned char pad : 6 ; }; struct intel_sdvo_in_out_map { u16 in0 ; u16 in1 ; }; struct intel_sdvo_set_target_input_args { unsigned char target_1 : 1 ; unsigned char pad : 7 ; }; struct intel_sdvo_tv_format { unsigned char ntsc_m : 1 ; unsigned char ntsc_j : 1 ; unsigned char ntsc_443 : 1 ; unsigned char pal_b : 1 ; unsigned char pal_d : 1 ; unsigned char pal_g : 1 ; unsigned char pal_h : 1 ; unsigned char pal_i : 1 ; unsigned char pal_m : 1 ; unsigned char pal_n : 1 ; unsigned char pal_nc : 1 ; unsigned char pal_60 : 1 ; unsigned char secam_b : 1 ; unsigned char secam_d : 1 ; unsigned char secam_g : 1 ; unsigned char secam_k : 1 ; unsigned char secam_k1 : 1 ; unsigned char secam_l : 1 ; unsigned char secam_60 : 1 ; unsigned char hdtv_std_smpte_240m_1080i_59 : 1 ; unsigned char hdtv_std_smpte_240m_1080i_60 : 1 ; unsigned char hdtv_std_smpte_260m_1080i_59 : 1 ; unsigned char hdtv_std_smpte_260m_1080i_60 : 1 ; unsigned char hdtv_std_smpte_274m_1080i_50 : 1 ; unsigned char hdtv_std_smpte_274m_1080i_59 : 1 ; unsigned char hdtv_std_smpte_274m_1080i_60 : 1 ; unsigned char hdtv_std_smpte_274m_1080p_23 : 1 ; unsigned char 
hdtv_std_smpte_274m_1080p_24 : 1 ; unsigned char hdtv_std_smpte_274m_1080p_25 : 1 ; unsigned char hdtv_std_smpte_274m_1080p_29 : 1 ; unsigned char hdtv_std_smpte_274m_1080p_30 : 1 ; unsigned char hdtv_std_smpte_274m_1080p_50 : 1 ; unsigned char hdtv_std_smpte_274m_1080p_59 : 1 ; unsigned char hdtv_std_smpte_274m_1080p_60 : 1 ; unsigned char hdtv_std_smpte_295m_1080i_50 : 1 ; unsigned char hdtv_std_smpte_295m_1080p_50 : 1 ; unsigned char hdtv_std_smpte_296m_720p_59 : 1 ; unsigned char hdtv_std_smpte_296m_720p_60 : 1 ; unsigned char hdtv_std_smpte_296m_720p_50 : 1 ; unsigned char hdtv_std_smpte_293m_480p_59 : 1 ; unsigned char hdtv_std_smpte_170m_480i_59 : 1 ; unsigned char hdtv_std_iturbt601_576i_50 : 1 ; unsigned char hdtv_std_iturbt601_576p_50 : 1 ; unsigned char hdtv_std_eia_7702a_480i_60 : 1 ; unsigned char hdtv_std_eia_7702a_480p_60 : 1 ; unsigned char pad : 3 ; }; struct intel_sdvo_sdtv_resolution_request { unsigned char ntsc_m : 1 ; unsigned char ntsc_j : 1 ; unsigned char ntsc_443 : 1 ; unsigned char pal_b : 1 ; unsigned char pal_d : 1 ; unsigned char pal_g : 1 ; unsigned char pal_h : 1 ; unsigned char pal_i : 1 ; unsigned char pal_m : 1 ; unsigned char pal_n : 1 ; unsigned char pal_nc : 1 ; unsigned char pal_60 : 1 ; unsigned char secam_b : 1 ; unsigned char secam_d : 1 ; unsigned char secam_g : 1 ; unsigned char secam_k : 1 ; unsigned char secam_k1 : 1 ; unsigned char secam_l : 1 ; unsigned char secam_60 : 1 ; unsigned char pad : 5 ; }; struct intel_sdvo_enhancements_reply { unsigned char flicker_filter : 1 ; unsigned char flicker_filter_adaptive : 1 ; unsigned char flicker_filter_2d : 1 ; unsigned char saturation : 1 ; unsigned char hue : 1 ; unsigned char brightness : 1 ; unsigned char contrast : 1 ; unsigned char overscan_h : 1 ; unsigned char overscan_v : 1 ; unsigned char hpos : 1 ; unsigned char vpos : 1 ; unsigned char sharpness : 1 ; unsigned char dot_crawl : 1 ; unsigned char dither : 1 ; unsigned char tv_chroma_filter : 1 ; unsigned char 
tv_luma_filter : 1 ; }; struct intel_sdvo_encode { u8 dvi_rev ; u8 hdmi_rev ; }; struct intel_sdvo { struct intel_encoder base ; struct i2c_adapter *i2c ; u8 slave_addr ; struct i2c_adapter ddc ; uint32_t sdvo_reg ; uint16_t controlled_output ; struct intel_sdvo_caps caps ; int pixel_clock_min ; int pixel_clock_max ; uint16_t attached_output ; uint16_t hotplug_active ; uint32_t color_range ; bool color_range_auto ; bool is_tv ; bool is_sdvob ; int tv_format_index ; bool is_hdmi ; bool has_hdmi_monitor ; bool has_hdmi_audio ; bool rgb_quant_range_selectable ; bool is_lvds ; struct drm_display_mode *sdvo_lvds_fixed_mode ; uint8_t ddc_bus ; uint8_t dtd_sdvo_flags ; }; struct intel_sdvo_connector { struct intel_connector base ; uint16_t output_flag ; enum hdmi_force_audio force_audio ; u8 tv_format_supported[19U] ; int format_supported_num ; struct drm_property *tv_format ; struct drm_property *left ; struct drm_property *right ; struct drm_property *top ; struct drm_property *bottom ; struct drm_property *hpos ; struct drm_property *vpos ; struct drm_property *contrast ; struct drm_property *saturation ; struct drm_property *hue ; struct drm_property *sharpness ; struct drm_property *flicker_filter ; struct drm_property *flicker_filter_adaptive ; struct drm_property *flicker_filter_2d ; struct drm_property *tv_chroma_filter ; struct drm_property *tv_luma_filter ; struct drm_property *dot_crawl ; struct drm_property *brightness ; u32 left_margin ; u32 right_margin ; u32 top_margin ; u32 bottom_margin ; u32 max_hscan ; u32 max_vscan ; u32 max_hpos ; u32 cur_hpos ; u32 max_vpos ; u32 cur_vpos ; u32 cur_brightness ; u32 max_brightness ; u32 cur_contrast ; u32 max_contrast ; u32 cur_saturation ; u32 max_saturation ; u32 cur_hue ; u32 max_hue ; u32 cur_sharpness ; u32 max_sharpness ; u32 cur_flicker_filter ; u32 max_flicker_filter ; u32 cur_flicker_filter_adaptive ; u32 max_flicker_filter_adaptive ; u32 cur_flicker_filter_2d ; u32 max_flicker_filter_2d ; u32 
cur_tv_chroma_filter ; u32 max_tv_chroma_filter ; u32 cur_tv_luma_filter ; u32 max_tv_luma_filter ; u32 cur_dot_crawl ; u32 max_dot_crawl ; }; struct _sdvo_cmd_name { u8 cmd ; char const *name ; }; union __anonunion_enhancements_450 { struct intel_sdvo_enhancements_reply reply ; uint16_t response ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct intel_tv { struct intel_encoder base ; int type ; char const *tv_format ; int margin[4U] ; u32 save_TV_H_CTL_1 ; u32 save_TV_H_CTL_2 ; u32 save_TV_H_CTL_3 ; u32 save_TV_V_CTL_1 ; u32 save_TV_V_CTL_2 ; u32 save_TV_V_CTL_3 ; u32 save_TV_V_CTL_4 ; u32 save_TV_V_CTL_5 ; u32 save_TV_V_CTL_6 ; u32 save_TV_V_CTL_7 ; u32 save_TV_SC_CTL_1 ; u32 save_TV_SC_CTL_2 ; u32 save_TV_SC_CTL_3 ; u32 save_TV_CSC_Y ; u32 save_TV_CSC_Y2 ; u32 save_TV_CSC_U ; u32 save_TV_CSC_U2 ; u32 save_TV_CSC_V ; u32 save_TV_CSC_V2 ; u32 save_TV_CLR_KNOBS ; u32 save_TV_CLR_LEVEL ; u32 save_TV_WIN_POS ; u32 save_TV_WIN_SIZE ; u32 save_TV_FILTER_CTL_1 ; u32 save_TV_FILTER_CTL_2 ; u32 save_TV_FILTER_CTL_3 ; u32 save_TV_H_LUMA[60U] ; u32 save_TV_H_CHROMA[60U] ; u32 save_TV_V_LUMA[43U] ; u32 save_TV_V_CHROMA[43U] ; u32 save_TV_DAC ; u32 save_TV_CTL ; }; struct video_levels { int blank ; int black ; int burst ; }; struct color_conversion { u16 ry ; u16 gy ; u16 by ; u16 ay ; u16 ru ; u16 gu ; u16 bu ; u16 au ; u16 rv ; u16 gv ; u16 bv ; u16 av ; }; struct tv_mode { char const *name ; int clock ; int refresh ; u32 oversample ; int hsync_end ; int hblank_start ; int hblank_end ; int htotal ; bool progressive ; bool trilevel_sync ; bool component_only ; int vsync_start_f1 ; int vsync_start_f2 ; int vsync_len ; bool veq_ena ; int veq_start_f1 ; int veq_start_f2 ; int veq_len ; int vi_end_f1 ; int vi_end_f2 ; int nbr_end ; bool burst_ena ; int hburst_start ; int hburst_len ; int vburst_start_f1 ; int vburst_end_f1 ; int vburst_start_f2 ; int vburst_end_f2 ; int vburst_start_f3 ; int vburst_end_f3 ; 
int vburst_start_f4 ; int vburst_end_f4 ; int dda2_size ; int dda3_size ; int dda1_inc ; int dda2_inc ; int dda3_inc ; u32 sc_reset ; bool pal_burst ; struct video_levels const *composite_levels ; struct video_levels const *svideo_levels ; struct color_conversion const *composite_color ; struct color_conversion const *svideo_color ; u32 const *filter_table ; int max_srcw ; }; struct input_res { char const *name ; int w ; int h ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; enum i915_cache_level; enum i915_cache_level; struct _balloon_info_ { struct drm_mm_node space[4U] ; }; enum hrtimer_restart; enum i2c_slave_event; enum i2c_slave_event; struct drm_i915_setparam { int param ; int value ; }; typedef struct drm_i915_setparam drm_i915_setparam_t; enum i915_cache_level; enum i915_cache_level; struct vc_data; struct console_font; struct consw { struct module *owner ; char const *(*con_startup)(void) ; void (*con_init)(struct vc_data * , int ) ; void (*con_deinit)(struct vc_data * ) ; void (*con_clear)(struct vc_data * , int , int , int , int ) ; void (*con_putc)(struct vc_data * , int , int , int ) ; void (*con_putcs)(struct vc_data * , unsigned short const * , int , int , int ) ; void (*con_cursor)(struct vc_data * , int ) ; int (*con_scroll)(struct vc_data * , int , int , int , int ) ; void (*con_bmove)(struct vc_data * , int , int , int , int , int , int ) ; int (*con_switch)(struct vc_data * ) ; int (*con_blank)(struct vc_data * , int , int ) ; int (*con_font_set)(struct vc_data * , struct console_font * , unsigned int ) ; int (*con_font_get)(struct vc_data * , struct console_font * ) ; int (*con_font_default)(struct vc_data * , struct console_font * , char * ) ; int (*con_font_copy)(struct vc_data * , int ) ; int (*con_resize)(struct vc_data * , unsigned int , unsigned int , unsigned int ) ; int (*con_set_palette)(struct vc_data * , unsigned char * ) ; int (*con_scrolldelta)(struct vc_data * , int ) ; int (*con_set_origin)(struct vc_data * 
) ; void (*con_save_screen)(struct vc_data * ) ; u8 (*con_build_attr)(struct vc_data * , u8 , u8 , u8 , u8 , u8 , u8 ) ; void (*con_invert_region)(struct vc_data * , u16 * , int ) ; u16 *(*con_screen_pos)(struct vc_data * , int ) ; unsigned long (*con_getxy)(struct vc_data * , unsigned long , int * , int * ) ; int (*con_debug_enter)(struct vc_data * ) ; int (*con_debug_leave)(struct vc_data * ) ; }; enum vga_switcheroo_state { VGA_SWITCHEROO_OFF = 0, VGA_SWITCHEROO_ON = 1, VGA_SWITCHEROO_INIT = 2, VGA_SWITCHEROO_NOT_FOUND = 3 } ; struct vga_switcheroo_client_ops { void (*set_gpu_state)(struct pci_dev * , enum vga_switcheroo_state ) ; void (*reprobe)(struct pci_dev * ) ; bool (*can_switch)(struct pci_dev * ) ; }; __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct module __this_module ; extern struct pv_irq_ops pv_irq_ops ; extern int printk(char const * , ...) ; extern void __bad_percpu_size(void) ; extern void __bad_size_call_parameter(void) ; extern void warn_slowpath_fmt(char const * , int const , char const * , ...) 
/*
 * CIL-inlined x86/paravirt and kernel locking/timing helpers (generated by
 * CIL v1.5.1).  NOTE(review): generated harness code -- only comments were
 * added; all code tokens are byte-identical to the original.
 */
; extern void *memset(void * , int , size_t ) ;
/*
 * arch_local_save_flags(): returns the saved flags word through the
 * pv_irq_ops.save_fl paravirt slot.  The "__edi = __edi;"-style
 * self-assignments are CIL artifacts of the original clobber declarations.
 * The first asm statement is the BUG() trap taken when the paravirt hook is
 * NULL -- the ldv_4860 label then self-loops, so that path never returns.
 * The second asm is the patchable paravirt call site; the result is
 * produced in __eax ("=a" constraint) and returned via __ret.
 */
__inline static unsigned long arch_local_save_flags(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } }
/* IRQs are disabled when the IF bit (0x200 == 512UL) is clear in the saved flags. */
__inline static int arch_irqs_disabled_flags(unsigned long flags ) { { return ((flags & 512UL) == 0UL); } }
/* rep_nop()/cpu_relax(): the x86 PAUSE instruction ("rep; nop"), used in spin-wait loops. */
__inline static void rep_nop(void) { { __asm__ volatile ("rep; nop": : : "memory"); return; } } __inline static void cpu_relax(void) { { rep_nop(); return; } }
/* atomic_read(): volatile load of the counter field (no memory barrier implied). */
__inline static int atomic_read(atomic_t const *v ) { int __var ; { __var = 0; return ((int )*((int const volatile *)(& v->counter))); } } extern int __preempt_count ;
/*
 * preempt_count(): per-CPU read of __preempt_count through the %gs segment.
 * The switch on the constant 4UL (sizeof the counter) selects the 32-bit
 * "movl" case; the other cases are dead.  The top bit is masked off with
 * 0x7fffffff before returning.
 */
__inline static int preempt_count(void) { int pfo_ret__ ; { switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6002; default: __bad_percpu_size(); } ldv_6002: ; return (pfo_ret__ & 
 2147483647); } }
/* spin_lock_irq()/spin_unlock_irq(): thin wrappers over the raw spinlock primitives. */
extern void _raw_spin_lock_irq(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irq(raw_spinlock_t * ) ; __inline static void spin_lock_irq(spinlock_t *lock ) { { _raw_spin_lock_irq(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock_irq(spinlock_t *lock ) { { _raw_spin_unlock_irq(& lock->__annonCompField18.rlock); return; } } extern void mutex_lock_nested(struct mutex * , unsigned int ) ; extern int mutex_trylock(struct mutex * ) ; extern void mutex_unlock(struct mutex * ) ; extern unsigned long volatile jiffies ;
/* msecs_to_jiffies(): delegates to the out-of-line __msecs_to_jiffies(). */
extern unsigned long __msecs_to_jiffies(unsigned int const ) ; __inline static unsigned long msecs_to_jiffies(unsigned int const m ) { unsigned long tmp___0 ; { tmp___0 = __msecs_to_jiffies(m); return (tmp___0); } }
/* Workqueue externs and the ldv_* stub prototypes the harness interposes for them. */
extern bool queue_work_on(int , struct workqueue_struct * , struct work_struct * ) ; bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; extern bool queue_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; extern void flush_workqueue(struct workqueue_struct * ) ; void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) ; extern bool cancel_work_sync(struct work_struct * ) ; bool ldv_cancel_work_sync_10(struct work_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_11(struct work_struct *ldv_func_arg1 ) ; extern bool cancel_delayed_work_sync(struct delayed_work * ) ; bool ldv_cancel_delayed_work_sync_12(struct delayed_work
*ldv_func_arg1 ) ;
bool ldv_cancel_delayed_work_sync_13(struct delayed_work *ldv_func_arg1 ) ;
extern int cpu_number ;
extern void *malloc(size_t ) ;
extern void *calloc(size_t , size_t ) ;
extern int __VERIFIER_nondet_int(void) ;
extern unsigned long __VERIFIER_nondet_ulong(void) ;
extern void *__VERIFIER_nondet_pointer(void) ;
extern void __VERIFIER_assume(int ) ;
/*
 * LDV harness allocation helpers.  Each models an allocator that may
 * nondeterministically fail: the verifier explores both the NULL return
 * and the success path, where __VERIFIER_assume pins the result non-NULL.
 */
/* malloc() that nondeterministically fails; success path is assumed non-NULL. */
void *ldv_malloc(size_t size )
{
  void *region ;
  {
  if (__VERIFIER_nondet_int() != 0) {
    return ((void *)0);
  }
  region = malloc(size);
  __VERIFIER_assume((unsigned long )region != (unsigned long )((void *)0));
  return (region);
  }
}
/* Zeroing allocator (calloc) that nondeterministically fails. */
void *ldv_zalloc(size_t size )
{
  void *region ;
  {
  if (__VERIFIER_nondet_int() != 0) {
    return ((void *)0);
  }
  region = calloc(1UL, size);
  __VERIFIER_assume((unsigned long )region != (unsigned long )((void *)0));
  return (region);
  }
}
/* Zeroing allocator that never fails (result always assumed non-NULL). */
void *ldv_init_zalloc(size_t size )
{
  void *region ;
  {
  region = calloc(1UL, size);
  __VERIFIER_assume((unsigned long )region != (unsigned long )((void *)0));
  return (region);
  }
}
/* Plain memset() pass-through kept so the harness can interpose on it. */
void *ldv_memset(void *s , int c , size_t n )
{
  {
  return (memset(s, c, n));
  }
}
/* Nondeterministic value sources used to model unknown inputs. */
int ldv_undef_int(void)
{
  {
  return (__VERIFIER_nondet_int());
  }
}
void *ldv_undef_ptr(void)
{
  {
  return (__VERIFIER_nondet_pointer());
  }
}
unsigned long ldv_undef_ulong(void)
{
  {
  return (__VERIFIER_nondet_ulong());
  }
}
/* ldv_stop(): deliberately never returns (self-looping label) -- cuts off
 * the current execution path for the verifier. */
__inline static void ldv_stop(void)
{
  {
  LDV_STOP: ;
  goto LDV_STOP;
  }
}
/* Model of __builtin_expect(): the hint argument c is intentionally unused. */
__inline static long ldv__builtin_expect(long exp , long c )
{
  {
  return (exp);
  }
}
/* LDV state-machine bookkeeping globals (continued on the following lines). */
struct drm_property *intel_lvds_connector_funcs_group0 ;
int ldv_state_variable_151 ;
struct work_struct *ldv_work_struct_9_2 ;
int ldv_state_variable_99 ;
struct trace_event_call *event_class_i915_page_table_entry_update_group0 ;
int ldv_state_variable_47 ;
int ldv_state_variable_20 ;
int ldv_state_variable_125 ;
struct intel_dvo_device
/*
 * LDV environment-model state: one ldv_state_variable_N per modeled driver
 * interface, ldv_work_* slots for queued work items, and *_groupN pointers
 * holding the argument objects passed to callback groups.  Declarations
 * only; the first and last items are split across chunk boundaries.
 * NOTE(review): generated code -- only this comment was added.
 */
*ns2501_ops_group0 ; struct drm_atomic_state *intel_mode_funcs_group1 ; int ldv_state_variable_173 ; struct pci_dev *i915_switcheroo_ops_group0 ; struct work_struct *ldv_work_struct_14_0 ; struct work_struct *ldv_work_struct_13_3 ; int ldv_state_variable_54 ; int ldv_work_15_2 ; struct trace_event_call *event_class_i915_gem_ring_sync_to_group0 ; struct work_struct *ldv_work_struct_20_2 ; int ldv_state_variable_17 ; struct file *i915_displayport_test_data_fops_group2 ; struct drm_connector *intel_tv_connector_helper_funcs_group0 ; int ldv_state_variable_160 ; int ldv_state_variable_66 ; int ldv_state_variable_19 ; struct trace_event_call *event_class_i915_flip_request_group0 ; struct work_struct *ldv_work_struct_4_3 ; int ldv_state_variable_27 ; int ldv_state_variable_9 ; int ldv_state_variable_100 ; struct trace_event_call *event_class_i915_gem_evict_vm_group0 ; int ldv_state_variable_83 ; struct work_struct *ldv_work_struct_15_2 ; struct drm_crtc *intel_fb_helper_funcs_group0 ; int ldv_work_3_3 ; struct drm_plane *intel_plane_funcs_group0 ; int ldv_state_variable_55 ; struct drm_panel *vbt_panel_funcs_group0 ; struct trace_event_call *event_class_i915_gem_object_fault_group0 ; struct device *i915_audio_component_ops_group0 ; struct intel_dvo_device *ch7017_ops_group0 ; int ldv_work_1_3 ; int ldv_state_variable_145 ; struct work_struct *ldv_work_struct_3_2 ; struct i915_power_well *chv_pipe_power_well_ops_group0 ; int ldv_state_variable_80 ; int ldv_work_16_2 ; struct work_struct *ldv_work_struct_7_2 ; int ldv_state_variable_64 ; struct i915_power_well *vlv_dpio_power_well_ops_group0 ; int ldv_work_13_1 ; struct device *dev_attr_gt_max_freq_mhz_group1 ; int ldv_state_variable_28 ; struct work_struct *ldv_work_struct_6_0 ; struct drm_connector *intel_sdvo_connector_helper_funcs_group0 ; struct work_struct *ldv_work_struct_15_1 ; int ldv_work_8_3 ; struct trace_event_call *event_class_i915_gem_request_notify_group0 ; int ldv_state_variable_166 ; int ldv_work_13_2 ; 
int ldv_work_14_3 ; int ldv_work_7_1 ; int ldv_state_variable_78 ; int ldv_state_variable_76 ; int ldv_work_6_2 ; struct drm_connector *intel_crt_connector_funcs_group1 ; int ldv_state_variable_137 ; int ldv_state_variable_89 ; int ldv_state_variable_124 ; int ldv_state_variable_8 ; int ldv_state_variable_169 ; int ldv_state_variable_46 ; struct trace_event_call *event_class_i915_pipe_update_vblank_evaded_group0 ; int ldv_work_8_0 ; int ldv_state_variable_75 ; int ldv_work_14_2 ; struct work_struct *ldv_work_struct_20_0 ; struct inode *i915_display_crc_ctl_fops_group1 ; int ldv_state_variable_33 ; struct work_struct *ldv_work_struct_13_2 ; struct drm_connector *intel_lvds_connector_funcs_group1 ; struct drm_i915_gem_object *i915_gem_object_stolen_ops_group0 ; int ldv_state_variable_123 ; struct trace_event_call *event_class_intel_gpu_freq_change_group0 ; int ldv_state_variable_161 ; struct inode *i915_drop_caches_fops_group1 ; int ldv_state_variable_172 ; int ldv_work_15_1 ; int ldv_work_3_0 ; struct device_attribute *dev_attr_gt_max_freq_mhz_group0 ; int ldv_work_10_0 ; int ldv_state_variable_65 ; struct file *i915_forcewake_fops_group2 ; struct drm_fb_helper *intel_fb_helper_funcs_group1 ; int ldv_state_variable_98 ; struct file *i915_error_state_fops_group2 ; struct drm_connector *intel_dp_connector_helper_funcs_group0 ; int ldv_timer_21_3 ; int ldv_state_variable_70 ; struct drm_connector *intel_crt_connector_helper_funcs_group0 ; int ldv_state_variable_142 ; struct work_struct *ldv_work_struct_15_0 ; int ldv_state_variable_158 ; int ldv_work_6_1 ; struct drm_i915_private *vlv_dpio_power_well_ops_group1 ; struct work_struct *ldv_work_struct_1_0 ; int ldv_work_7_0 ; struct drm_plane_state *intel_plane_funcs_group1 ; int ldv_state_variable_63 ; struct work_struct *ldv_work_struct_7_3 ; int ldv_state_variable_105 ; int ldv_state_variable_2 ; struct drm_i915_private *chv_pipe_power_well_ops_group1 ; int ldv_work_2_0 ; struct drm_display_mode *tfp410_ops_group1 ; 
struct work_struct *ldv_work_struct_10_0 ; struct i2c_adapter *gmbus_algorithm_group0 ; int ldv_work_4_2 ; int ldv_state_variable_11 ; int ldv_state_variable_113 ; int ldv_work_1_2 ; int ldv_state_variable_18 ; struct i915_power_well *hsw_power_well_ops_group0 ; int ldv_state_variable_150 ; struct drm_display_mode *ivch_ops_group1 ; struct intel_dvo_device *tfp410_ops_group0 ; struct work_struct *ldv_work_struct_5_0 ; int ldv_work_16_3 ; struct work_struct *ldv_work_struct_9_1 ; struct inode *i915_displayport_test_type_fops_group1 ; int ldv_state_variable_90 ; int ldv_state_variable_97 ; int ldv_work_11_2 ; int ldv_state_variable_162 ; int pci_counter ; struct intel_dvo_device *ch7xxx_ops_group0 ; int ldv_state_variable_30 ; int ldv_work_8_1 ; int ldv_state_variable_0 ; int ldv_state_variable_81 ; struct file *i915_displayport_test_type_fops_group2 ; int ldv_state_variable_102 ; struct drm_connector *intel_dp_connector_funcs_group1 ; struct inode *i915_displayport_test_data_fops_group1 ; int ldv_state_variable_87 ; int ldv_state_variable_136 ; struct drm_property *intel_plane_funcs_group2 ; int ldv_state_variable_73 ; int ldv_state_variable_29 ; struct trace_event_call *event_class_i915_gem_request_group0 ; int ldv_state_variable_115 ; int ldv_work_16_1 ; struct work_struct *ldv_work_struct_8_1 ; struct work_struct *ldv_work_struct_2_0 ; struct device *dev_attr_gt_min_freq_mhz_group1 ; struct fb_info *intelfb_ops_group1 ; struct work_struct *ldv_work_struct_20_1 ; struct file *i915_cur_wm_latency_fops_group2 ; int ldv_work_20_3 ; struct inode *i915_pri_wm_latency_fops_group1 ; int ldv_state_variable_91 ; struct fb_var_screeninfo *intelfb_ops_group0 ; struct work_struct *ldv_work_struct_6_1 ; struct drm_display_mode *ch7017_ops_group1 ; int ref_cnt ; int ldv_work_15_0 ; int ldv_state_variable_168 ; int ldv_work_20_1 ; struct work_struct *ldv_work_struct_10_3 ; int ldv_work_16_0 ; int ldv_state_variable_23 ; struct work_struct *ldv_work_struct_3_3 ; int 
ldv_state_variable_143 ; struct work_struct *ldv_work_struct_16_1 ; struct drm_i915_private *skl_power_well_ops_group1 ; struct file *i915_driver_fops_group2 ; struct trace_event_call *event_class_i915_pipe_update_end_group0 ; struct drm_property *intel_sdvo_connector_funcs_group0 ; struct drm_crtc *intel_helper_funcs_group0 ; struct work_struct *ldv_work_struct_1_1 ; int ldv_state_variable_59 ; struct inode *i915_error_state_fops_group1 ; struct intel_dvo_device *ivch_ops_group0 ; int ldv_state_variable_6 ; struct file *i915_min_freq_fops_group2 ; struct work_struct *ldv_work_struct_15_3 ; int ldv_state_variable_182 ; struct inode *i915_cur_wm_latency_fops_group1 ; struct drm_connector *intel_dsi_connector_funcs_group0 ; struct trace_event_call *event_class_i915_gem_request_wait_begin_group0 ; struct work_struct *ldv_work_struct_19_2 ; struct work_struct *ldv_work_struct_4_2 ; struct trace_event_call *event_class_i915_gem_evict_group0 ; struct drm_connector *intel_tv_connector_funcs_group1 ; struct drm_i915_private *i9xx_always_on_power_well_ops_group1 ; int ldv_state_variable_178 ; int ldv_state_variable_38 ; int ldv_state_variable_157 ; struct work_struct *ldv_work_struct_18_2 ; int ldv_work_18_1 ; struct file *i915_pri_wm_latency_fops_group2 ; int ldv_state_variable_126 ; int ldv_state_variable_104 ; struct work_struct *ldv_work_struct_12_1 ; int ldv_state_variable_52 ; int ldv_work_11_0 ; struct file *i915_ring_test_irq_fops_group2 ; struct trace_event_call *event_class_i915_va_group0 ; struct work_struct *ldv_work_struct_9_0 ; int ldv_work_10_2 ; int ldv_state_variable_103 ; int ldv_state_variable_60 ; int ldv_state_variable_36 ; struct device_attribute *dev_attr_gt_min_freq_mhz_group0 ; struct inode *i915_min_freq_fops_group1 ; struct work_struct *ldv_work_struct_13_0 ; int ldv_state_variable_140 ; int ldv_state_variable_107 ; int ldv_state_variable_48 ; struct drm_connector *intel_dsi_connector_helper_funcs_group0 ; struct work_struct *ldv_work_struct_14_1 
; struct file *i915_pipe_crc_fops_group2 ; int ldv_state_variable_148 ; int ldv_state_variable_163 ; int ldv_state_variable_191 ; int ldv_work_3_2 ; struct inode *i915_next_seqno_fops_group1 ; int ldv_state_variable_138 ; int ldv_work_13_3 ; int ldv_state_variable_82 ; struct work_struct *ldv_work_struct_2_3 ; int ldv_work_11_1 ; struct intel_dvo_device *sil164_ops_group0 ; int ldv_state_variable_49 ; struct drm_i915_gem_object *i915_gem_object_dmabuf_ops_group0 ; struct trace_event_call *event_class_i915_page_table_entry_group0 ; struct vm_area_struct *i915_gem_vm_ops_group0 ; int ldv_state_variable_24 ; struct work_struct *ldv_work_struct_19_3 ; struct work_struct *ldv_work_struct_18_0 ; struct file *error_state_attr_group1 ; struct drm_gem_object *driver_group1 ; struct drm_i915_private *vlv_dpio_cmn_power_well_ops_group1 ; int ldv_work_14_0 ; struct file *i915_next_seqno_fops_group2 ; struct timer_list *ldv_timer_list_22_3 ; int ldv_work_20_2 ; int ldv_state_variable_1 ; int ldv_state_variable_114 ; int ldv_state_variable_176 ; struct timer_list *ldv_timer_list_21_2 ; struct work_struct *ldv_work_struct_6_2 ; struct inode *i915_driver_fops_group1 ; int ldv_state_variable_16 ; struct work_struct *ldv_work_struct_12_2 ; struct trace_event_call *event_class_i915_ppgtt_group0 ; struct drm_encoder *intel_dp_enc_funcs_group0 ; int ldv_work_6_3 ; struct file *i915_dpcd_fops_group2 ; struct work_struct *ldv_work_struct_3_0 ; int ldv_state_variable_131 ; int ldv_state_variable_67 ; int ldv_state_variable_53 ; struct drm_property *intel_tv_connector_funcs_group0 ; struct dma_buf_attachment *i915_dmabuf_ops_group1 ; struct work_struct *ldv_work_struct_1_2 ; struct trace_event_call *event_class_i915_gem_object_group0 ; int ldv_work_18_2 ; struct drm_connector *intel_sdvo_connector_funcs_group1 ; struct work_struct *ldv_work_struct_4_1 ; int ldv_state_variable_92 ; struct inode *i915_ring_test_irq_fops_group1 ; struct trace_event_call *event_class_i915_vma_bind_group0 ; int 
ldv_state_variable_130 ; int ldv_work_10_3 ; int ldv_state_variable_189 ; int ldv_state_variable_156 ; int ldv_state_variable_179 ; int ldv_state_variable_35 ; struct file *i915_fbc_fc_fops_group2 ; struct trace_event_call *event_class_i915_gem_object_create_group0 ; struct work_struct *ldv_work_struct_3_1 ; int ldv_work_12_3 ; struct i915_power_well *vlv_dpio_cmn_power_well_ops_group0 ; int ldv_state_variable_106 ; struct file *i915_wedged_fops_group2 ; struct kobject *dpf_attrs_group0 ; int ldv_work_1_1 ; int ldv_state_variable_111 ; struct trace_event_call *event_class_i915_gem_evict_everything_group0 ; struct work_struct *ldv_work_struct_14_2 ; int ldv_work_9_3 ; struct work_struct *ldv_work_struct_16_2 ; int ldv_state_variable_149 ; int ldv_timer_22_3 ; int ldv_timer_21_1 ; int ldv_state_variable_109 ; struct file *i915_display_crc_ctl_fops_group2 ; int ldv_state_variable_14 ; struct drm_minor *driver_group0 ; struct device *i915_audio_component_bind_ops_group1 ; struct inode *i915_spr_wm_latency_fops_group1 ; int ldv_state_variable_37 ; int ldv_state_variable_51 ; struct work_struct *ldv_work_struct_10_1 ; struct drm_i915_private *vlv_display_power_well_ops_group1 ; struct file *i915_ring_stop_fops_group2 ; int ldv_work_7_2 ; int ldv_state_variable_190 ; int ldv_work_17_0 ; struct work_struct *ldv_work_struct_2_2 ; struct work_struct *ldv_work_struct_7_1 ; int ldv_work_13_0 ; int ldv_state_variable_42 ; struct mipi_dsi_host *intel_dsi_host_ops_group1 ; struct kobject *error_state_attr_group0 ; struct work_struct *ldv_work_struct_4_0 ; struct work_struct *ldv_work_struct_11_0 ; int ldv_state_variable_7 ; struct drm_connector *intel_dp_mst_connector_helper_funcs_group0 ; struct dma_buf *i915_dmabuf_ops_group0 ; int ldv_state_variable_164 ; struct drm_device *driver_group3 ; int ldv_state_variable_119 ; struct inode *i915_ring_stop_fops_group1 ; struct drm_connector *intel_dvo_connector_helper_funcs_group0 ; int ldv_state_variable_174 ; struct i915_power_well 
*chv_dpio_cmn_power_well_ops_group0 ; int ldv_work_4_0 ; struct work_struct *ldv_work_struct_2_1 ; struct work_struct *ldv_work_struct_12_3 ; struct inode *i915_dpcd_fops_group1 ; int ldv_state_variable_26 ; struct file *i915_drop_caches_fops_group2 ; struct work_struct *ldv_work_struct_17_1 ; struct work_struct *ldv_work_struct_7_0 ; int LDV_IN_INTERRUPT = 1; struct timer_list *ldv_timer_list_22_1 ; int ldv_state_variable_155 ; int ldv_state_variable_58 ; int ldv_state_variable_188 ; struct trace_event_call *event_class_i915_gem_ring_dispatch_group0 ; int ldv_timer_21_0 ; int ldv_work_5_2 ; struct trace_event_call *event_class_i915_gem_object_pwrite_group0 ; int ldv_state_variable_93 ; struct work_struct *ldv_work_struct_19_0 ; int ldv_state_variable_186 ; int ldv_state_variable_177 ; int ldv_state_variable_31 ; int ldv_state_variable_96 ; int ldv_state_variable_141 ; int ldv_state_variable_68 ; int ldv_work_2_1 ; struct inode *i915_pipe_crc_fops_group1 ; struct bin_attribute *dpf_attrs_1_group2 ; int ldv_state_variable_15 ; struct mipi_dsi_device *intel_dsi_host_ops_group0 ; struct i915_power_well *i9xx_always_on_power_well_ops_group0 ; struct file *dpf_attrs_group1 ; struct file *dpf_attrs_1_group1 ; struct work_struct *ldv_work_struct_1_3 ; struct work_struct *ldv_work_struct_18_1 ; struct inode *i915_fbc_fc_fops_group1 ; int ldv_state_variable_187 ; struct file *i915_spr_wm_latency_fops_group2 ; int ldv_state_variable_74 ; int ldv_state_variable_21 ; struct drm_crtc *intel_crtc_funcs_group0 ; struct work_struct *ldv_work_struct_8_0 ; struct drm_property *intel_hdmi_connector_funcs_group0 ; int ldv_state_variable_146 ; int ldv_state_variable_180 ; int ldv_state_variable_69 ; struct work_struct *ldv_work_struct_14_3 ; struct trace_event_call *event_class_i915_vma_unbind_group0 ; struct timer_list *ldv_timer_list_21_3 ; struct work_struct *ldv_work_struct_16_0 ; struct work_struct *ldv_work_struct_11_1 ; struct work_struct *ldv_work_struct_17_0 ; int 
ldv_state_variable_88 ; int ldv_work_12_2 ; struct work_struct *ldv_work_struct_16_3 ; struct drm_i915_private *hsw_power_well_ops_group1 ; int ldv_state_variable_139 ; int ldv_state_variable_94 ; struct i915_power_well *vlv_display_power_well_ops_group0 ; int ldv_work_20_0 ; struct drm_i915_gem_object *i915_gem_object_ops_group0 ; int ldv_state_variable_110 ; struct drm_property *intel_dp_connector_funcs_group0 ; struct inode *i915_ring_missed_irq_fops_group1 ; int ldv_work_5_3 ; int ldv_state_variable_41 ; int ldv_state_variable_62 ; int ldv_state_variable_40 ; int ldv_timer_22_2 ; struct file *i915_max_freq_fops_group2 ; int ldv_state_variable_10 ; int ldv_state_variable_133 ; struct work_struct *ldv_work_struct_12_0 ; struct timer_list *ldv_timer_list_21_0 ; int ldv_work_4_1 ; int ldv_work_10_1 ; struct drm_display_mode *ns2501_ops_group1 ; int ldv_work_17_1 ; int ldv_state_variable_25 ; int ldv_state_variable_154 ; int ldv_state_variable_79 ; int ldv_state_variable_127 ; struct drm_framebuffer *intel_fb_funcs_group0 ; struct i915_power_well *skl_power_well_ops_group0 ; int ldv_state_variable_183 ; struct device *i915_pm_ops_group1 ; int ldv_work_18_3 ; int ldv_work_2_2 ; struct trace_event_call *event_class_i915_gem_ring_flush_group0 ; int ldv_state_variable_108 ; int ldv_state_variable_32 ; int ldv_work_11_3 ; struct drm_i915_private *chv_dpio_cmn_power_well_ops_group1 ; struct drm_plane *intel_plane_helper_funcs_group0 ; struct drm_property *intel_dp_mst_connector_funcs_group0 ; int ldv_state_variable_181 ; struct drm_framebuffer *intel_plane_helper_funcs_group2 ; struct inode *i915_forcewake_fops_group1 ; struct work_struct *ldv_work_struct_19_1 ; int ldv_state_variable_45 ; int ldv_state_variable_12 ; struct work_struct *ldv_work_struct_13_1 ; int ldv_state_variable_171 ; int ldv_state_variable_95 ; int ldv_state_variable_122 ; struct inode *i915_cache_sharing_fops_group1 ; int ldv_state_variable_22 ; struct timer_list *ldv_timer_list_22_0 ; struct 
bin_attribute *error_state_attr_group2 ; int ldv_state_variable_147 ; int ldv_work_19_1 ; int ldv_state_variable_61 ; int ldv_work_9_0 ; int ldv_work_6_0 ; int ldv_timer_22_1 ; int ldv_work_18_0 ; int ldv_timer_21_2 ; struct work_struct *ldv_work_struct_8_3 ; int ldv_work_19_3 ; int ldv_state_variable_165 ; struct backlight_device *intel_backlight_device_ops_group0 ; int ldv_state_variable_72 ; struct drm_file *driver_group2 ; int ldv_state_variable_132 ; struct work_struct *ldv_work_struct_20_3 ; struct file *i915_ring_missed_irq_fops_group2 ; int ldv_state_variable_120 ; struct drm_connector *intel_dp_mst_connector_funcs_group1 ; struct trace_event_call *event_class_i915_pipe_update_start_group0 ; int ldv_work_5_0 ; struct bin_attribute *dpf_attrs_group2 ; int ldv_state_variable_50 ; struct drm_connector *intel_hdmi_connector_funcs_group1 ; int ldv_state_variable_84 ; struct pci_dev *i915_pci_driver_group1 ; struct inode *i915_wedged_fops_group1 ; int ldv_state_variable_86 ; int ldv_state_variable_44 ; struct work_struct *ldv_work_struct_17_3 ; struct drm_plane_state const *intel_plane_helper_funcs_group1 ; int ldv_state_variable_116 ; int ldv_state_variable_128 ; int ldv_state_variable_175 ; int ldv_state_variable_39 ; int ldv_state_variable_101 ; struct work_struct *ldv_work_struct_5_1 ; struct drm_display_mode *sil164_ops_group1 ; int ldv_state_variable_56 ; struct drm_i915_gem_object *i915_gem_phys_ops_group0 ; int ldv_state_variable_112 ; int ldv_state_variable_3 ; struct inode *i915_max_freq_fops_group1 ; struct drm_dp_mst_topology_mgr *mst_cbs_group0 ; int ldv_state_variable_135 ; int ldv_work_1_0 ; struct work_struct *ldv_work_struct_11_2 ; struct drm_device *intel_mode_funcs_group0 ; int ldv_state_variable_184 ; int ldv_state_variable_4 ; struct work_struct *ldv_work_struct_9_3 ; int ldv_state_variable_118 ; int ldv_work_9_2 ; int ldv_state_variable_117 ; struct work_struct *ldv_work_struct_6_3 ; struct work_struct *ldv_work_struct_5_2 ; struct 
drm_i915_gem_object *i915_gem_userptr_ops_group0 ; int ldv_work_9_1 ; struct work_struct *ldv_work_struct_5_3 ; int ldv_state_variable_5 ; struct work_struct *ldv_work_struct_18_3 ; struct i2c_adapter *intel_sdvo_ddc_proxy_group0 ; int ldv_state_variable_13 ; int ldv_state_variable_170 ; int ldv_work_19_0 ; struct trace_event_call *event_class_i915_gem_object_change_domain_group0 ; struct file *i915_cache_sharing_fops_group2 ; int ldv_work_19_2 ; int ldv_state_variable_152 ; int ldv_work_17_3 ; struct drm_connector *intel_hdmi_connector_helper_funcs_group0 ; int ldv_work_7_3 ; int ldv_state_variable_153 ; int ldv_work_12_0 ; int ldv_work_17_2 ; struct work_struct *ldv_work_struct_17_2 ; struct timer_list *ldv_timer_list_21_1 ; struct drm_property *intel_crt_connector_funcs_group0 ; struct drm_connector *intel_dvo_connector_funcs_group0 ; int ldv_state_variable_159 ; int ldv_work_12_1 ; int ldv_state_variable_85 ; struct drm_display_mode *ch7xxx_ops_group1 ; struct timer_list *ldv_timer_list_22_2 ; struct work_struct *ldv_work_struct_10_2 ; int ldv_state_variable_71 ; struct trace_event_call *event_class_i915_context_group0 ; int ldv_state_variable_77 ; struct trace_event_call *event_class_i915_reg_rw_group0 ; struct work_struct *ldv_work_struct_8_2 ; int ldv_state_variable_144 ; struct file *i915_displayport_test_active_fops_group2 ; int ldv_work_4_3 ; int ldv_work_3_1 ; int ldv_state_variable_43 ; int ldv_state_variable_121 ; int ldv_work_5_1 ; int ldv_state_variable_57 ; struct inode *i915_displayport_test_active_fops_group1 ; struct trace_event_call *event_class_i915_flip_complete_group0 ; int ldv_timer_22_0 ; struct kobject *dpf_attrs_1_group0 ; int ldv_state_variable_134 ; struct device *i915_audio_component_bind_ops_group0 ; int ldv_state_variable_167 ; int ldv_state_variable_185 ; int ldv_work_14_1 ; struct trace_event_call *event_class_switch_mm_group0 ; int ldv_state_variable_129 ; int ldv_work_8_2 ; struct drm_plane_state *intel_plane_helper_funcs_group3 
; int ldv_state_variable_34 ; struct trace_event_call *event_class_i915_gem_object_pread_group0 ; int ldv_work_15_3 ; int ldv_work_2_3 ; struct drm_connector *intel_lvds_connector_helper_funcs_group0 ; struct work_struct *ldv_work_struct_11_3 ; void ldv_file_operations_144(void) ; void ldv_initialize_drm_connector_funcs_50(void) ; void ldv_initialize_bin_attribute_171(void) ; void ldv_initialize_intel_dvo_dev_ops_64(void) ; void ldv_initialize_drm_connector_helper_funcs_56(void) ; void ldv_initialize_drm_i915_gem_object_ops_140(void) ; void ldv_initialize_drm_crtc_funcs_71(void) ; void ldv_pci_driver_187(void) ; void ldv_initialize_i915_power_well_ops_165(void) ; void ldv_initialize_trace_event_class_77(void) ; void ldv_file_operations_160(void) ; void work_init_1(void) ; void ldv_initialize_drm_connector_funcs_30(void) ; void call_and_disable_all_18(int state ) ; void ldv_initialize_trace_event_class_103(void) ; void ldv_initialize_mipi_dsi_host_ops_46(void) ; void work_init_19(void) ; void ldv_file_operations_154(void) ; void ldv_initialize_trace_event_class_93(void) ; void ldv_initialize_vga_switcheroo_client_ops_23(void) ; void ldv_initialize_trace_event_class_82(void) ; void ldv_initialize_i2c_algorithm_27(void) ; void ldv_file_operations_153(void) ; void ldv_initialize_trace_event_class_76(void) ; void ldv_initialize_drm_encoder_funcs_51(void) ; void ldv_initialize_drm_connector_helper_funcs_34(void) ; void ldv_initialize_drm_driver_188(void) ; void ldv_initialize_intel_dvo_dev_ops_63(void) ; void ldv_file_operations_156(void) ; void work_init_10(void) ; void ldv_file_operations_143(void) ; void ldv_initialize_drm_connector_funcs_38(void) ; void disable_work_18(struct work_struct *work ) ; void ldv_initialize_bin_attribute_180(void) ; void ldv_initialize_trace_event_class_86(void) ; void ldv_initialize_trace_event_class_83(void) ; void ldv_initialize_drm_connector_helper_funcs_44(void) ; void ldv_file_operations_155(void) ; void ldv_dev_pm_ops_191(void) ; 
void ldv_initialize_trace_event_class_94(void) ; void ldv_initialize_trace_event_class_91(void) ; void ldv_initialize_i915_power_well_ops_163(void) ; void ldv_initialize_component_ops_73(void) ; void ldv_initialize_i915_power_well_ops_170(void) ; void ldv_initialize_vm_operations_struct_190(void) ; void ldv_file_operations_149(void) ; void activate_work_18(struct work_struct *work , int state ) ; void ldv_initialize_drm_connector_helper_funcs_25(void) ; void ldv_initialize_drm_framebuffer_funcs_69(void) ; void ldv_initialize_trace_event_class_89(void) ; void ldv_initialize_drm_mode_config_funcs_68(void) ; void ldv_initialize_fb_ops_66(void) ; void ldv_initialize_device_attribute_177(void) ; void ldv_initialize_trace_event_class_104(void) ; void ldv_initialize_drm_connector_funcs_26(void) ; void ldv_initialize_trace_event_class_95(void) ; void ldv_initialize_drm_i915_gem_object_ops_139(void) ; void ldv_initialize_trace_event_class_85(void) ; void ldv_initialize_trace_event_class_88(void) ; void ldv_initialize_i915_power_well_ops_168(void) ; void ldv_file_operations_147(void) ; void ldv_initialize_drm_i915_gem_object_ops_138(void) ; void ldv_initialize_drm_panel_funcs_42(void) ; void work_init_15(void) ; void ldv_initialize_trace_event_class_96(void) ; void ldv_initialize_device_attribute_176(void) ; void ldv_initialize_drm_connector_funcs_41(void) ; void work_init_5(void) ; void work_init_9(void) ; void ldv_initialize_trace_event_class_84(void) ; void ldv_file_operations_158(void) ; void ldv_initialize_drm_fb_helper_funcs_65(void) ; void ldv_initialize_trace_event_class_90(void) ; void ldv_initialize_trace_event_class_101(void) ; void ldv_initialize_drm_connector_funcs_53(void) ; void ldv_initialize_intel_dvo_dev_ops_60(void) ; void ldv_initialize_trace_event_class_97(void) ; void ldv_initialize_bin_attribute_181(void) ; void ldv_initialize_trace_event_class_78(void) ; void work_init_20(void) ; void ldv_file_operations_161(void) ; void 
ldv_initialize_drm_connector_helper_funcs_37(void) ; void work_init_8(void) ; void ldv_initialize_trace_event_class_75(void) ; void ldv_initialize_dma_buf_ops_141(void) ; void work_init_14(void) ; void ldv_initialize_drm_plane_funcs_70(void) ; void ldv_initialize_i915_power_well_ops_169(void) ; void ldv_initialize_trace_event_class_102(void) ; void ldv_file_operations_148(void) ; void ldv_initialize_drm_connector_funcs_57(void) ; void ldv_file_operations_151(void) ; void ldv_initialize_drm_connector_helper_funcs_49(void) ; void ldv_initialize_drm_plane_helper_funcs_58(void) ; void ldv_file_operations_150(void) ; void work_init_13(void) ; void work_init_4(void) ; void ldv_file_operations_145(void) ; void ldv_initialize_drm_connector_helper_funcs_40(void) ; void ldv_file_operations_157(void) ; void ldv_initialize_drm_connector_helper_funcs_29(void) ; void work_init_16(void) ; void ldv_file_operations_189(void) ; void ldv_initialize_intel_dvo_dev_ops_62(void) ; void work_init_3(void) ; void work_init_11(void) ; void ldv_file_operations_142(void) ; void ldv_initialize_i2c_algorithm_35(void) ; void ldv_initialize_trace_event_class_81(void) ; void ldv_file_operations_152(void) ; void ldv_initialize_i915_power_well_ops_166(void) ; void ldv_initialize_trace_event_class_98(void) ; void ldv_file_operations_162(void) ; void ldv_initialize_backlight_ops_31(void) ; void work_init_7(void) ; void ldv_initialize_drm_crtc_helper_funcs_72(void) ; void ldv_initialize_drm_connector_helper_funcs_52(void) ; void ldv_initialize_drm_i915_gem_object_ops_137(void) ; void ldv_initialize_drm_connector_funcs_43(void) ; void ldv_file_operations_159(void) ; void work_init_17(void) ; void timer_init_22(void) ; void ldv_initialize_trace_event_class_87(void) ; void ldv_initialize_intel_dvo_dev_ops_61(void) ; void ldv_initialize_i915_power_well_ops_164(void) ; void ldv_initialize_drm_connector_funcs_33(void) ; void work_init_2(void) ; void ldv_initialize_intel_dvo_dev_ops_59(void) ; void 
work_init_6(void) ; void work_init_12(void) ; void ldv_file_operations_146(void) ; void ldv_initialize_i915_audio_component_ops_74(void) ; void ldv_initialize_i915_power_well_ops_167(void) ; void ldv_initialize_trace_event_class_80(void) ; void ldv_initialize_trace_event_class_99(void) ; void work_init_18(void) ; void ldv_initialize_drm_dp_mst_topology_cbs_47(void) ; void ldv_initialize_drm_i915_gem_object_ops_135(void) ; void timer_init_21(void) ; void ldv_initialize_trace_event_class_100(void) ; void ldv_initialize_trace_event_class_92(void) ; void ldv_initialize_trace_event_class_79(void) ;
/* NOTE(review): everything in this file is CIL/LDV-generated verification
 * harness code — do not hand-edit; regenerate from the driver sources. */
/* Return the driver-private data pointer stored in a struct device.
 * CIL copy of the kernel helper: reads dev->driver_data and casts away
 * const on the way out. */
__inline static void *dev_get_drvdata(struct device const *dev ) { { return ((void *)dev->driver_data); } }
/* Kernel / PCI-core externs referenced by the harness. */
extern void dev_err(struct device const * , char const * , ...) ; extern loff_t noop_llseek(struct file * , loff_t , int ) ; extern u32 acpi_target_system_state(void) ; extern void pci_dev_put(struct pci_dev * ) ; extern struct pci_dev *pci_get_class(unsigned int , struct pci_dev * ) ; extern int pci_enable_device(struct pci_dev * ) ; extern void pci_disable_device(struct pci_dev * ) ; extern void pci_set_master(struct pci_dev * ) ; extern int pci_save_state(struct pci_dev * ) ; extern int pci_set_power_state(struct pci_dev * , pci_power_t ) ;
/* Return the drvdata of a PCI device by delegating to dev_get_drvdata()
 * on the embedded generic device (&pdev->dev). */
__inline static void *pci_get_drvdata(struct pci_dev *pdev ) { void *tmp ; { tmp = dev_get_drvdata((struct device const *)(& pdev->dev)); return (tmp); } }
/* DRM-core externs referenced by the harness. */
extern atomic_t kgdb_active ; extern void drm_modeset_lock_all(struct drm_device * ) ; extern void drm_modeset_unlock_all(struct drm_device * ) ; extern void drm_mode_config_reset(struct drm_device * ) ; extern void usleep_range(unsigned long , unsigned long ) ; extern void drm_ut_debug_printk(char const * , char const * , ...) ; extern void drm_err(char const * , ...)
;
/* DRM file-operation and PRIME/PCI helper externs used by the harness. */
extern long drm_ioctl(struct file * , unsigned int , unsigned long ) ; extern int drm_open(struct inode * , struct file * ) ; extern ssize_t drm_read(struct file * , char * , size_t , loff_t * ) ; extern int drm_release(struct inode * , struct file * ) ; extern unsigned int drm_poll(struct file * , struct poll_table_struct * ) ; extern void drm_put_dev(struct drm_device * ) ; extern unsigned int drm_debug ; extern int drm_gem_prime_handle_to_fd(struct drm_device * , struct drm_file * , uint32_t , uint32_t , int * ) ; extern int drm_gem_prime_fd_to_handle(struct drm_device * , struct drm_file * , int , uint32_t * ) ; extern int drm_pci_init(struct drm_driver * , struct pci_driver * ) ; extern void drm_pci_exit(struct drm_driver * , struct pci_driver * ) ; extern int drm_get_pci_dev(struct pci_dev * , struct pci_device_id const * , struct drm_driver * ) ; extern int drm_pci_set_busid(struct drm_device * , struct drm_master * ) ;
/* drm_can_sleep(): returns 0 (false) when sleeping is not allowed —
 * i.e. when preempt_count() != 0, when this CPU is the one currently
 * holding kgdb (cpu_number == kgdb_active), or when local interrupts
 * are disabled — and 1 (true) otherwise.
 *
 * The nested switch (4UL) blocks are CIL's expansion of the kernel's
 * this_cpu_read() percpu accessor: only the `case 4UL:` arms (sizeof(int))
 * are reachable; the movb/movw/movq variants and __bad_percpu_size()/
 * __bad_size_call_parameter() are dead generic-macro residue. The
 * ldv_433xx labels are generated jump targets. Do not restructure:
 * the verifier depends on this exact token/label layout. */
__inline static bool drm_can_sleep(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_43356; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_43356; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_43356; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_43356; default: __bad_percpu_size(); } ldv_43356: pscr_ret__ = pfo_ret__; goto ldv_43362; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43366; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43366; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43366; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43366; default: __bad_percpu_size(); } ldv_43366: pscr_ret__ = pfo_ret_____0; goto ldv_43362; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43375; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43375; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43375; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43375; default: __bad_percpu_size(); } ldv_43375: pscr_ret__ = pfo_ret_____1; goto ldv_43362; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43384; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43384; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43384; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43384; default: __bad_percpu_size(); } ldv_43384: pscr_ret__ = pfo_ret_____2; goto ldv_43362; default: __bad_size_call_parameter(); goto ldv_43362; } ldv_43362: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } }
/* i915 GTT-mapping prototypes and GEM-core externs. */
void i915_gem_suspend_gtt_mappings(struct drm_device *dev ) ; void i915_gem_restore_gtt_mappings(struct drm_device *dev ) ; extern int intel_iommu_gfx_mapped ; extern void drm_gem_vm_open(struct vm_area_struct * ) ; extern void drm_gem_vm_close(struct vm_area_struct * ) ; extern int drm_gem_mmap(struct file * , struct vm_area_struct * ) ; extern int drm_gem_dumb_destroy(struct drm_file * , struct drm_device * , uint32_t ) ;
/* `__inline static` below belongs to to_i915(), whose declarator continues
 * on the next generated line. */
__inline static
/* to_i915(): fetch the driver-private object stashed in
 * drm_device::dev_private.  (CIL-flattened source: all code tokens in this
 * block are preserved byte-for-byte; only comments were added.  The
 * `__inline static` qualifier for this definition sits at the end of the
 * previous physical line.) */
struct drm_i915_private *to_i915(struct drm_device const *dev ) { { return ((struct drm_i915_private *)dev->dev_private); } }
/* dev_to_i915(): map a generic struct device to its drm_i915_private by
 * reading the drvdata pointer (which holds the drm_device) via
 * dev_get_drvdata() and delegating to to_i915().  tmp/tmp___0 are CIL
 * temporaries introduced by the flattening pass. */
__inline static struct drm_i915_private *dev_to_i915(struct device *dev ) { void *tmp ; struct drm_i915_private *tmp___0 ; { tmp = dev_get_drvdata((struct device const *)dev); tmp___0 = to_i915((struct drm_device const *)tmp); return (tmp___0); } }
/* CIL-emitted prototypes for DRM KMS helpers and i915 power-management /
 * display entry points referenced elsewhere in this translation unit.
 * The final prototype is split across the physical-line boundary. */
extern bool drm_helper_hpd_irq_event(struct drm_device * ) ; extern void drm_kms_helper_poll_disable(struct drm_device * ) ; extern void drm_kms_helper_poll_enable(struct drm_device * ) ; void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv ) ; void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv ) ; void intel_prepare_ddi(struct drm_device *dev ) ; void intel_crtc_control(struct drm_crtc *crtc , bool enable ) ; void hsw_enable_pc8(struct drm_i915_private *dev_priv ) ; void hsw_disable_pc8(struct drm_i915_private *dev_priv ) ; void broxton_init_cdclk(struct drm_device *dev ) ; void broxton_uninit_cdclk(struct drm_device *dev ) ; void broxton_ddi_phy_init(struct drm_device *dev ) ; void broxton_ddi_phy_uninit(struct drm_device *dev ) ; void bxt_enable_dc9(struct drm_i915_private *dev_priv ) ; void bxt_disable_dc9(struct drm_i915_private *dev_priv ) ; void skl_init_cdclk(struct drm_i915_private *dev_priv ) ; void skl_uninit_cdclk(struct drm_i915_private *dev_priv ) ; void intel_csr_load_status_set(struct drm_i915_private *dev_priv , enum csr_state state ) ; void intel_csr_load_program(struct drm_device *dev ) ; void intel_dp_mst_suspend(struct drm_device *dev ) ; void intel_dp_mst_resume(struct drm_device *dev ) ; void intel_fbdev_set_suspend(struct drm_device *dev , int state , bool synchronous ) ; void intel_overlay_reset(struct drm_i915_private *dev_priv ) ; void intel_power_domains_init_hw(struct drm_i915_private *dev_priv ) ; void intel_display_set_init_power(struct drm_i915_private *dev_priv , bool enable ) ; void intel_init_clock_gating(struct
drm_device *dev ) ; void intel_suspend_hw(struct drm_device *dev ) ; void intel_enable_gt_powersave(struct drm_device *dev ) ; void intel_suspend_gt_powersave(struct drm_device *dev ) ; void intel_reset_gt_powersave(struct drm_device *dev ) ; void gen6_update_ring_freq(struct drm_device *dev ) ; struct drm_ioctl_desc const i915_ioctls[54U] ; int i915_max_ioctl ; int i915_suspend_legacy(struct drm_device *dev , pm_message_t state ) ; int i915_resume_legacy(struct drm_device *dev ) ; struct i915_params i915 ; int i915_driver_load(struct drm_device *dev , unsigned long flags ) ; int i915_driver_unload(struct drm_device *dev ) ; int i915_driver_open(struct drm_device *dev , struct drm_file *file ) ; void i915_driver_lastclose(struct drm_device *dev ) ; void i915_driver_preclose(struct drm_device *dev , struct drm_file *file ) ; void i915_driver_postclose(struct drm_device *dev , struct drm_file *file ) ; int i915_driver_device_is_agp(struct drm_device *dev ) ; long i915_compat_ioctl(struct file *filp , unsigned int cmd , unsigned long arg ) ; int intel_gpu_reset(struct drm_device *dev ) ; int i915_reset(struct drm_device *dev ) ; int vlv_force_gfx_clock(struct drm_i915_private *dev_priv , bool force_on ) ; void intel_hpd_cancel_work(struct drm_i915_private *dev_priv ) ; void i915_firmware_load_error_print(char const *fw_path , int err ) ; void intel_hpd_init(struct drm_i915_private *dev_priv ) ; void intel_uncore_sanitize(struct drm_device *dev ) ; void intel_uncore_early_sanitize(struct drm_device *dev , bool restore_forcewake ) ; void intel_uncore_forcewake_reset(struct drm_device *dev , bool restore ) ; void assert_forcewakes_inactive(struct drm_i915_private *dev_priv ) ; void i915_gem_free_object(struct drm_gem_object *gem_obj ) ; void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv ) ; int i915_gem_dumb_create(struct drm_file *file , struct drm_device *dev , struct drm_mode_create_dumb *args ) ; int i915_gem_mmap_gtt(struct drm_file *file , struct 
drm_device *dev , uint32_t handle , uint64_t *offset ) ;
/* i915_stop_ring_allow_warn(): true when stop_rings is 0 (rings not
 * artificially stopped for testing) or when bit 0x40000000 (1073741824U)
 * is set -- presumably the I915_STOP_RING_ALLOW_WARN flag; confirm
 * against the kernel's i915_drv.h.  (CIL-flattened source: code tokens
 * in this block are byte-for-byte original; only comments were added.) */
__inline static bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv ) { { return ((bool )(dev_priv->gpu_error.stop_rings == 0U || (dev_priv->gpu_error.stop_rings & 1073741824U) != 0U)); } } void i915_gem_reset(struct drm_device *dev ) ; int i915_gem_init_hw(struct drm_device *dev ) ; void i915_gem_init_swizzling(struct drm_device *dev ) ; int i915_gem_suspend(struct drm_device *dev ) ; int i915_gem_fault(struct vm_area_struct *vma , struct vm_fault *vmf ) ; struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev , struct dma_buf *dma_buf ) ; struct dma_buf *i915_gem_prime_export(struct drm_device *dev , struct drm_gem_object *gem_obj , int flags ) ; void i915_gem_restore_fences(struct drm_device *dev ) ; int i915_debugfs_init(struct drm_minor *minor ) ; void i915_debugfs_cleanup(struct drm_minor *minor ) ; int i915_save_state(struct drm_device *dev ) ; int i915_restore_state(struct drm_device *dev ) ; int intel_opregion_setup(struct drm_device *dev ) ; void intel_opregion_init(struct drm_device *dev ) ; void intel_opregion_fini(struct drm_device *dev ) ; int intel_opregion_notify_adapter(struct drm_device *dev , pci_power_t state ) ; void intel_modeset_init_hw(struct drm_device *dev ) ; void intel_modeset_setup_hw_state(struct drm_device *dev , bool force_restore ) ; void intel_init_pch_refclk(struct drm_device *dev ) ; void intel_detect_pch(struct drm_device *dev ) ; int intel_enable_rc6(struct drm_device const *dev ) ; bool i915_semaphore_is_enabled(struct drm_device *dev ) ; extern bool vgacon_text_force(void) ;
/* pm_runtime_mark_last_busy(): store the current jiffies value into
 * dev->power.last_busy through a volatile-qualified pointer -- this looks
 * like CIL's rendering of the kernel's ACCESS_ONCE/WRITE_ONCE-style
 * store; confirm against include/linux/pm_runtime.h.  __var is assigned
 * 0UL and never read: a dead temporary left over from the CIL pass. */
__inline static void pm_runtime_mark_last_busy(struct device *dev ) { unsigned long __var ; { __var = 0UL; *((unsigned long volatile *)(& dev->power.last_busy)) = jiffies; return; } } static struct drm_driver driver ;
/* Machine-generated per-platform device-info tables start here.  The
 * positional initializers appear to correspond one-for-one to the fields
 * of struct intel_device_info (declared outside this chunk) -- verify
 * field order against i915_drv.h before editing any value.  The tables
 * and the trailing pciidlist[] continue past this block unchanged. */
static struct intel_device_info const intel_i830_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 2U, 1U, 1U,
(unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_845g_info = {0U, (unsigned short)0, 1U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 2U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i85x_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 2U, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 
(unsigned char)0, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i865g_info = {0U, (unsigned short)0, 1U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 2U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i915g_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 3U, 1U, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned 
char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i915gm_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 3U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i945g_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 3U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i945gm_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 3U, 1U, 1U, (unsigned 
char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, 1U, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i965g_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 4U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_i965gm_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 4U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned 
char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_g33_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 3U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_g45_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 4U, 3U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned 
char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_gm45_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 4U, 3U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_pineview_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 3U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_ironlake_d_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 5U, 3U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned 
char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_ironlake_m_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 5U, 3U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_sandybridge_d_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 6U, 7U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned 
char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_sandybridge_m_info = {0U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 6U, 7U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_ivybridge_d_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 7U, 7U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, 
(unsigned char)0}; static struct intel_device_info const intel_ivybridge_m_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 7U, 7U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_ivybridge_q_info = {0U, (unsigned short)0, 0U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 7U, 7U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_valleyview_m_info = {1572864U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 7U, 7U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 
(unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 0U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 0U, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_valleyview_d_info = {1572864U, (unsigned short)0, 2U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 7U, 7U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 0U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 0U, (unsigned char)0, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_haswell_d_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 7U, 15U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 
462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_haswell_m_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 7U, 15U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_broadwell_d_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 8U, 15U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_broadwell_m_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned 
char)0}, 8U, 15U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_broadwell_gt3d_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 8U, 31U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0}; static struct intel_device_info const intel_broadwell_gt3m_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 8U, 31U, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 
/*
 * NOTE(review): CIL v1.5.1 output of drivers/gpu/drm/i915/i915_drv.c (Linux
 * 4.2-rc1), generated for a software verifier.  Designated initializers were
 * flattened to positional form, so the struct intel_device_info fields below
 * are unnamed; only comments are added here, no token is changed.
 */
/* Tail of an intel_device_info initializer whose opening lies before this chunk (incomplete from this view). */
(unsigned char)0, 1U, 1U, 1U, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0};
/* Cherryview: 5th scalar 8U is presumably .gen (gen8) -- TODO confirm against struct intel_device_info layout. */
static struct intel_device_info const intel_cherryview_info = {1572864U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 8U, 15U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {458752, 462848, 475136}, {393216, 397312, 405504}, {40960, 43008, 49152}, {458880, 458944, 458976}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0};
/* Skylake GT1/GT2: 9U presumably .gen (gen9); 15U presumably a ring/feature mask -- TODO confirm. */
static struct intel_device_info const intel_skylake_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 9U, 15U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0};
/* Skylake GT3: differs from intel_skylake_info only in the 6th scalar (31U vs 15U). */
static struct intel_device_info const intel_skylake_gt3_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 9U, 31U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0};
/* Broxton (gen9 low-power, per the 9U scalar and the 0x5a84/0x1a84 ids referencing it below). */
static struct intel_device_info const intel_broxton_info = {0U, (unsigned short)0, 3U, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, 9U, 15U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, 1U, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, {458752, 462848, 466944, 520192}, {393216, 397312, 401408, 454656}, {40960, 43008}, {458880, 462976, 467072}, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, {(unsigned char)0, (unsigned char)0, (unsigned char)0}, (unsigned char)0, (unsigned char)0, (unsigned char)0};
/*
 * PCI match table.  Each entry: {vendor 32902U (0x8086, Intel), device id,
 * subvendor 4294967295U (0xffffffff, PCI_ANY_ID), subdevice PCI_ANY_ID,
 * class 196608U (0x030000, display controller), class_mask 16711680U
 * (0xff0000), driver_data = pointer to the matching intel_*_info above}.
 * Terminated by an all-zero sentinel on a later line.
 */
static struct pci_device_id const pciidlist[152U] = { {32902U, 13687U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i830_info)}, {32902U, 9570U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_845g_info)}, {32902U, 13698U, 4294967295U, 4294967295U, 196608U, 
/* i85x / i865g / i915 / i945 / i965 / G33 / GM45 era ids -- family given by each entry's intel_*_info driver_data. */
16711680U, (unsigned long )(& intel_i85x_info)}, {32902U, 13710U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i85x_info)}, {32902U, 9586U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i865g_info)}, {32902U, 9602U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i915g_info)}, {32902U, 9610U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i915g_info)}, {32902U, 9618U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i915gm_info)}, {32902U, 10098U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i945g_info)}, {32902U, 10146U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i945gm_info)}, {32902U, 10158U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i945gm_info)}, {32902U, 10610U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i965g_info)}, {32902U, 10626U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i965g_info)}, {32902U, 10642U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i965g_info)}, {32902U, 10658U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i965g_info)}, {32902U, 10674U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g33_info)}, {32902U, 10690U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g33_info)}, {32902U, 10706U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g33_info)}, {32902U, 10754U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i965gm_info)}, {32902U, 10770U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_i965gm_info)}, {32902U, 10818U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_gm45_info)}, {32902U, 11778U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g45_info)}, {32902U, 11794U, 
/* G45 / Pineview / Ironlake / Sandybridge ids; the Ivybridge Q entry (362U) uses a real subvendor/subdevice match (5421U/35216U) instead of PCI_ANY_ID. */
4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g45_info)}, {32902U, 11810U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g45_info)}, {32902U, 11826U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g45_info)}, {32902U, 11842U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g45_info)}, {32902U, 11922U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_g45_info)}, {32902U, 40961U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_pineview_info)}, {32902U, 40977U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_pineview_info)}, {32902U, 66U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ironlake_d_info)}, {32902U, 70U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ironlake_m_info)}, {32902U, 258U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_sandybridge_d_info)}, {32902U, 274U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_sandybridge_d_info)}, {32902U, 290U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_sandybridge_d_info)}, {32902U, 266U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_sandybridge_d_info)}, {32902U, 262U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_sandybridge_m_info)}, {32902U, 278U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_sandybridge_m_info)}, {32902U, 294U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_sandybridge_m_info)}, {32902U, 362U, 5421U, 35216U, 196608U, 16711680U, (unsigned long )(& intel_ivybridge_q_info)}, {32902U, 342U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ivybridge_m_info)}, {32902U, 358U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ivybridge_m_info)}, {32902U, 338U, 4294967295U, 
/* Ivybridge desktop + Haswell desktop ids. */
4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ivybridge_d_info)}, {32902U, 354U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ivybridge_d_info)}, {32902U, 346U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ivybridge_d_info)}, {32902U, 362U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_ivybridge_d_info)}, {32902U, 1026U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1042U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1058U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1034U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1050U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1066U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1035U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1051U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1067U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1038U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1054U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1070U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3074U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3090U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3106U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3082U, 4294967295U, 
/* More Haswell desktop variants. */
4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3098U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3114U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3083U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3099U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3115U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3086U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3102U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3118U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2562U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2578U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2594U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2570U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2586U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2602U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2571U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2587U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 2603U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3330U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3346U, 4294967295U, 
/* Final Haswell desktop ids, then Haswell mobile ids. */
4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3362U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3338U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3354U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3370U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3339U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3355U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3371U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3342U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3358U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 3374U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_d_info)}, {32902U, 1030U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 1046U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 1062U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 3078U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 3094U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 3110U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 2566U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 2582U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 2598U, 4294967295U, 
/* Haswell mobile continued, Valleyview, then Broadwell mobile ids. */
4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 2574U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 2590U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 2606U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 3334U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 3350U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 3366U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_haswell_m_info)}, {32902U, 3888U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_valleyview_m_info)}, {32902U, 3889U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_valleyview_m_info)}, {32902U, 3890U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_valleyview_m_info)}, {32902U, 3891U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_valleyview_m_info)}, {32902U, 343U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_valleyview_m_info)}, {32902U, 341U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_valleyview_d_info)}, {32902U, 5634U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, {32902U, 5638U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, {32902U, 5643U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, {32902U, 5646U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, {32902U, 5650U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, {32902U, 5654U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, 
/* Broadwell desktop/GT3, Cherryview, and the first Skylake ids. */
{32902U, 5659U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, {32902U, 5662U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_m_info)}, {32902U, 5642U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_d_info)}, {32902U, 5645U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_d_info)}, {32902U, 5658U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_d_info)}, {32902U, 5661U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_d_info)}, {32902U, 5666U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_gt3m_info)}, {32902U, 5670U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_gt3m_info)}, {32902U, 5675U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_gt3m_info)}, {32902U, 5678U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_gt3m_info)}, {32902U, 5674U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_gt3d_info)}, {32902U, 5677U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broadwell_gt3d_info)}, {32902U, 8880U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_cherryview_info)}, {32902U, 8881U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_cherryview_info)}, {32902U, 8882U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_cherryview_info)}, {32902U, 8883U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_cherryview_info)}, {32902U, 6406U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6414U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6402U, 4294967295U, 4294967295U, 196608U, 16711680U, 
/* Remainder of pciidlist: Skylake GT1/2/3 and Broxton ids, closed by the all-zero sentinel entry. */
(unsigned long )(& intel_skylake_info)}, {32902U, 6411U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6410U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6422U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6433U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6430U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6418U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6427U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6426U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6429U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_info)}, {32902U, 6438U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_gt3_info)}, {32902U, 6443U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_gt3_info)}, {32902U, 6442U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_skylake_gt3_info)}, {32902U, 2692U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broxton_info)}, {32902U, 6788U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broxton_info)}, {32902U, 23172U, 4294967295U, 4294967295U, 196608U, 16711680U, (unsigned long )(& intel_broxton_info)}, {0U, 0U, 0U, 0U, 0U, 0U, 0UL}};
/* Alias emitted for MODULE_DEVICE_TABLE(pci, pciidlist); CIL kept only the declaration. */
struct pci_device_id const __mod_pci__pciidlist_device_table[152U] ;
/*
 * intel_detect_pch - identify the PCH (south bridge) paired with the GPU.
 * Scans all PCI devices of class 0x060100 (ISA bridge; pci_get_class(393472U))
 * made by Intel (vendor 32902U == 0x8086) and classifies dev_priv->pch_type
 * from bits 15:8 of the bridge device id (id = device & 65280U == & 0xff00):
 *   15104U (0x3b00) -> type 1, Ibex Peak;
 *   7168U (0x1c00) / 7680U (0x1e00) -> type 2, CougarPoint / PantherPoint;
 *   35840U (0x8c00) / 39936U (0x9c00) -> type 3, LynxPoint / LynxPoint-LP;
 *   41216U (0xa100) / 40192U (0x9d00) -> type 4, SunrisePoint / SunrisePoint-LP.
 * Each branch issues a WARN when the PCH does not match the GPU generation
 * (the inlined IS_GENx / IS_HASWELL / IS_SKYLAKE checks, per the WARN strings).
 * The ldv_* labels and gotos are CIL's lowering of the original scan loop;
 * pci_dev_put() releases the reference held on the last bridge examined.
 */
void intel_detect_pch(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct pci_dev *pch ; struct drm_i915_private *__p ; unsigned short id ; long tmp ; int __ret_warn_on ; struct drm_i915_private *__p___0 ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; struct 
/* CIL hoists every temporary of the expanded WARN_ON / IS_* macros to the top of the function. */
drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; int __ret_warn_on___1 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int tmp___5 ; long tmp___6 ; long tmp___7 ; int __ret_warn_on___2 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; int tmp___8 ; long tmp___9 ; int __ret_warn_on___3 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; struct drm_i915_private *__p___13 ; struct drm_i915_private *__p___14 ; int tmp___10 ; long tmp___11 ; long tmp___12 ; int __ret_warn_on___4 ; struct drm_i915_private *__p___15 ; struct drm_i915_private *__p___16 ; struct drm_i915_private *__p___17 ; int tmp___13 ; long tmp___14 ; int __ret_warn_on___5 ; struct drm_i915_private *__p___18 ; struct drm_i915_private *__p___19 ; struct drm_i915_private *__p___20 ; struct drm_i915_private *__p___21 ; struct drm_i915_private *__p___22 ; struct drm_i915_private *__p___23 ; struct drm_i915_private *__p___24 ; int tmp___15 ; long tmp___16 ; long tmp___17 ; int __ret_warn_on___6 ; struct drm_i915_private *__p___25 ; long tmp___18 ; long tmp___19 ; int __ret_warn_on___7 ; struct drm_i915_private *__p___26 ; long tmp___20 ; long tmp___21 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pch = (struct pci_dev *)0; __p = to_i915((struct drm_device const *)dev); 
/* Early out: byte at offset 38 of the device info is 0 -- presumably the "no display pipes" flag (TODO confirm field layout); type 5 is then a no-op PCH. */
if ((unsigned int )*((unsigned char *)__p + 38UL) == 0U) { dev_priv->pch_type = 5; return; } else { } 
/* Enter the bridge-scan loop (ldv_52295 fetches the next ISA bridge; ldv_52297 is the loop body). */
goto ldv_52295; ldv_52297: ; if ((unsigned int )pch->vendor == 32902U) { id = (unsigned int )pch->device & 65280U; dev_priv->pch_id = id; 
/* 0x3b00: Ibex Peak -- WARNs unless the GPU is gen5. */
if ((unsigned int )id == 15104U) { dev_priv->pch_type = 1; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_detect_pch", "Found Ibex Peak PCH\n"); } else { } __p___0 = to_i915((struct 
drm_device const *)dev); __ret_warn_on = (unsigned int )((unsigned char )__p___0->info.gen) != 5U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 479, "WARN_ON(!IS_GEN5(dev))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } 
/* 0x1c00: CougarPoint -- expects gen6 or Ivybridge (byte at offset 45 is presumably the is_valleyview/is_ivybridge style flag the WARN string names). */
else if ((unsigned int )id == 7168U) { dev_priv->pch_type = 2; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_detect_pch", "Found CougarPoint PCH\n"); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) != 6U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } __ret_warn_on___0 = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 483, "WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } 
/* 0x1e00: PantherPoint -- also reported as CougarPoint-class (type 2), same gen6/IVB expectation. */
else if ((unsigned int )id == 7680U) { dev_priv->pch_type = 2; tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_detect_pch", "Found PantherPoint PCH\n"); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) != 6U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { tmp___5 = 1; } else { tmp___5 = 0; } } else { tmp___5 = 0; } __ret_warn_on___1 = 
tmp___5; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 488, "WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)))"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } 
/* 0x8c00: LynxPoint -- expects Haswell/Broadwell and WARNs when the GPU is a ULT part (device_id nibble 6/11/14 checks below implement IS_HSW_ULT/IS_BDW_ULT per the WARN strings). */
else if ((unsigned int )id == 35840U) { dev_priv->pch_type = 3; tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_detect_pch", "Found LynxPoint PCH\n"); } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { tmp___8 = 1; } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) != 8U) { tmp___8 = 1; } else { tmp___8 = 0; } } } else { tmp___8 = 0; } __ret_warn_on___2 = tmp___8; tmp___9 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 492, "WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev))"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { __p___9 = to_i915((struct drm_device const *)dev); if (((int )__p___9->info.device_id & 65280) == 2560) { tmp___10 = 1; } else { goto _L; } } else { _L: /* CIL Label */ __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) == 0U) { __p___11 = to_i915((struct drm_device const *)dev); if 
((unsigned int )((unsigned char )__p___11->info.gen) == 8U) { __p___12 = to_i915((struct drm_device const *)dev); if (((int )__p___12->info.device_id & 15) == 6) { tmp___10 = 1; } else { __p___13 = to_i915((struct drm_device const *)dev); if (((int )__p___13->info.device_id & 15) == 11) { tmp___10 = 1; } else { __p___14 = to_i915((struct drm_device const *)dev); if (((int )__p___14->info.device_id & 15) == 14) { tmp___10 = 1; } else { tmp___10 = 0; } } } } else { tmp___10 = 0; } } else { tmp___10 = 0; } } __ret_warn_on___3 = tmp___10; tmp___11 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___11 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 493, "WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))"); } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); } 
/* 0x9c00: LynxPoint-LP -- the mirror case: expects Haswell/Broadwell AND a ULT part. */
else if ((unsigned int )id == 39936U) { dev_priv->pch_type = 3; tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("intel_detect_pch", "Found LynxPoint LP PCH\n"); } else { } __p___15 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___15 + 45UL) == 0U) { __p___16 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___16 + 45UL) != 0U) { tmp___13 = 1; } else { __p___17 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___17->info.gen) != 8U) { tmp___13 = 1; } else { tmp___13 = 0; } } } else { tmp___13 = 0; } __ret_warn_on___4 = tmp___13; tmp___14 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___14 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 497, 
"WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev))"); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); __p___18 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___18 + 45UL) == 0U) { goto _L___0; } else { __p___19 = to_i915((struct drm_device const *)dev); if (((int )__p___19->info.device_id & 65280) != 2560) { _L___0: /* CIL Label */ __p___20 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___20 + 45UL) != 0U) { tmp___15 = 1; } else { __p___21 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___21->info.gen) != 8U) { tmp___15 = 1; } else { __p___22 = to_i915((struct drm_device const *)dev); if (((int )__p___22->info.device_id & 15) != 6) { __p___23 = to_i915((struct drm_device const *)dev); if (((int )__p___23->info.device_id & 15) != 11) { __p___24 = to_i915((struct drm_device const *)dev); if (((int )__p___24->info.device_id & 15) != 14) { tmp___15 = 1; } else { tmp___15 = 0; } } else { tmp___15 = 0; } } else { tmp___15 = 0; } } } } else { tmp___15 = 0; } } __ret_warn_on___5 = tmp___15; tmp___16 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___16 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 498, "WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev))"); } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); } 
/* 0xa100: SunrisePoint -- expects Skylake (byte at offset 45, per the WARN string). */
else if ((unsigned int )id == 41216U) { dev_priv->pch_type = 4; tmp___17 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___17 != 0L) { drm_ut_debug_printk("intel_detect_pch", "Found SunrisePoint PCH\n"); } else { } __p___25 = to_i915((struct drm_device const *)dev); __ret_warn_on___6 = (unsigned int )*((unsigned char *)__p___25 + 45UL) == 0U; tmp___18 = ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); if (tmp___18 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 502, "WARN_ON(!IS_SKYLAKE(dev))"); } else { } ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); } 
/* 0x9d00: SunrisePoint-LP -- same Skylake expectation; unknown ids fall through and keep scanning. */
else if ((unsigned int )id == 40192U) { dev_priv->pch_type = 4; tmp___19 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___19 != 0L) { drm_ut_debug_printk("intel_detect_pch", "Found SunrisePoint LP PCH\n"); } else { } __p___26 = to_i915((struct drm_device const *)dev); __ret_warn_on___7 = (unsigned int )*((unsigned char *)__p___26 + 45UL) == 0U; tmp___20 = ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); if (tmp___20 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 506, "WARN_ON(!IS_SKYLAKE(dev))"); } else { } ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); } else { goto ldv_52295; } goto ldv_52296; } else { } 
/* Loop step: fetch the next ISA-bridge device (class 393472U == 0x060100); NULL ends the scan. */
ldv_52295: pch = pci_get_class(393472U, pch); if ((unsigned long )pch != (unsigned long )((struct pci_dev *)0)) { goto ldv_52297; } else { } ldv_52296: ; if ((unsigned long )pch == (unsigned long )((struct pci_dev *)0)) { tmp___21 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___21 != 0L) { drm_ut_debug_printk("intel_detect_pch", "No PCH found.\n"); } else { } } else { } pci_dev_put(pch); return; } } 
/*
 * i915_semaphore_is_enabled - decide whether inter-ring semaphores are used.
 * gen <= 5: false.  i915.semaphores module param >= 0 forces the answer.
 * Execlists enabled: false.  (continues on the next source line: gen8 and
 * gen6-with-IOMMU also disable semaphores; otherwise true.)
 */
bool i915_semaphore_is_enabled(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (0); } else { } if (i915.semaphores >= 0) { return (i915.semaphores != 0); } else { } if (i915.enable_execlists != 0) { return (0); } else { } __p___0 = 
/* Continuation of i915_semaphore_is_enabled(): gen8 -> false, gen6 with graphics IOMMU mapped -> false, otherwise true. */
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { return (0); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 6U && intel_iommu_gfx_mapped != 0) { return (0); } else { } return (1); } } 
/* Clear all pending hotplug state under irq_lock, then cancel the hotplug work items (ldv_* wrappers stub cancel_work_sync / cancel_delayed_work_sync for the verifier). */
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv ) { { spin_lock_irq(& dev_priv->irq_lock); dev_priv->long_hpd_port_mask = 0U; dev_priv->short_hpd_port_mask = 0U; dev_priv->hpd_event_bits = 0U; spin_unlock_irq(& dev_priv->irq_lock); ldv_cancel_work_sync_10(& dev_priv->dig_port_work); ldv_cancel_work_sync_11(& dev_priv->hotplug_work); ldv_cancel_delayed_work_sync_12(& dev_priv->hotplug_reenable_work); return; } } 
/* Log a firmware load failure; err == 0 is normalized to -2 (presumably -ENOENT). */
void i915_firmware_load_error_print(char const *fw_path , int err ) { { drm_err("failed to load firmware %s (%d)\n", fw_path, err); if (err == 0) { err = -2; } else { } return; 
/* NOTE(review): the drm_err() below is unreachable -- the unconditional "return;" above is CIL's constant-folding of the original IS_BUILTIN(CONFIG_DRM_I915) guard.  Dead code, kept byte-identical to stay faithful to the verifier input. */
drm_err("The driver is built-in, so to load the firmware you need to\ninclude it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\nin your initrd/initramfs image.\n"); } } 
/* Walk dev->mode_config.encoder_list under the modeset locks and invoke each encoder's ->suspend hook if set.  The __mptr arithmetic is CIL's expansion of list_for_each_entry/container_of (list head at offset 8 of struct drm_encoder). */
static void intel_suspend_encoders(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct drm_encoder *encoder ; struct list_head const *__mptr ; struct intel_encoder *intel_encoder ; struct drm_encoder const *__mptr___0 ; struct list_head const *__mptr___1 ; { dev = dev_priv->dev; drm_modeset_lock_all(dev); __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct drm_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_52339; ldv_52338: __mptr___0 = (struct drm_encoder const *)encoder; intel_encoder = (struct intel_encoder *)__mptr___0; if ((unsigned long )intel_encoder->suspend != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(intel_encoder->suspend))(intel_encoder); } else { } __mptr___1 = (struct list_head const *)encoder->head.next; encoder = (struct drm_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_52339: ; if ((unsigned long 
)(& encoder->head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52338; } else { } drm_modeset_unlock_all(dev); return; } } 
/* Forward declarations for the platform-specific suspend/resume helpers defined later in the file. */
static int intel_suspend_complete(struct drm_i915_private *dev_priv ) ; static int vlv_resume_prepare(struct drm_i915_private *dev_priv , bool rpm_resume ) ; static int skl_resume_prepare(struct drm_i915_private *dev_priv ) ; static int bxt_resume_prepare(struct drm_i915_private *dev_priv ) ; 
/*
 * i915_drm_suspend - main system-suspend path: saves PCI state, idles GEM
 * (failure aborts the suspend), disables every CRTC, suspends DP-MST,
 * interrupts, encoders and HW state, then notifies the ACPI opregion.
 * opregion_target_state: 4 (presumably PCI_D3cold), downgraded to 1
 * (presumably PCI_D1) when acpi_target_system_state() <= 2U, i.e. a target
 * state shallower than S3 -- TODO confirm against the pci_power_t enum.
 */
static int i915_drm_suspend(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; pci_power_t opregion_target_state ; int error ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; u32 tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev_priv->modeset_restore_lock, 0U); dev_priv->modeset_restore = 2; mutex_unlock(& dev_priv->modeset_restore_lock); intel_display_set_init_power(dev_priv, 1); drm_kms_helper_poll_disable(dev); pci_save_state(dev->pdev); error = i915_gem_suspend(dev); if (error != 0) { dev_err((struct device const *)(& (dev->pdev)->dev), "GEM idle failed, resume might fail\n"); return (error); } else { } intel_suspend_gt_powersave(dev); drm_modeset_lock_all(dev); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_52362; ldv_52361: intel_crtc_control(crtc, 0); __mptr___0 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_52362: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_52361; } else { } drm_modeset_unlock_all(dev); intel_dp_mst_suspend(dev); intel_runtime_pm_disable_interrupts(dev_priv); intel_hpd_cancel_work(dev_priv); intel_suspend_encoders(dev_priv); intel_suspend_hw(dev); i915_gem_suspend_gtt_mappings(dev); i915_save_state(dev); opregion_target_state = 4; tmp = acpi_target_system_state(); if (tmp <= 2U) { opregion_target_state = 1; } else { 
} intel_opregion_notify_adapter(dev, opregion_target_state); intel_uncore_forcewake_reset(dev, 0); intel_opregion_fini(dev); intel_fbdev_set_suspend(dev, 1, 1); dev_priv->suspend_count = dev_priv->suspend_count + 1U; intel_display_set_init_power(dev_priv, 0); return (0); } } 
/* Late suspend: run the platform suspend-complete hook, disable the PCI device, then enter D3hot (pci_set_power_state(.., 3)) -- skipped when hibernating on a device with subsystem vendor 6058U (0x17aa, presumably a Lenovo quirk) unless the GPU is gen4. */
static int i915_drm_suspend_late(struct drm_device *drm_dev , bool hibernation ) { struct drm_i915_private *dev_priv ; int ret ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)drm_dev->dev_private; ret = intel_suspend_complete(dev_priv); if (ret != 0) { drm_err("Suspend complete failed: %d\n", ret); return (ret); } else { } pci_disable_device(drm_dev->pdev); if (! hibernation || (unsigned int )(drm_dev->pdev)->subsystem_vendor != 6058U) { pci_set_power_state(drm_dev->pdev, 3); } else { __p = dev_priv; if ((unsigned int )((unsigned char )__p->info.gen) != 4U) { pci_set_power_state(drm_dev->pdev, 3); } else { } } return (0); } } 
/* Legacy DRM suspend entry point: validates dev, WARN_ON_ONCEs unexpected pm events (only 2/1, per the WARN string PM_EVENT_SUSPEND/PM_EVENT_FREEZE, are accepted), skips work when vga-switcheroo has the device off, then chains i915_drm_suspend + i915_drm_suspend_late. */
int i915_suspend_legacy(struct drm_device *dev , pm_message_t state ) { int error ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; { if ((unsigned long )dev == (unsigned long )((struct drm_device *)0) || (unsigned long )dev->dev_private == (unsigned long )((void *)0)) { drm_err("dev: %p\n", dev); drm_err("DRM not initialized, aborting suspend.\n"); return (-19); } else { } __ret_warn_once = state.event != 2 && state.event != 1; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 711, "WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && state.event != PM_EVENT_FREEZE)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { return (-22); } else { } if (dev->switch_power_state == 1) { return (0); } else { } error = i915_drm_suspend(dev); if (error != 0) { return (error); } else { } tmp___3 = i915_drm_suspend_late(dev, 0); return (tmp___3); } } 
/* Resume path: restore GTT mappings and saved register state, re-init the PCH refclk and HW; when i915_gem_init_hw() fails the GPU is marked wedged by atomically OR-ing 0x80000000 into gpu_error.reset_counter (the inline asm is a lock-prefixed orl). */
static int i915_drm_resume(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev->struct_mutex, 0U); i915_gem_restore_gtt_mappings(dev); mutex_unlock(& dev->struct_mutex); i915_restore_state(dev); intel_opregion_setup(dev); intel_init_pch_refclk(dev); drm_mode_config_reset(dev); intel_runtime_pm_enable_interrupts(dev_priv); mutex_lock_nested(& dev->struct_mutex, 0U); tmp = i915_gem_init_hw(dev); if (tmp != 0) { drm_err("failed to re-initialize GPU, declaring wedged!\n"); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; orl %0,%1": : "r" (2147483648U), "m" (dev_priv->gpu_error.reset_counter): "memory"); } else { } mutex_unlock(& dev->struct_mutex); intel_modeset_init_hw(dev); spin_lock_irq(& dev_priv->irq_lock); if ((unsigned long )dev_priv->display.hpd_irq_setup != (unsigned long )((void (*)(struct drm_device * ))0)) { (*(dev_priv->display.hpd_irq_setup))(dev); } else { } spin_unlock_irq(& dev_priv->irq_lock); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, 1); drm_modeset_unlock_all(dev); 
intel_dp_mst_resume(dev); intel_hpd_init(dev_priv); drm_helper_hpd_irq_event(dev); intel_opregion_init(dev); intel_fbdev_set_suspend(dev, 0, 0); mutex_lock_nested(& dev_priv->modeset_restore_lock, 0U); dev_priv->modeset_restore = 1; mutex_unlock(& dev_priv->modeset_restore_lock); intel_opregion_notify_adapter(dev, 0); drm_kms_helper_poll_enable(dev); return (0); } } 
/* Early resume: re-enable the PCI device and bus mastering, then dispatch the platform-specific prepare hook (vlv/bxt/skl/hsw paths).  NOTE: this function continues past the end of this chunk. */
static int i915_drm_resume_early(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; int tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; tmp = pci_enable_device(dev->pdev); if (tmp != 0) { return (-5); } else { } pci_set_master(dev->pdev); __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ret = vlv_resume_prepare(dev_priv, 0); } else { } if (ret != 0) { drm_err("Resume prepare failed: %d, continuing anyway\n", ret); } else { } intel_uncore_early_sanitize(dev, 1); __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 9U) { ret = bxt_resume_prepare(dev_priv); } else { goto _L; } } else { _L: /* CIL Label */ __p___3 = dev_priv; if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { ret = skl_resume_prepare(dev_priv); } else { __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { hsw_disable_pc8(dev_priv); } else { __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { hsw_disable_pc8(dev_priv); } else { } } else { } } } } intel_uncore_sanitize(dev); intel_power_domains_init_hw(dev_priv); 
return (ret); } } int i915_resume_legacy(struct drm_device *dev ) { int ret ; int tmp ; { if (dev->switch_power_state == 1) { return (0); } else { } ret = i915_drm_resume_early(dev); if (ret != 0) { return (ret); } else { } tmp = i915_drm_resume(dev); return (tmp); } } int i915_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; bool simulated ; int ret ; bool tmp ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (! i915.reset) { return (0); } else { } intel_reset_gt_powersave(dev); mutex_lock_nested(& dev->struct_mutex, 0U); i915_gem_reset(dev); simulated = dev_priv->gpu_error.stop_rings != 0U; ret = intel_gpu_reset(dev); if ((int )simulated) { printk("\016[drm] Simulated gpu hang, resetting stop_rings\n"); dev_priv->gpu_error.stop_rings = 0U; if (ret == -19) { printk("\016[drm] Reset not implemented, but ignoring error for simulated gpu hangs\n"); ret = 0; } else { } } else { } tmp = i915_stop_ring_allow_warn(dev_priv); if ((int )tmp) { printk("\rdrm/i915: Resetting chip after gpu hang\n"); } else { } if (ret != 0) { drm_err("Failed to reset chip: %i\n", ret); mutex_unlock(& dev->struct_mutex); return (ret); } else { } intel_overlay_reset(dev_priv); dev_priv->gpu_error.reload_in_reset = 1; ret = i915_gem_init_hw(dev); dev_priv->gpu_error.reload_in_reset = 0; mutex_unlock(& dev->struct_mutex); if (ret != 0) { drm_err("Failed hw init on reset %d\n", ret); return (ret); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { intel_enable_gt_powersave(dev); } else { } return (0); } } static int i915_pci_probe(struct pci_dev *pdev , struct pci_device_id const *ent ) { struct intel_device_info *intel_info ; int tmp ; { intel_info = (struct intel_device_info *)ent->driver_data; if ((unsigned int )*((unsigned char *)intel_info + 13UL) != 0U && i915.preliminary_hw_support == 0U) { printk("\016[drm] This hardware requires preliminary hardware 
support.\nSee CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); return (-19); } else { } if ((pdev->devfn & 7U) != 0U) { return (-19); } else { } driver.driver_features = driver.driver_features & 4294967294U; tmp = drm_get_pci_dev(pdev, ent, & driver); return (tmp); } } static void i915_pci_remove(struct pci_dev *pdev ) { struct drm_device *dev ; void *tmp ; { tmp = pci_get_drvdata(pdev); dev = (struct drm_device *)tmp; drm_put_dev(dev); return; } } static int i915_pm_suspend(struct device *dev ) { struct pci_dev *pdev ; struct device const *__mptr ; struct drm_device *drm_dev ; void *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; pdev = (struct pci_dev *)__mptr + 0xffffffffffffff68UL; tmp = pci_get_drvdata(pdev); drm_dev = (struct drm_device *)tmp; if ((unsigned long )drm_dev == (unsigned long )((struct drm_device *)0) || (unsigned long )drm_dev->dev_private == (unsigned long )((void *)0)) { dev_err((struct device const *)dev, "DRM not initialized, aborting suspend.\n"); return (-19); } else { } if (drm_dev->switch_power_state == 1) { return (0); } else { } tmp___0 = i915_drm_suspend(drm_dev); return (tmp___0); } } static int i915_pm_suspend_late(struct device *dev ) { struct drm_device *drm_dev ; struct drm_i915_private *tmp ; int tmp___0 ; { tmp = dev_to_i915(dev); drm_dev = tmp->dev; if (drm_dev->switch_power_state == 1) { return (0); } else { } tmp___0 = i915_drm_suspend_late(drm_dev, 0); return (tmp___0); } } static int i915_pm_poweroff_late(struct device *dev ) { struct drm_device *drm_dev ; struct drm_i915_private *tmp ; int tmp___0 ; { tmp = dev_to_i915(dev); drm_dev = tmp->dev; if (drm_dev->switch_power_state == 1) { return (0); } else { } tmp___0 = i915_drm_suspend_late(drm_dev, 1); return (tmp___0); } } static int i915_pm_resume_early(struct device *dev ) { struct drm_device *drm_dev ; struct drm_i915_private *tmp ; int tmp___0 ; { tmp = dev_to_i915(dev); drm_dev = tmp->dev; if (drm_dev->switch_power_state 
== 1) { return (0); } else { } tmp___0 = i915_drm_resume_early(drm_dev); return (tmp___0); } } static int i915_pm_resume(struct device *dev ) { struct drm_device *drm_dev ; struct drm_i915_private *tmp ; int tmp___0 ; { tmp = dev_to_i915(dev); drm_dev = tmp->dev; if (drm_dev->switch_power_state == 1) { return (0); } else { } tmp___0 = i915_drm_resume(drm_dev); return (tmp___0); } } static int skl_suspend_complete(struct drm_i915_private *dev_priv ) { { intel_csr_load_status_set(dev_priv, 0); skl_uninit_cdclk(dev_priv); return (0); } } static int hsw_suspend_complete(struct drm_i915_private *dev_priv ) { { hsw_enable_pc8(dev_priv); return (0); } } static int bxt_suspend_complete(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; { dev = dev_priv->dev; broxton_ddi_phy_uninit(dev); broxton_uninit_cdclk(dev); bxt_enable_dc9(dev_priv); return (0); } } static int bxt_resume_prepare(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; { dev = dev_priv->dev; bxt_disable_dc9(dev_priv); broxton_init_cdclk(dev); broxton_ddi_phy_init(dev); intel_prepare_ddi(dev); return (0); } } static int skl_resume_prepare(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; { dev = dev_priv->dev; skl_init_cdclk(dev_priv); intel_csr_load_program(dev); return (0); } } static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv ) { struct vlv_s0ix_state *s ; int i ; { s = & dev_priv->vlv_s0ix_state; s->wr_watermark = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16424L, 1); s->gfx_prio_ctrl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16428L, 1); s->arb_mode = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16432L, 1); s->gfx_pend_tlb0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16436L, 1); s->gfx_pend_tlb1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16440L, 1); i = 0; goto ldv_52511; ldv_52510: s->lra_limits[i] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((i + 4111) * 4), 1); i = i + 1; ldv_52511: ; if 
((unsigned int )i <= 12U) { goto ldv_52510; } else { } s->media_max_req_count = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16496L, 1); s->gfx_max_req_count = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16500L, 1); s->render_hwsp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16512L, 1); s->ecochk = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); s->bsd_hwsp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16768L, 1); s->blt_hwsp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 17024L, 1); s->tlb_rd_addr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 18176L, 1); s->g3dctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36900L, 1); s->gsckgctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36904L, 1); s->mbctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36988L, 1); s->ucgctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37888L, 1); s->ucgctl3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37896L, 1); s->rcgctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37904L, 1); s->rcgctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37908L, 1); s->rstctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37920L, 1); s->misccpctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37924L, 1); s->gfxpause = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40960L, 1); s->rpdeuhwtc = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41088L, 1); s->rpdeuc = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41092L, 1); s->ecobus = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41344L, 1); s->pwrdwnupctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41620L, 1); s->rp_down_timeout = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40976L, 1); s->rp_deucsw = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41096L, 1); s->rcubmabdtmr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41136L, 1); s->rcedata = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41148L, 1); s->spare2gh = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41364L, 1); s->gt_imr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278548L, 1); s->gt_ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278556L, 1); s->pm_imr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278564L, 1); s->pm_ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278572L, 1); i = 0; goto ldv_52516; ldv_52515: s->gt_scratch[i] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((i + 80960) * 4), 1); i = i + 1; ldv_52516: ; if ((unsigned int )i <= 7U) { goto ldv_52515; } else { } s->tilectl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1052672L, 1); s->gt_fifoctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1179656L, 1); s->gtlc_wake_ctrl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245328L, 1); s->gtlc_survive = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245336L, 1); s->pmwgicz = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245348L, 1); s->gu_ctl0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581104L, 1); s->gu_ctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581108L, 1); s->pcbr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); s->clock_gate_dis2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581156L, 1); return; } } static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv ) { struct vlv_s0ix_state *s ; u32 val ; int i ; { s = & dev_priv->vlv_s0ix_state; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16424L, s->wr_watermark, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16428L, s->gfx_prio_ctrl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16432L, s->arb_mode | 4294901760U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16436L, s->gfx_pend_tlb0, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16440L, s->gfx_pend_tlb1, 1); i = 0; goto ldv_52527; ldv_52526: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 4111) * 4), s->lra_limits[i], 1); i = i + 1; 
ldv_52527: ; if ((unsigned int )i <= 12U) { goto ldv_52526; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16496L, s->media_max_req_count, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16500L, s->gfx_max_req_count, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16512L, s->render_hwsp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16528L, s->ecochk, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16768L, s->bsd_hwsp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 17024L, s->blt_hwsp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 18176L, s->tlb_rd_addr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36900L, s->g3dctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36904L, s->gsckgctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36988L, s->mbctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37888L, s->ucgctl1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37896L, s->ucgctl3, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37904L, s->rcgctl1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37908L, s->rcgctl2, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37920L, s->rstctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37924L, s->misccpctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40960L, s->gfxpause, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41088L, s->rpdeuhwtc, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41092L, s->rpdeuc, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41344L, s->ecobus, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41620L, s->pwrdwnupctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40976L, s->rp_down_timeout, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41096L, s->rp_deucsw, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41136L, s->rcubmabdtmr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41148L, s->rcedata, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41364L, s->spare2gh, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278548L, s->gt_imr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278556L, s->gt_ier, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278564L, s->pm_imr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278572L, s->pm_ier, 1); i = 0; goto ldv_52532; ldv_52531: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 80960) * 4), s->gt_scratch[i], 1); i = i + 1; ldv_52532: ; if ((unsigned int )i <= 7U) { goto ldv_52531; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1052672L, s->tilectl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1179656L, s->gt_fifoctl, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245328L, 1); val = val & 1U; val = (s->gtlc_wake_ctrl & 4294967294U) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245328L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245336L, 1); val = val & 4U; val = (s->gtlc_survive & 4294967291U) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245336L, val, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245348L, s->pmwgicz, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581104L, s->gu_ctl0, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581108L, s->gu_ctl1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581344L, s->pcbr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581156L, s->clock_gate_dis2, 1); return; } } int vlv_force_gfx_clock(struct drm_i915_private *dev_priv , bool force_on ) { u32 val ; int err ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245336L, 1); val = val & 4294967291U; if ((int )force_on) { val = val | 4U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245336L, val, 1); if (! 
force_on) { return (0); } else { } tmp = msecs_to_jiffies(20U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52550; ldv_52549: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245336L, 1); if ((tmp___0 & 8U) == 0U) { ret__ = -110; } else { } goto ldv_52548; } else { } tmp___1 = drm_can_sleep(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52550: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245336L, 1); if ((tmp___2 & 8U) == 0U) { goto ldv_52549; } else { } ldv_52548: err = ret__; if (err != 0) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245336L, 1); drm_err("timeout waiting for GFX clock force-on (%08x)\n", tmp___3); } else { } return (err); } } static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv , bool allow ) { u32 val ; int err ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; { err = 0; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245328L, 1); val = val & 4294967294U; if ((int )allow) { val = val | 1U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245328L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245328L, 0); tmp = msecs_to_jiffies(1U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52568; ldv_52567: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if (((int )tmp___0 & 1) != (int )allow) { ret__ = -110; } else { } goto ldv_52566; } else { } tmp___1 = drm_can_sleep(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52568: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if (((int )tmp___2 & 1) != (int )allow) { goto ldv_52567; } else { } ldv_52566: err = ret__; if (err != 0) { drm_err("timeout disabling GT waking\n"); } else { 
} return (err); } } static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv , bool wait_for_on ) { u32 mask ; u32 val ; int err ; uint32_t tmp ; uint32_t tmp___0 ; long tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; { mask = 160U; val = (int )wait_for_on ? mask : 0U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if ((tmp & mask) == val) { return (0); } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); drm_ut_debug_printk("vlv_wait_for_gt_wells", "waiting for GT wells to go %s (%08x)\n", (int )wait_for_on ? (char *)"on" : (char *)"off", tmp___0); } else { } tmp___2 = msecs_to_jiffies(3U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52588; ldv_52587: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if ((tmp___3 & mask) != val) { ret__ = -110; } else { } goto ldv_52586; } else { } tmp___4 = drm_can_sleep(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52588: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if ((tmp___5 & mask) != val) { goto ldv_52587; } else { } ldv_52586: err = ret__; if (err != 0) { drm_err("timeout waiting for GT wells to go %s\n", (int )wait_for_on ? 
(char *)"on" : (char *)"off"); } else { } return (err); } } static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv ) { uint32_t tmp ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if ((tmp & 2U) == 0U) { return; } else { } drm_err("GT register access while GT waking disabled\n"); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245332L, 2U, 1); return; } } static int vlv_suspend_complete(struct drm_i915_private *dev_priv ) { u32 mask ; int err ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { vlv_wait_for_gt_wells(dev_priv, 0); mask = 50331648U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245328L, 1); __ret_warn_on = (tmp & mask) != mask; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 1395, "WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); vlv_check_no_gt_access(dev_priv); err = vlv_force_gfx_clock(dev_priv, 1); if (err != 0) { goto err1; } else { } err = vlv_allow_gt_wake(dev_priv, 0); if (err != 0) { goto err2; } else { } __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { vlv_save_gunit_s0ix_state(dev_priv); } else { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 8U) { vlv_save_gunit_s0ix_state(dev_priv); } else { } } err = vlv_force_gfx_clock(dev_priv, 0); if (err != 0) { goto err2; } else { } return (0); err2: vlv_allow_gt_wake(dev_priv, 1); err1: vlv_force_gfx_clock(dev_priv, 0); return (err); } } static int vlv_resume_prepare(struct drm_i915_private *dev_priv , bool rpm_resume ) { 
struct drm_device *dev ; int err ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = dev_priv->dev; ret = vlv_force_gfx_clock(dev_priv, 1); __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { vlv_restore_gunit_s0ix_state(dev_priv); } else { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 8U) { vlv_restore_gunit_s0ix_state(dev_priv); } else { } } err = vlv_allow_gt_wake(dev_priv, 1); if (ret == 0) { ret = err; } else { } err = vlv_force_gfx_clock(dev_priv, 0); if (ret == 0) { ret = err; } else { } vlv_check_no_gt_access(dev_priv); if ((int )rpm_resume) { intel_init_clock_gating(dev); i915_gem_restore_fences(dev); } else { } return (ret); } } static int intel_runtime_suspend(struct device *device ) { struct pci_dev *pdev ; struct device const *__mptr ; struct drm_device *dev ; void *tmp ; struct drm_i915_private *dev_priv ; int ret ; bool __warned ; int __ret_warn_once ; int tmp___0 ; int tmp___1 ; int __ret_warn_on ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; bool __warned___0 ; int __ret_warn_once___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int tmp___6 ; int __ret_warn_on___0 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; int tmp___13 ; struct drm_i915_private *__p___5 ; long tmp___14 ; { __mptr = (struct device const *)device; pdev = (struct pci_dev *)__mptr + 0xffffffffffffff68UL; tmp = pci_get_drvdata(pdev); dev = (struct drm_device *)tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; if (! 
dev_priv->rps.enabled) { tmp___1 = 1; } else { tmp___0 = intel_enable_rc6((struct drm_device const *)dev); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } __ret_warn_once = tmp___1; tmp___4 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___4 != 0L) { __ret_warn_on = ! __warned; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 1467, "WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { __warned = 1; } else { } } else { } tmp___5 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___5 != 0L) { return (-19); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { tmp___6 = 1; } else { tmp___6 = 0; } } else { tmp___6 = 0; } } else { tmp___6 = 0; } } } else { tmp___6 = 0; } } else { tmp___6 = 0; } __ret_warn_once___0 = tmp___6; tmp___9 = ldv__builtin_expect(__ret_warn_once___0 != 0, 0L); if (tmp___9 != 0L) { __ret_warn_on___0 = ! 
__warned___0; tmp___7 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 1470, "WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))"); } else { } tmp___8 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___8 != 0L) { __warned___0 = 1; } else { } } else { } tmp___10 = ldv__builtin_expect(__ret_warn_once___0 != 0, 0L); if (tmp___10 != 0L) { return (-19); } else { } tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("intel_runtime_suspend", "Suspending device\n"); } else { } tmp___13 = mutex_trylock(& dev->struct_mutex); if (tmp___13 == 0) { tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("intel_runtime_suspend", "device lock contention, deffering suspend\n"); } else { } pm_runtime_mark_last_busy(device); return (-11); } else { } i915_gem_release_all_mmaps(dev_priv); mutex_unlock(& dev->struct_mutex); intel_suspend_gt_powersave(dev); intel_runtime_pm_disable_interrupts(dev_priv); ret = intel_suspend_complete(dev_priv); if (ret != 0) { drm_err("Runtime suspend failed, disabling it (%d)\n", ret); intel_runtime_pm_enable_interrupts(dev_priv); return (ret); } else { } ldv_cancel_delayed_work_sync_13(& dev_priv->gpu_error.hangcheck_work); intel_uncore_forcewake_reset(dev, 0); dev_priv->pm.suspended = 1; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { intel_opregion_notify_adapter(dev, 1); } else { intel_opregion_notify_adapter(dev, 3); } assert_forcewakes_inactive(dev_priv); tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("intel_runtime_suspend", "Device suspended\n"); } else { } return (0); } } static int 
intel_runtime_resume(struct device *device ) { struct pci_dev *pdev ; struct device const *__mptr ; struct drm_device *dev ; void *tmp ; struct drm_i915_private *dev_priv ; int ret ; bool __warned ; int __ret_warn_once ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; long tmp___6 ; { __mptr = (struct device const *)device; pdev = (struct pci_dev *)__mptr + 0xffffffffffffff68UL; tmp = pci_get_drvdata(pdev); dev = (struct drm_device *)tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } } else { tmp___0 = 0; } } } else { tmp___0 = 0; } } else { tmp___0 = 0; } __ret_warn_once = tmp___0; tmp___3 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if 
(tmp___3 != 0L) { __ret_warn_on = ! __warned; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_drv.c", 1552, "WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { __warned = 1; } else { } } else { } tmp___4 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___4 != 0L) { return (-19); } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_runtime_resume", "Resuming device\n"); } else { } intel_opregion_notify_adapter(dev, 0); dev_priv->pm.suspended = 0; __p___5 = dev_priv; if ((unsigned int )((unsigned char )__p___5->info.gen) == 6U) { intel_init_pch_refclk(dev); } else { } __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___11 + 45UL) == 0U) { __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___12->info.gen) == 9U) { ret = bxt_resume_prepare(dev_priv); } else { goto _L___1; } } else { _L___1: /* CIL Label */ __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) != 0U) { ret = skl_resume_prepare(dev_priv); } else { __p___7 = dev_priv; if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { hsw_disable_pc8(dev_priv); } else { __p___8 = dev_priv; if ((unsigned int )*((unsigned char *)__p___8 + 45UL) == 0U) { __p___9 = dev_priv; if ((unsigned int )((unsigned char )__p___9->info.gen) == 8U) { hsw_disable_pc8(dev_priv); } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___6 = dev_priv; if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { ret = vlv_resume_prepare(dev_priv, 1); } else { } } } } } i915_gem_init_swizzling(dev); 
/* --- tail of intel_runtime_resume() (function head lies in the previous chunk) ---
 * After the platform-specific resume-prepare dispatch above: restore the ring
 * frequency table, re-enable runtime-PM interrupts and GT powersave, then log
 * the outcome and return the dispatch result `ret`. */
gen6_update_ring_freq(dev); intel_runtime_pm_enable_interrupts(dev_priv); intel_enable_gt_powersave(dev);
if (ret != 0) { drm_err("Runtime resume failed, disabling it (%d)\n", ret); } else {
/* drm_debug bit 0x4 gates driver-level debug prints (CIL expansion of a DRM debug macro). */
tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_runtime_resume", "Device resumed\n"); } else { } }
return (ret); } }
/*
 * intel_suspend_complete - CIL-flattened platform dispatch for runtime suspend.
 * Picks exactly one of bxt_/skl_/hsw_/vlv_suspend_complete(dev_priv) based on
 * device-info tests, or leaves ret = 0 when no platform matches.
 *
 * NOTE(review): the repeated test `*((unsigned char *)__pN + 45UL)` reads a
 * byte at offset 45 of struct drm_i915_private; it is CIL's flattening of an
 * IS_<platform> device-info flag check — which flag each occurrence denotes is
 * not recoverable from this chunk alone.  The gen == 9U / gen == 8U checks read
 * dev_priv->info.gen directly.  Same dispatch shape as the resume path above.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv ) { int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; {
/* gen 9 without the offset-45 flag -> Broxton path */
__p___4 = dev_priv; if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = dev_priv; if ((unsigned int )((unsigned char )__p___5->info.gen) == 9U) { ret = bxt_suspend_complete(dev_priv); } else { goto _L___0; } } else { _L___0: /* CIL Label */
/* flag set -> Skylake path */
__p___3 = dev_priv; if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { ret = skl_suspend_complete(dev_priv); } else {
/* Haswell path; also taken for gen 8 (Broadwell shares the HSW hook) */
__p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { ret = hsw_suspend_complete(dev_priv); } else { __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { ret = hsw_suspend_complete(dev_priv); } else { goto _L; } } else { _L: /* CIL Label */
/* Valleyview path; otherwise nothing to do */
__p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ret = vlv_suspend_complete(dev_priv); } else { ret = 0; } } } } } return (ret); } }
/* i915_pm_ops: positional initializer for struct dev_pm_ops (CIL drops field
 * names).  suspend/resume, freeze/thaw and poweroff/restore legs reuse the
 * same i915_pm_* callbacks; the two trailing non-zero slots (continued on the
 * next source line) are the runtime_suspend/runtime_resume pair. */
static struct dev_pm_ops const i915_pm_ops = {0, 0, & i915_pm_suspend, & i915_pm_resume, & i915_pm_suspend, & i915_pm_resume, & i915_pm_suspend, & i915_pm_resume, & i915_pm_suspend_late, & i915_pm_resume_early, & i915_pm_suspend_late, & i915_pm_resume_early, & i915_pm_poweroff_late, & i915_pm_resume_early, 0, 0, 0, 0, 0, 0, & intel_runtime_suspend, &
intel_runtime_resume, 0};
/* GEM mmap fault handling: open/close are DRM core helpers, page faults go to
 * the driver's i915_gem_fault (positional struct vm_operations_struct init). */
static struct vm_operations_struct const i915_gem_vm_ops = {& drm_gem_vm_open, & drm_gem_vm_close, & i915_gem_fault, 0, 0, 0, 0, 0, 0, 0, 0};
/* Character-device file_operations for /dev/dri: DRM core handlers plus the
 * i915 compat-ioctl shim (positional initializer, unset slots are 0). */
static struct file_operations const i915_driver_fops = {& __this_module, & noop_llseek, & drm_read, 0, 0, 0, 0, & drm_poll, & drm_ioctl, & i915_compat_ioctl, & drm_gem_mmap, 0, & drm_open, 0, & drm_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* Main DRM driver descriptor: load/unload, legacy suspend/resume, debugfs,
 * GEM object lifecycle, PRIME import/export, dumb-buffer hooks, and driver
 * identity strings ("i915" / "Intel Graphics" / date "20150522").  53441U is
 * the CIL-folded driver_features bitmask. */
static struct drm_driver driver = {& i915_driver_load, 0, & i915_driver_open, & i915_driver_preclose, & i915_driver_postclose, & i915_driver_lastclose, & i915_driver_unload, & i915_suspend_legacy, & i915_resume_legacy, 0, 0, 0, & drm_pci_set_busid, 0, 0, 0, & i915_driver_device_is_agp, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_debugfs_init, & i915_debugfs_cleanup, & i915_gem_free_object, 0, 0, & drm_gem_prime_handle_to_fd, & drm_gem_prime_fd_to_handle, & i915_gem_prime_export, & i915_gem_prime_import, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_gem_dumb_create, & i915_gem_mmap_gtt, & drm_gem_dumb_destroy, & i915_gem_vm_ops, 1, 6, 0, (char *)"i915", (char *)"Intel Graphics", (char *)"20150522", 53441U, 0, (struct drm_ioctl_desc const *)(& i915_ioctls), 0, & i915_driver_fops, {0, 0}};
/* PCI driver glue: matches pciidlist, probe/remove callbacks, and embeds
 * i915_pm_ops in the nested struct device_driver.pm slot. */
static struct pci_driver i915_pci_driver = {{0, 0}, "i915", (struct pci_device_id const *)(& pciidlist), & i915_pci_probe, & i915_pci_remove, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_pm_ops, 0}, {{{{{{0}}, 0U, 0U, 0, {0, {0, 0}, 0, 0, 0UL}}}}, {0, 0}}};
/*
 * i915_init - module entry (continues on the next source line).
 * Sets driver.num_ioctls, toggles the 8192U (KMS/MODESET) feature bit from the
 * i915.modeset module parameter, clears it when VGA console text mode is
 * forced (vgacon_text_force) with modeset == -1, and — in the continuation —
 * bails out early with 0 when KMS ends up disabled.
 */
static int i915_init(void) { bool tmp ; long tmp___0 ; int tmp___1 ; { driver.num_ioctls = i915_max_ioctl; if (i915.modeset != 0) { driver.driver_features = driver.driver_features | 8192U; } else { } if (i915.modeset == 1) { driver.driver_features = driver.driver_features | 8192U; } else { } tmp = vgacon_text_force(); if ((int )tmp && i915.modeset == -1) {
/* 4294959103U == ~8192U: mask the MODESET bit back out */
driver.driver_features = driver.driver_features & 4294959103U; } else { } if ((driver.driver_features & 8192U) == 0U) { driver.get_vblank_timestamp = (int
/* --- tail of i915_init(): KMS disabled -> drop the vblank-timestamp hook,
 * log "KMS and UMS disabled.", and return 0 without registering the PCI
 * driver; otherwise optionally set the 65536U (atomic/nuclear pageflip)
 * feature bit and register via drm_pci_init(). --- */
(*)(struct drm_device * , int , int * , struct timeval * , unsigned int ))0; tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_init", "KMS and UMS disabled.\n"); } else { } return (0); } else { } if ((int )i915.nuclear_pageflip) { driver.driver_features = driver.driver_features | 65536U; } else { } tmp___1 = drm_pci_init(& driver, & i915_pci_driver); return (tmp___1); } }
/* i915_exit - module exit: unregister the PCI driver, but only if the KMS
 * feature bit (8192U) is set — mirrors the early-return in i915_init. */
static void i915_exit(void) { { if ((driver.driver_features & 8192U) == 0U) { return; } else { } drm_pci_exit(& driver, & i915_pci_driver); return; } }
/* --- LDV harness state: per-callback return-value slots and externs for the
 * verifier-modelled dev_pm_ops transitions (prepare/suspend_noirq/... _191),
 * pci shutdown (_187) and drm release (_188). Generated, order-insignificant. --- */
int ldv_retval_20 ; extern int ldv_freeze_noirq_191(void) ; int ldv_retval_18 ; extern int ldv_shutdown_187(void) ; extern int ldv_release_188(void) ; int ldv_retval_26 ; int ldv_retval_23 ; int ldv_retval_11 ; int ldv_retval_25 ; int ldv_retval_22 ; int ldv_retval_36 ; int ldv_retval_27 ; int ldv_retval_15 ; int ldv_retval_16 ; int ldv_retval_29 ; int ldv_retval_24 ; void ldv_check_final_state(void) ; int ldv_retval_8 ; int ldv_retval_31 ; int ldv_retval_7 ; extern int ldv_prepare_191(void) ; int ldv_retval_19 ; extern int ldv_resume_noirq_191(void) ; int ldv_retval_14 ; int ldv_retval_17 ; int ldv_retval_41 ; int ldv_retval_40 ; int ldv_retval_12 ; extern int ldv_poweroff_noirq_191(void) ; extern void ldv_initialize(void) ; int ldv_retval_39 ; extern int ldv_restore_noirq_191(void) ; extern int ldv_complete_191(void) ; int ldv_retval_21 ; int ldv_retval_13 ; int ldv_retval_10 ; int ldv_retval_9 ; extern int ldv_thaw_noirq_191(void) ; extern int ldv_suspend_noirq_191(void) ;
/* ldv_pci_driver_187 - harness setup: allocate a zeroed 2976-byte object to
 * stand in for the struct pci_dev passed to the pci_driver callbacks. */
void ldv_pci_driver_187(void) { void *tmp ; { tmp = ldv_init_zalloc(2976UL); i915_pci_driver_group1 = (struct pci_dev *)tmp; return; } }
/* ldv_initialize_drm_driver_188 - harness setup: zero-allocated stand-ins for
 * the drm_driver callback arguments (drm_minor 240B, drm_file 744B,
 * drm_gem_object 248B and drm_device 3320B — sizes are CIL-computed; function
 * body continues on the next source line). */
void ldv_initialize_drm_driver_188(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; { tmp = ldv_init_zalloc(240UL); driver_group0 = (struct drm_minor *)tmp; tmp___0 = ldv_init_zalloc(744UL); driver_group2 = (struct drm_file *)tmp___0; tmp___1 = ldv_init_zalloc(248UL);
/* --- tail of ldv_initialize_drm_driver_188(): finish the GEM-object and
 * drm_device stand-in allocations started on the previous source line. --- */
driver_group1 = (struct drm_gem_object *)tmp___1; tmp___2 = ldv_init_zalloc(3320UL); driver_group3 = (struct drm_device *)tmp___2; return; } }
/* ldv_dev_pm_ops_191 - harness setup: zeroed 1416-byte stand-in for the
 * struct device handed to the dev_pm_ops callbacks. */
void ldv_dev_pm_ops_191(void) { void *tmp ; { tmp = ldv_init_zalloc(1416UL); i915_pm_ops_group1 = (struct device *)tmp; return; } }
/* ldv_initialize_vm_operations_struct_190 - harness setup: zeroed 184-byte
 * stand-in vm_area_struct for the i915_gem_vm_ops callbacks. */
void ldv_initialize_vm_operations_struct_190(void) { void *tmp ; { tmp = ldv_init_zalloc(184UL); i915_gem_vm_ops_group0 = (struct vm_area_struct *)tmp; return; } }
/* ldv_file_operations_189 - harness setup: zeroed inode (1000B) and file
 * (504B) stand-ins for the i915_driver_fops callbacks. */
void ldv_file_operations_189(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_driver_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_driver_fops_group2 = (struct file *)tmp___0; return; } }
/* Forward declarations for the generated per-interface drivers invoked from
 * main()'s nondeterministic dispatch loop (list continues on later lines). */
void ldv_main_exported_31(void) ; void ldv_main_exported_69(void) ; void ldv_main_exported_72(void) ; void ldv_main_exported_71(void) ; void ldv_main_exported_70(void) ; void ldv_main_exported_68(void) ; void ldv_main_exported_67(void) ; void ldv_main_exported_38(void) ; void ldv_main_exported_36(void) ; void ldv_main_exported_37(void) ; void ldv_main_exported_52(void) ; void ldv_main_exported_53(void) ; void ldv_main_exported_51(void) ; void ldv_main_exported_139(void) ; void ldv_main_exported_138(void) ; void ldv_main_exported_25(void) ; void ldv_main_exported_24(void) ; void ldv_main_exported_26(void) ; void ldv_main_exported_170(void) ; void ldv_main_exported_165(void) ; void ldv_main_exported_168(void) ; void ldv_main_exported_167(void) ; void ldv_main_exported_163(void) ; void ldv_main_exported_166(void) ; void ldv_main_exported_164(void) ; void ldv_main_exported_169(void) ; void ldv_main_exported_127(void) ; void ldv_main_exported_90(void) ; void ldv_main_exported_118(void) ; void ldv_main_exported_102(void) ; void ldv_main_exported_129(void) ; void ldv_main_exported_80(void) ; void ldv_main_exported_119(void) ; void ldv_main_exported_99(void) ; void ldv_main_exported_116(void) ; void ldv_main_exported_88(void) ; void ldv_main_exported_125(void) ; void ldv_main_exported_100(void) ; void
ldv_main_exported_110(void) ; void ldv_main_exported_82(void) ; void ldv_main_exported_128(void) ; void ldv_main_exported_84(void) ; void ldv_main_exported_120(void) ; void ldv_main_exported_134(void) ; void ldv_main_exported_95(void) ; void ldv_main_exported_75(void) ; void ldv_main_exported_83(void) ; void ldv_main_exported_108(void) ; void ldv_main_exported_115(void) ; void ldv_main_exported_112(void) ; void ldv_main_exported_109(void) ; void ldv_main_exported_92(void) ; void ldv_main_exported_103(void) ; void ldv_main_exported_89(void) ; void ldv_main_exported_113(void) ; void ldv_main_exported_124(void) ; void ldv_main_exported_104(void) ; void ldv_main_exported_131(void) ; void ldv_main_exported_130(void) ; void ldv_main_exported_122(void) ; void ldv_main_exported_91(void) ; void ldv_main_exported_78(void) ; void ldv_main_exported_121(void) ; void ldv_main_exported_107(void) ; void ldv_main_exported_79(void) ; void ldv_main_exported_87(void) ; void ldv_main_exported_77(void) ; void ldv_main_exported_93(void) ; void ldv_main_exported_106(void) ; void ldv_main_exported_133(void) ; void ldv_main_exported_96(void) ; void ldv_main_exported_105(void) ; void ldv_main_exported_126(void) ; void ldv_main_exported_123(void) ; void ldv_main_exported_85(void) ; void ldv_main_exported_94(void) ; void ldv_main_exported_97(void) ; void ldv_main_exported_114(void) ; void ldv_main_exported_111(void) ; void ldv_main_exported_81(void) ; void ldv_main_exported_98(void) ; void ldv_main_exported_132(void) ; void ldv_main_exported_101(void) ; void ldv_main_exported_117(void) ; void ldv_main_exported_86(void) ; void ldv_main_exported_76(void) ; void ldv_main_exported_46(void) ; void ldv_main_exported_45(void) ; void ldv_main_exported_43(void) ; void ldv_main_exported_44(void) ; void ldv_main_exported_33(void) ; void ldv_main_exported_32(void) ; void ldv_main_exported_34(void) ; void ldv_main_exported_64(void) ; void ldv_main_exported_60(void) ; void ldv_main_exported_181(void) ; void 
ldv_main_exported_180(void) ; void ldv_main_exported_174(void) ; void ldv_main_exported_179(void) ; void ldv_main_exported_186(void) ; void ldv_main_exported_182(void) ; void ldv_main_exported_185(void) ; void ldv_main_exported_183(void) ; void ldv_main_exported_176(void) ; void ldv_main_exported_184(void) ; void ldv_main_exported_172(void) ; void ldv_main_exported_173(void) ; void ldv_main_exported_178(void) ; void ldv_main_exported_177(void) ; void ldv_main_exported_175(void) ; void ldv_main_exported_171(void) ; void ldv_main_exported_27(void) ; void ldv_main_exported_28(void) ; void ldv_main_exported_30(void) ; void ldv_main_exported_29(void) ; void ldv_main_exported_74(void) ; void ldv_main_exported_73(void) ; void ldv_main_exported_66(void) ; void ldv_main_exported_65(void) ; void ldv_main_exported_39(void) ; void ldv_main_exported_40(void) ; void ldv_main_exported_41(void) ; void ldv_main_exported_152(void) ; void ldv_main_exported_155(void) ; void ldv_main_exported_142(void) ; void ldv_main_exported_143(void) ; void ldv_main_exported_158(void) ; void ldv_main_exported_154(void) ; void ldv_main_exported_162(void) ; void ldv_main_exported_144(void) ; void ldv_main_exported_157(void) ; void ldv_main_exported_159(void) ; void ldv_main_exported_147(void) ; void ldv_main_exported_149(void) ; void ldv_main_exported_161(void) ; void ldv_main_exported_156(void) ; void ldv_main_exported_160(void) ; void ldv_main_exported_146(void) ; void ldv_main_exported_153(void) ; void ldv_main_exported_145(void) ; void ldv_main_exported_151(void) ; void ldv_main_exported_148(void) ; void ldv_main_exported_150(void) ; void ldv_main_exported_57(void) ; void ldv_main_exported_56(void) ; void ldv_main_exported_55(void) ; void ldv_main_exported_137(void) ; void ldv_main_exported_61(void) ; void ldv_main_exported_23(void) ; void ldv_main_exported_58(void) ; void ldv_main_exported_50(void) ; void ldv_main_exported_49(void) ; void ldv_main_exported_48(void) ; void 
ldv_main_exported_47(void) ; void ldv_main_exported_59(void) ; void ldv_main_exported_54(void) ; void ldv_main_exported_35(void) ; void ldv_main_exported_141(void) ; void ldv_main_exported_140(void) ; void ldv_main_exported_63(void) ; void ldv_main_exported_42(void) ; void ldv_main_exported_136(void) ; void ldv_main_exported_135(void) ; void ldv_main_exported_62(void) ; int main(void) { struct vm_fault *ldvarg30 ; void *tmp ; struct pci_device_id *ldvarg179 ; void *tmp___0 ; loff_t ldvarg322 ; unsigned long ldvarg319 ; char *ldvarg329 ; void *tmp___1 ; struct vm_area_struct *ldvarg323 ; void *tmp___2 ; loff_t *ldvarg327 ; void *tmp___3 ; struct poll_table_struct *ldvarg326 ; void *tmp___4 ; unsigned int ldvarg320 ; unsigned int ldvarg325 ; size_t ldvarg328 ; int ldvarg321 ; unsigned long ldvarg324 ; uint32_t ldvarg418 ; uint32_t ldvarg422 ; struct dma_buf *ldvarg421 ; void *tmp___5 ; struct drm_master *ldvarg412 ; void *tmp___6 ; uint32_t ldvarg417 ; unsigned long ldvarg416 ; int ldvarg413 ; int ldvarg410 ; int *ldvarg419 ; void *tmp___7 ; struct drm_mode_create_dumb *ldvarg411 ; void *tmp___8 ; uint32_t *ldvarg414 ; void *tmp___9 ; uint64_t *ldvarg423 ; void *tmp___10 ; pm_message_t ldvarg420 ; uint32_t ldvarg415 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; { tmp = ldv_init_zalloc(56UL); ldvarg30 = (struct vm_fault *)tmp; tmp___0 = ldv_init_zalloc(32UL); ldvarg179 = (struct pci_device_id *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg329 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(184UL); ldvarg323 = (struct vm_area_struct *)tmp___2; tmp___3 = ldv_init_zalloc(8UL); ldvarg327 = (loff_t *)tmp___3; tmp___4 = ldv_init_zalloc(16UL); ldvarg326 = (struct poll_table_struct *)tmp___4; tmp___5 = __VERIFIER_nondet_pointer(); ldvarg421 = (struct dma_buf *)tmp___5; tmp___6 = ldv_init_zalloc(352UL); ldvarg412 = (struct drm_master *)tmp___6; tmp___7 = ldv_init_zalloc(4UL); ldvarg419 = (int *)tmp___7; tmp___8 = 
ldv_init_zalloc(32UL); ldvarg411 = (struct drm_mode_create_dumb *)tmp___8; tmp___9 = ldv_init_zalloc(4UL); ldvarg414 = (uint32_t *)tmp___9; tmp___10 = ldv_init_zalloc(8UL); ldvarg423 = (uint64_t *)tmp___10; ldv_initialize(); ldv_memset((void *)(& ldvarg322), 0, 8UL); ldv_memset((void *)(& ldvarg319), 0, 8UL); ldv_memset((void *)(& ldvarg320), 0, 4UL); ldv_memset((void *)(& ldvarg325), 0, 4UL); ldv_memset((void *)(& ldvarg328), 0, 8UL); ldv_memset((void *)(& ldvarg321), 0, 4UL); ldv_memset((void *)(& ldvarg324), 0, 8UL); ldv_memset((void *)(& ldvarg418), 0, 4UL); ldv_memset((void *)(& ldvarg422), 0, 4UL); ldv_memset((void *)(& ldvarg417), 0, 4UL); ldv_memset((void *)(& ldvarg416), 0, 8UL); ldv_memset((void *)(& ldvarg413), 0, 4UL); ldv_memset((void *)(& ldvarg410), 0, 4UL); ldv_memset((void *)(& ldvarg420), 0, 4UL); ldv_memset((void *)(& ldvarg415), 0, 4UL); ldv_state_variable_127 = 0; ldv_state_variable_32 = 0; ldv_state_variable_90 = 0; ldv_state_variable_118 = 0; ldv_state_variable_71 = 0; ldv_state_variable_102 = 0; work_init_18(); ldv_state_variable_18 = 1; ldv_state_variable_125 = 0; work_init_16(); ldv_state_variable_16 = 1; ldv_state_variable_44 = 0; ldv_state_variable_55 = 0; ldv_state_variable_84 = 0; ldv_state_variable_27 = 0; ldv_state_variable_190 = 0; ldv_state_variable_161 = 0; ldv_state_variable_95 = 0; ldv_state_variable_57 = 0; work_init_20(); ldv_state_variable_20 = 1; ldv_state_variable_163 = 0; ldv_state_variable_109 = 0; ldv_state_variable_151 = 0; ldv_state_variable_89 = 0; ldv_state_variable_175 = 0; ldv_state_variable_148 = 0; ldv_state_variable_31 = 0; ldv_state_variable_35 = 0; work_init_11(); ldv_state_variable_11 = 1; ldv_state_variable_78 = 0; ldv_state_variable_93 = 0; ldv_state_variable_106 = 0; ldv_state_variable_157 = 0; ldv_state_variable_65 = 0; ldv_state_variable_29 = 0; ldv_state_variable_138 = 0; ldv_state_variable_114 = 0; ldv_state_variable_58 = 0; ldv_state_variable_153 = 0; work_init_15(); ldv_state_variable_15 = 1; 
ldv_state_variable_137 = 0; ldv_state_variable_81 = 0; ldv_state_variable_60 = 0; ldv_state_variable_101 = 0; ldv_state_variable_73 = 0; ldv_state_variable_86 = 0; ldv_state_variable_76 = 0; ldv_state_variable_62 = 0; ldv_state_variable_67 = 0; ldv_state_variable_165 = 0; ldv_state_variable_139 = 0; ldv_state_variable_129 = 0; work_init_2(); ldv_state_variable_2 = 1; work_init_17(); ldv_state_variable_17 = 1; ldv_state_variable_186 = 0; ldv_state_variable_110 = 0; ldv_state_variable_82 = 0; ldv_state_variable_147 = 0; ldv_state_variable_168 = 0; ldv_state_variable_184 = 0; ldv_state_variable_135 = 0; work_init_14(); ldv_state_variable_14 = 1; ldv_state_variable_112 = 0; ldv_state_variable_69 = 0; ldv_state_variable_191 = 0; ldv_state_variable_172 = 0; ldv_state_variable_145 = 0; ldv_state_variable_49 = 0; ldv_state_variable_178 = 0; ldv_state_variable_24 = 0; ldv_state_variable_187 = 0; ldv_state_variable_140 = 0; ldv_state_variable_124 = 0; ldv_state_variable_104 = 0; ldv_state_variable_131 = 0; ldv_state_variable_181 = 0; ldv_state_variable_121 = 0; ldv_state_variable_79 = 0; ldv_state_variable_154 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_23 = 0; ldv_state_variable_96 = 0; ldv_state_variable_126 = 0; ldv_state_variable_159 = 0; ldv_state_variable_160 = 0; ldv_state_variable_176 = 0; ldv_state_variable_47 = 0; work_init_8(); ldv_state_variable_8 = 1; ldv_state_variable_98 = 0; ldv_state_variable_37 = 0; ldv_state_variable_117 = 0; ldv_state_variable_43 = 0; work_init_5(); ldv_state_variable_5 = 1; ldv_state_variable_170 = 0; ldv_state_variable_33 = 0; timer_init_21(); ldv_state_variable_21 = 1; ldv_state_variable_63 = 0; work_init_7(); ldv_state_variable_7 = 1; ldv_state_variable_26 = 0; ldv_state_variable_80 = 0; ldv_state_variable_119 = 0; ldv_state_variable_180 = 0; ldv_state_variable_99 = 0; ldv_state_variable_179 = 0; ldv_state_variable_162 = 0; ldv_state_variable_72 = 0; ldv_state_variable_74 = 0; ldv_state_variable_182 = 0; 
ldv_state_variable_61 = 0; ldv_state_variable_108 = 0; ldv_state_variable_115 = 0; ldv_state_variable_92 = 0; ldv_state_variable_103 = 0; work_init_10(); ldv_state_variable_10 = 1; ldv_state_variable_113 = 0; ldv_state_variable_152 = 0; ldv_state_variable_189 = 0; ldv_state_variable_142 = 0; ldv_state_variable_91 = 0; ldv_state_variable_167 = 0; ldv_state_variable_48 = 0; ldv_state_variable_107 = 0; ldv_state_variable_87 = 0; ldv_state_variable_174 = 0; ldv_state_variable_77 = 0; ldv_state_variable_133 = 0; ldv_state_variable_149 = 0; ldv_state_variable_123 = 0; ldv_state_variable_50 = 0; ldv_state_variable_39 = 0; ldv_state_variable_64 = 0; ldv_state_variable_97 = 0; work_init_12(); ldv_state_variable_12 = 1; ldv_state_variable_41 = 0; ldv_state_variable_52 = 0; ldv_state_variable_173 = 0; ldv_state_variable_56 = 0; ldv_state_variable_45 = 0; ldv_state_variable_66 = 0; work_init_19(); ldv_state_variable_19 = 1; ldv_state_variable_54 = 0; ldv_state_variable_70 = 0; ldv_state_variable_188 = 0; ldv_state_variable_68 = 0; ldv_state_variable_166 = 0; work_init_1(); ldv_state_variable_1 = 1; ldv_state_variable_136 = 0; ldv_state_variable_88 = 0; ldv_state_variable_116 = 0; ldv_state_variable_144 = 0; ldv_state_variable_141 = 0; ldv_state_variable_30 = 0; ldv_state_variable_100 = 0; ldv_state_variable_25 = 0; ldv_state_variable_128 = 0; ldv_state_variable_28 = 0; ldv_state_variable_120 = 0; ldv_state_variable_156 = 0; ldv_state_variable_134 = 0; ldv_state_variable_40 = 0; ldv_state_variable_75 = 0; ldv_state_variable_83 = 0; ldv_state_variable_59 = 0; ldv_state_variable_177 = 0; ldv_state_variable_150 = 0; ldv_state_variable_155 = 0; ldv_state_variable_130 = 0; ldv_state_variable_53 = 0; ldv_state_variable_122 = 0; ldv_state_variable_143 = 0; ldv_state_variable_158 = 0; ldv_state_variable_42 = 0; timer_init_22(); ldv_state_variable_22 = 1; ldv_state_variable_46 = 0; work_init_13(); ldv_state_variable_13 = 1; ldv_state_variable_105 = 0; work_init_6(); ldv_state_variable_6 
= 1; ldv_state_variable_85 = 0; ldv_state_variable_185 = 0; ldv_state_variable_36 = 0; work_init_3(); ldv_state_variable_3 = 1; ldv_state_variable_183 = 0; ldv_state_variable_94 = 0; ldv_state_variable_146 = 0; ldv_state_variable_51 = 0; work_init_9(); ldv_state_variable_9 = 1; ldv_state_variable_111 = 0; ldv_state_variable_38 = 0; work_init_4(); ldv_state_variable_4 = 1; ldv_state_variable_34 = 0; ldv_state_variable_169 = 0; ldv_state_variable_164 = 0; ldv_state_variable_132 = 0; ldv_state_variable_171 = 0; ldv_53570: tmp___11 = __VERIFIER_nondet_int(); switch (tmp___11) { case 0: ; if (ldv_state_variable_127 != 0) { ldv_main_exported_127(); } else { } goto ldv_53305; case 1: ; if (ldv_state_variable_32 != 0) { ldv_main_exported_32(); } else { } goto ldv_53305; case 2: ; if (ldv_state_variable_90 != 0) { ldv_main_exported_90(); } else { } goto ldv_53305; case 3: ; if (ldv_state_variable_118 != 0) { ldv_main_exported_118(); } else { } goto ldv_53305; case 4: ; if (ldv_state_variable_71 != 0) { ldv_main_exported_71(); } else { } goto ldv_53305; case 5: ; if (ldv_state_variable_102 != 0) { ldv_main_exported_102(); } else { } goto ldv_53305; case 6: ; goto ldv_53305; case 7: ; if (ldv_state_variable_125 != 0) { ldv_main_exported_125(); } else { } goto ldv_53305; case 8: ; goto ldv_53305; case 9: ; if (ldv_state_variable_44 != 0) { ldv_main_exported_44(); } else { } goto ldv_53305; case 10: ; if (ldv_state_variable_55 != 0) { ldv_main_exported_55(); } else { } goto ldv_53305; case 11: ; if (ldv_state_variable_84 != 0) { ldv_main_exported_84(); } else { } goto ldv_53305; case 12: ; if (ldv_state_variable_27 != 0) { ldv_main_exported_27(); } else { } goto ldv_53305; case 13: ; if (ldv_state_variable_190 != 0) { tmp___12 = __VERIFIER_nondet_int(); switch (tmp___12) { case 0: ; if (ldv_state_variable_190 == 1) { i915_gem_fault(i915_gem_vm_ops_group0, ldvarg30); ldv_state_variable_190 = 1; } else { } if (ldv_state_variable_190 == 2) { i915_gem_fault(i915_gem_vm_ops_group0, 
ldvarg30); ldv_state_variable_190 = 2; } else { } goto ldv_53320; case 1: ; if (ldv_state_variable_190 == 2) { drm_gem_vm_close(i915_gem_vm_ops_group0); ldv_state_variable_190 = 1; } else { } goto ldv_53320; case 2: ; if (ldv_state_variable_190 == 1) { drm_gem_vm_open(i915_gem_vm_ops_group0); ldv_state_variable_190 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_53320; default: ldv_stop(); } ldv_53320: ; } else { } goto ldv_53305; case 14: ; if (ldv_state_variable_161 != 0) { ldv_main_exported_161(); } else { } goto ldv_53305; case 15: ; if (ldv_state_variable_95 != 0) { ldv_main_exported_95(); } else { } goto ldv_53305; case 16: ; if (ldv_state_variable_57 != 0) { ldv_main_exported_57(); } else { } goto ldv_53305; case 17: ; goto ldv_53305; case 18: ; if (ldv_state_variable_163 != 0) { ldv_main_exported_163(); } else { } goto ldv_53305; case 19: ; if (ldv_state_variable_109 != 0) { ldv_main_exported_109(); } else { } goto ldv_53305; case 20: ; if (ldv_state_variable_151 != 0) { ldv_main_exported_151(); } else { } goto ldv_53305; case 21: ; if (ldv_state_variable_89 != 0) { ldv_main_exported_89(); } else { } goto ldv_53305; case 22: ; if (ldv_state_variable_175 != 0) { ldv_main_exported_175(); } else { } goto ldv_53305; case 23: ; if (ldv_state_variable_148 != 0) { ldv_main_exported_148(); } else { } goto ldv_53305; case 24: ; if (ldv_state_variable_31 != 0) { ldv_main_exported_31(); } else { } goto ldv_53305; case 25: ; if (ldv_state_variable_35 != 0) { ldv_main_exported_35(); } else { } goto ldv_53305; case 26: ; goto ldv_53305; case 27: ; if (ldv_state_variable_78 != 0) { ldv_main_exported_78(); } else { } goto ldv_53305; case 28: ; if (ldv_state_variable_93 != 0) { ldv_main_exported_93(); } else { } goto ldv_53305; case 29: ; if (ldv_state_variable_106 != 0) { ldv_main_exported_106(); } else { } goto ldv_53305; case 30: ; if (ldv_state_variable_157 != 0) { ldv_main_exported_157(); } else { } goto ldv_53305; case 31: ; if (ldv_state_variable_65 != 0) { 
ldv_main_exported_65(); } else { } goto ldv_53305; case 32: ; if (ldv_state_variable_29 != 0) { ldv_main_exported_29(); } else { } goto ldv_53305; case 33: ; if (ldv_state_variable_138 != 0) { ldv_main_exported_138(); } else { } goto ldv_53305; case 34: ; if (ldv_state_variable_114 != 0) { ldv_main_exported_114(); } else { } goto ldv_53305; case 35: ; if (ldv_state_variable_58 != 0) { ldv_main_exported_58(); } else { } goto ldv_53305; case 36: ; if (ldv_state_variable_153 != 0) { ldv_main_exported_153(); } else { } goto ldv_53305; case 37: ; goto ldv_53305; case 38: ; if (ldv_state_variable_137 != 0) { ldv_main_exported_137(); } else { } goto ldv_53305; case 39: ; if (ldv_state_variable_81 != 0) { ldv_main_exported_81(); } else { } goto ldv_53305; case 40: ; if (ldv_state_variable_60 != 0) { ldv_main_exported_60(); } else { } goto ldv_53305; case 41: ; if (ldv_state_variable_101 != 0) { ldv_main_exported_101(); } else { } goto ldv_53305; case 42: ; if (ldv_state_variable_73 != 0) { ldv_main_exported_73(); } else { } goto ldv_53305; case 43: ; if (ldv_state_variable_86 != 0) { ldv_main_exported_86(); } else { } goto ldv_53305; case 44: ; if (ldv_state_variable_76 != 0) { ldv_main_exported_76(); } else { } goto ldv_53305; case 45: ; if (ldv_state_variable_62 != 0) { ldv_main_exported_62(); } else { } goto ldv_53305; case 46: ; if (ldv_state_variable_67 != 0) { ldv_main_exported_67(); } else { } goto ldv_53305; case 47: ; if (ldv_state_variable_165 != 0) { ldv_main_exported_165(); } else { } goto ldv_53305; case 48: ; if (ldv_state_variable_139 != 0) { ldv_main_exported_139(); } else { } goto ldv_53305; case 49: ; if (ldv_state_variable_129 != 0) { ldv_main_exported_129(); } else { } goto ldv_53305; case 50: ; goto ldv_53305; case 51: ; goto ldv_53305; case 52: ; if (ldv_state_variable_186 != 0) { ldv_main_exported_186(); } else { } goto ldv_53305; case 53: ; if (ldv_state_variable_110 != 0) { ldv_main_exported_110(); } else { } goto ldv_53305; case 54: ; if 
(ldv_state_variable_82 != 0) { ldv_main_exported_82(); } else { } goto ldv_53305; case 55: ; if (ldv_state_variable_147 != 0) { ldv_main_exported_147(); } else { } goto ldv_53305; case 56: ; if (ldv_state_variable_168 != 0) { ldv_main_exported_168(); } else { } goto ldv_53305; case 57: ; if (ldv_state_variable_184 != 0) { ldv_main_exported_184(); } else { } goto ldv_53305; case 58: ; if (ldv_state_variable_135 != 0) { ldv_main_exported_135(); } else { } goto ldv_53305; case 59: ; goto ldv_53305; case 60: ; if (ldv_state_variable_112 != 0) { ldv_main_exported_112(); } else { } goto ldv_53305; case 61: ; if (ldv_state_variable_69 != 0) { ldv_main_exported_69(); } else { } goto ldv_53305; case 62: ; if (ldv_state_variable_191 != 0) { tmp___13 = __VERIFIER_nondet_int(); switch (tmp___13) { case 0: ; if (ldv_state_variable_191 == 2) { ldv_retval_27 = intel_runtime_resume(i915_pm_ops_group1); if (ldv_retval_27 == 0) { ldv_state_variable_191 = 1; ref_cnt = ref_cnt - 1; } else { } } else { } goto ldv_53374; case 1: ; if (ldv_state_variable_191 == 15) { ldv_retval_26 = i915_pm_resume(i915_pm_ops_group1); if (ldv_retval_26 == 0) { ldv_state_variable_191 = 16; } else { } } else { } goto ldv_53374; case 2: ; if (ldv_state_variable_191 == 11) { ldv_retval_25 = i915_pm_resume_early(i915_pm_ops_group1); if (ldv_retval_25 == 0) { ldv_state_variable_191 = 15; } else { } } else { } goto ldv_53374; case 3: ; if (ldv_state_variable_191 == 3) { ldv_retval_24 = i915_pm_suspend(i915_pm_ops_group1); if (ldv_retval_24 == 0) { ldv_state_variable_191 = 4; } else { } } else { } goto ldv_53374; case 4: ; if (ldv_state_variable_191 == 5) { ldv_retval_23 = i915_pm_poweroff_late(i915_pm_ops_group1); if (ldv_retval_23 == 0) { ldv_state_variable_191 = 9; } else { } } else { } goto ldv_53374; case 5: ; if (ldv_state_variable_191 == 14) { ldv_retval_22 = i915_pm_resume(i915_pm_ops_group1); if (ldv_retval_22 == 0) { ldv_state_variable_191 = 16; } else { } } else { } goto ldv_53374; case 6: ; if 
(ldv_state_variable_191 == 1) { ldv_retval_21 = intel_runtime_suspend(i915_pm_ops_group1); if (ldv_retval_21 == 0) { ldv_state_variable_191 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_53374; case 7: ; if (ldv_state_variable_191 == 9) { ldv_retval_20 = i915_pm_resume_early(i915_pm_ops_group1); if (ldv_retval_20 == 0) { ldv_state_variable_191 = 14; } else { } } else { } goto ldv_53374; case 8: ; if (ldv_state_variable_191 == 7) { ldv_retval_19 = i915_pm_resume_early(i915_pm_ops_group1); if (ldv_retval_19 == 0) { ldv_state_variable_191 = 13; } else { } } else { } goto ldv_53374; case 9: ; if (ldv_state_variable_191 == 6) { ldv_retval_18 = i915_pm_suspend_late(i915_pm_ops_group1); if (ldv_retval_18 == 0) { ldv_state_variable_191 = 11; } else { } } else { } goto ldv_53374; case 10: ; if (ldv_state_variable_191 == 3) { ldv_retval_17 = i915_pm_suspend(i915_pm_ops_group1); if (ldv_retval_17 == 0) { ldv_state_variable_191 = 5; } else { } } else { } goto ldv_53374; case 11: ; if (ldv_state_variable_191 == 3) { ldv_retval_16 = i915_pm_suspend(i915_pm_ops_group1); if (ldv_retval_16 == 0) { ldv_state_variable_191 = 6; } else { } } else { } goto ldv_53374; case 12: ; if (ldv_state_variable_191 == 4) { ldv_retval_15 = i915_pm_suspend_late(i915_pm_ops_group1); if (ldv_retval_15 == 0) { ldv_state_variable_191 = 7; } else { } } else { } goto ldv_53374; case 13: ; if (ldv_state_variable_191 == 13) { ldv_retval_14 = i915_pm_resume(i915_pm_ops_group1); if (ldv_retval_14 == 0) { ldv_state_variable_191 = 16; } else { } } else { } goto ldv_53374; case 14: ; if (ldv_state_variable_191 == 8) { ldv_retval_13 = ldv_resume_noirq_191(); if (ldv_retval_13 == 0) { ldv_state_variable_191 = 13; } else { } } else { } goto ldv_53374; case 15: ; if (ldv_state_variable_191 == 6) { ldv_retval_12 = ldv_freeze_noirq_191(); if (ldv_retval_12 == 0) { ldv_state_variable_191 = 12; } else { } } else { } goto ldv_53374; case 16: ; if (ldv_state_variable_191 == 1) { ldv_retval_11 = 
ldv_prepare_191(); if (ldv_retval_11 == 0) { ldv_state_variable_191 = 3; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_53374; case 17: ; if (ldv_state_variable_191 == 12) { ldv_retval_10 = ldv_thaw_noirq_191(); if (ldv_retval_10 == 0) { ldv_state_variable_191 = 15; } else { } } else { } goto ldv_53374; case 18: ; if (ldv_state_variable_191 == 5) { ldv_retval_9 = ldv_poweroff_noirq_191(); if (ldv_retval_9 == 0) { ldv_state_variable_191 = 10; } else { } } else { } goto ldv_53374; case 19: ; if (ldv_state_variable_191 == 10) { ldv_retval_8 = ldv_restore_noirq_191(); if (ldv_retval_8 == 0) { ldv_state_variable_191 = 14; } else { } } else { } goto ldv_53374; case 20: ; if (ldv_state_variable_191 == 4) { ldv_retval_7 = ldv_suspend_noirq_191(); if (ldv_retval_7 == 0) { ldv_state_variable_191 = 8; } else { } } else { } goto ldv_53374; case 21: ; if (ldv_state_variable_191 == 16) { ldv_complete_191(); ldv_state_variable_191 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_53374; default: ldv_stop(); } ldv_53374: ; } else { } goto ldv_53305; case 63: ; if (ldv_state_variable_172 != 0) { ldv_main_exported_172(); } else { } goto ldv_53305; case 64: ; if (ldv_state_variable_145 != 0) { ldv_main_exported_145(); } else { } goto ldv_53305; case 65: ; if (ldv_state_variable_49 != 0) { ldv_main_exported_49(); } else { } goto ldv_53305; case 66: ; if (ldv_state_variable_178 != 0) { ldv_main_exported_178(); } else { } goto ldv_53305; case 67: ; if (ldv_state_variable_24 != 0) { ldv_main_exported_24(); } else { } goto ldv_53305; case 68: ; if (ldv_state_variable_187 != 0) { tmp___14 = __VERIFIER_nondet_int(); switch (tmp___14) { case 0: ; if (ldv_state_variable_187 == 1) { ldv_retval_29 = i915_pci_probe(i915_pci_driver_group1, (struct pci_device_id const *)ldvarg179); if (ldv_retval_29 == 0) { ldv_state_variable_187 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_53404; case 1: ; if (ldv_state_variable_187 == 2) { i915_pci_remove(i915_pci_driver_group1); 
ldv_state_variable_187 = 1; } else { } goto ldv_53404; case 2: ; if (ldv_state_variable_187 == 2) { ldv_shutdown_187(); ldv_state_variable_187 = 2; } else { } goto ldv_53404; default: ldv_stop(); } ldv_53404: ; } else { } goto ldv_53305; case 69: ; if (ldv_state_variable_140 != 0) { ldv_main_exported_140(); } else { } goto ldv_53305; case 70: ; if (ldv_state_variable_124 != 0) { ldv_main_exported_124(); } else { } goto ldv_53305; case 71: ; if (ldv_state_variable_104 != 0) { ldv_main_exported_104(); } else { } goto ldv_53305; case 72: ; if (ldv_state_variable_131 != 0) { ldv_main_exported_131(); } else { } goto ldv_53305; case 73: ; if (ldv_state_variable_181 != 0) { ldv_main_exported_181(); } else { } goto ldv_53305; case 74: ; if (ldv_state_variable_121 != 0) { ldv_main_exported_121(); } else { } goto ldv_53305; case 75: ; if (ldv_state_variable_79 != 0) { ldv_main_exported_79(); } else { } goto ldv_53305; case 76: ; if (ldv_state_variable_154 != 0) { ldv_main_exported_154(); } else { } goto ldv_53305; case 77: ; if (ldv_state_variable_0 != 0) { tmp___15 = __VERIFIER_nondet_int(); switch (tmp___15) { case 0: ; if (ldv_state_variable_0 == 2 && ref_cnt == 0) { i915_exit(); ldv_state_variable_0 = 3; goto ldv_final; } else { } goto ldv_53419; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_31 = i915_init(); if (ldv_retval_31 != 0) { ldv_state_variable_0 = 3; goto ldv_final; } else { } if (ldv_retval_31 == 0) { ldv_state_variable_0 = 2; ldv_state_variable_171 = 1; ldv_initialize_bin_attribute_171(); ldv_state_variable_132 = 1; ldv_state_variable_164 = 1; ldv_initialize_i915_power_well_ops_164(); ldv_state_variable_169 = 1; ldv_initialize_i915_power_well_ops_169(); ldv_state_variable_34 = 1; ldv_initialize_drm_connector_helper_funcs_34(); ldv_state_variable_38 = 1; ldv_initialize_drm_connector_funcs_38(); ldv_state_variable_146 = 1; ldv_file_operations_146(); ldv_state_variable_111 = 1; ldv_state_variable_51 = 1; ldv_initialize_drm_encoder_funcs_51(); 
ldv_state_variable_94 = 1; ldv_initialize_trace_event_class_94(); ldv_state_variable_183 = 1; ldv_state_variable_36 = 1; ldv_state_variable_185 = 1; ldv_state_variable_85 = 1; ldv_initialize_trace_event_class_85(); ldv_state_variable_105 = 1; ldv_state_variable_46 = 1; ldv_initialize_mipi_dsi_host_ops_46(); ldv_state_variable_42 = 1; ldv_initialize_drm_panel_funcs_42(); ldv_state_variable_158 = 1; ldv_file_operations_158(); ldv_state_variable_143 = 1; ldv_file_operations_143(); ldv_state_variable_122 = 1; ldv_state_variable_53 = 1; ldv_initialize_drm_connector_funcs_53(); ldv_state_variable_130 = 1; ldv_state_variable_155 = 1; ldv_file_operations_155(); ldv_state_variable_150 = 1; ldv_file_operations_150(); ldv_state_variable_177 = 1; ldv_initialize_device_attribute_177(); ldv_state_variable_59 = 1; ldv_initialize_intel_dvo_dev_ops_59(); ldv_state_variable_134 = 1; ldv_state_variable_40 = 1; ldv_initialize_drm_connector_helper_funcs_40(); ldv_state_variable_75 = 1; ldv_initialize_trace_event_class_75(); ldv_state_variable_83 = 1; ldv_initialize_trace_event_class_83(); ldv_state_variable_156 = 1; ldv_file_operations_156(); ldv_state_variable_120 = 1; ldv_state_variable_28 = 1; ldv_state_variable_128 = 1; ldv_state_variable_25 = 1; ldv_initialize_drm_connector_helper_funcs_25(); ldv_state_variable_100 = 1; ldv_initialize_trace_event_class_100(); ldv_state_variable_144 = 1; ldv_file_operations_144(); ldv_state_variable_141 = 1; ldv_initialize_dma_buf_ops_141(); ldv_state_variable_30 = 1; ldv_initialize_drm_connector_funcs_30(); ldv_state_variable_136 = 1; ldv_state_variable_116 = 1; ldv_state_variable_88 = 1; ldv_initialize_trace_event_class_88(); ldv_state_variable_166 = 1; ldv_initialize_i915_power_well_ops_166(); ldv_state_variable_68 = 1; ldv_initialize_drm_mode_config_funcs_68(); ldv_state_variable_188 = 1; ldv_initialize_drm_driver_188(); ldv_state_variable_70 = 1; ldv_initialize_drm_plane_funcs_70(); ldv_state_variable_54 = 1; ldv_state_variable_66 = 1; 
ldv_initialize_fb_ops_66(); ldv_state_variable_45 = 1; ldv_state_variable_56 = 1; ldv_initialize_drm_connector_helper_funcs_56(); ldv_state_variable_173 = 1; ldv_state_variable_52 = 1; ldv_initialize_drm_connector_helper_funcs_52(); ldv_state_variable_41 = 1; ldv_initialize_drm_connector_funcs_41(); ldv_state_variable_97 = 1; ldv_initialize_trace_event_class_97(); ldv_state_variable_64 = 1; ldv_initialize_intel_dvo_dev_ops_64(); ldv_state_variable_39 = 1; ldv_state_variable_50 = 1; ldv_initialize_drm_connector_funcs_50(); ldv_state_variable_149 = 1; ldv_file_operations_149(); ldv_state_variable_123 = 1; ldv_state_variable_133 = 1; ldv_state_variable_77 = 1; ldv_initialize_trace_event_class_77(); ldv_state_variable_174 = 1; ldv_state_variable_87 = 1; ldv_initialize_trace_event_class_87(); ldv_state_variable_107 = 1; ldv_state_variable_48 = 1; ldv_state_variable_167 = 1; ldv_initialize_i915_power_well_ops_167(); ldv_state_variable_91 = 1; ldv_initialize_trace_event_class_91(); ldv_state_variable_142 = 1; ldv_file_operations_142(); ldv_state_variable_189 = 1; ldv_file_operations_189(); ldv_state_variable_152 = 1; ldv_file_operations_152(); ldv_state_variable_113 = 1; ldv_state_variable_103 = 1; ldv_initialize_trace_event_class_103(); ldv_state_variable_92 = 1; ldv_initialize_trace_event_class_92(); ldv_state_variable_115 = 1; ldv_state_variable_108 = 1; ldv_state_variable_61 = 1; ldv_initialize_intel_dvo_dev_ops_61(); ldv_state_variable_182 = 1; ldv_state_variable_74 = 1; ldv_initialize_i915_audio_component_ops_74(); ldv_state_variable_72 = 1; ldv_initialize_drm_crtc_helper_funcs_72(); ldv_state_variable_162 = 1; ldv_file_operations_162(); ldv_state_variable_179 = 1; ldv_state_variable_99 = 1; ldv_initialize_trace_event_class_99(); ldv_state_variable_180 = 1; ldv_initialize_bin_attribute_180(); ldv_state_variable_119 = 1; ldv_state_variable_80 = 1; ldv_initialize_trace_event_class_80(); ldv_state_variable_26 = 1; ldv_initialize_drm_connector_funcs_26(); 
ldv_state_variable_63 = 1; ldv_initialize_intel_dvo_dev_ops_63(); ldv_state_variable_33 = 1; ldv_initialize_drm_connector_funcs_33(); ldv_state_variable_170 = 1; ldv_initialize_i915_power_well_ops_170(); ldv_state_variable_43 = 1; ldv_initialize_drm_connector_funcs_43(); ldv_state_variable_117 = 1; ldv_state_variable_37 = 1; ldv_initialize_drm_connector_helper_funcs_37(); ldv_state_variable_98 = 1; ldv_initialize_trace_event_class_98(); ldv_state_variable_47 = 1; ldv_initialize_drm_dp_mst_topology_cbs_47(); ldv_state_variable_176 = 1; ldv_initialize_device_attribute_176(); ldv_state_variable_160 = 1; ldv_file_operations_160(); ldv_state_variable_159 = 1; ldv_file_operations_159(); ldv_state_variable_126 = 1; ldv_state_variable_96 = 1; ldv_initialize_trace_event_class_96(); ldv_state_variable_23 = 1; ldv_initialize_vga_switcheroo_client_ops_23(); ldv_state_variable_154 = 1; ldv_file_operations_154(); ldv_state_variable_79 = 1; ldv_initialize_trace_event_class_79(); ldv_state_variable_121 = 1; ldv_state_variable_181 = 1; ldv_initialize_bin_attribute_181(); ldv_state_variable_131 = 1; ldv_state_variable_104 = 1; ldv_initialize_trace_event_class_104(); ldv_state_variable_124 = 1; ldv_state_variable_140 = 1; ldv_initialize_drm_i915_gem_object_ops_140(); ldv_state_variable_187 = 1; ldv_pci_driver_187(); ldv_state_variable_24 = 1; ldv_state_variable_178 = 1; ldv_state_variable_49 = 1; ldv_initialize_drm_connector_helper_funcs_49(); ldv_state_variable_145 = 1; ldv_file_operations_145(); ldv_state_variable_172 = 1; ldv_state_variable_191 = 1; ldv_dev_pm_ops_191(); ldv_state_variable_69 = 1; ldv_initialize_drm_framebuffer_funcs_69(); ldv_state_variable_112 = 1; ldv_state_variable_135 = 1; ldv_initialize_drm_i915_gem_object_ops_135(); ldv_state_variable_184 = 1; ldv_state_variable_168 = 1; ldv_initialize_i915_power_well_ops_168(); ldv_state_variable_147 = 1; ldv_file_operations_147(); ldv_state_variable_82 = 1; ldv_initialize_trace_event_class_82(); ldv_state_variable_110 = 
1; ldv_state_variable_186 = 1; ldv_state_variable_129 = 1; ldv_state_variable_139 = 1; ldv_initialize_drm_i915_gem_object_ops_139(); ldv_state_variable_165 = 1; ldv_initialize_i915_power_well_ops_165(); ldv_state_variable_67 = 1; ldv_state_variable_62 = 1; ldv_initialize_intel_dvo_dev_ops_62(); ldv_state_variable_76 = 1; ldv_initialize_trace_event_class_76(); ldv_state_variable_86 = 1; ldv_initialize_trace_event_class_86(); ldv_state_variable_73 = 1; ldv_initialize_component_ops_73(); ldv_state_variable_101 = 1; ldv_initialize_trace_event_class_101(); ldv_state_variable_60 = 1; ldv_initialize_intel_dvo_dev_ops_60(); ldv_state_variable_81 = 1; ldv_initialize_trace_event_class_81(); ldv_state_variable_137 = 1; ldv_initialize_drm_i915_gem_object_ops_137(); ldv_state_variable_153 = 1; ldv_file_operations_153(); ldv_state_variable_58 = 1; ldv_initialize_drm_plane_helper_funcs_58(); ldv_state_variable_114 = 1; ldv_state_variable_138 = 1; ldv_initialize_drm_i915_gem_object_ops_138(); ldv_state_variable_29 = 1; ldv_initialize_drm_connector_helper_funcs_29(); ldv_state_variable_65 = 1; ldv_initialize_drm_fb_helper_funcs_65(); ldv_state_variable_157 = 1; ldv_file_operations_157(); ldv_state_variable_106 = 1; ldv_state_variable_93 = 1; ldv_initialize_trace_event_class_93(); ldv_state_variable_78 = 1; ldv_initialize_trace_event_class_78(); ldv_state_variable_35 = 1; ldv_initialize_i2c_algorithm_35(); ldv_state_variable_31 = 1; ldv_initialize_backlight_ops_31(); ldv_state_variable_148 = 1; ldv_file_operations_148(); ldv_state_variable_175 = 1; ldv_state_variable_89 = 1; ldv_initialize_trace_event_class_89(); ldv_state_variable_151 = 1; ldv_file_operations_151(); ldv_state_variable_109 = 1; ldv_state_variable_163 = 1; ldv_initialize_i915_power_well_ops_163(); ldv_state_variable_57 = 1; ldv_initialize_drm_connector_funcs_57(); ldv_state_variable_95 = 1; ldv_initialize_trace_event_class_95(); ldv_state_variable_161 = 1; ldv_file_operations_161(); ldv_state_variable_190 = 1; 
ldv_initialize_vm_operations_struct_190(); ldv_state_variable_27 = 1; ldv_initialize_i2c_algorithm_27(); ldv_state_variable_84 = 1; ldv_initialize_trace_event_class_84(); ldv_state_variable_55 = 1; ldv_state_variable_44 = 1; ldv_initialize_drm_connector_helper_funcs_44(); ldv_state_variable_125 = 1; ldv_state_variable_102 = 1; ldv_initialize_trace_event_class_102(); ldv_state_variable_71 = 1; ldv_initialize_drm_crtc_funcs_71(); ldv_state_variable_118 = 1; ldv_state_variable_90 = 1; ldv_initialize_trace_event_class_90(); ldv_state_variable_32 = 1; ldv_state_variable_127 = 1; } else { } } else { } goto ldv_53419; default: ldv_stop(); } ldv_53419: ; } else { } goto ldv_53305; case 78: ; if (ldv_state_variable_23 != 0) { ldv_main_exported_23(); } else { } goto ldv_53305; case 79: ; if (ldv_state_variable_96 != 0) { ldv_main_exported_96(); } else { } goto ldv_53305; case 80: ; if (ldv_state_variable_126 != 0) { ldv_main_exported_126(); } else { } goto ldv_53305; case 81: ; if (ldv_state_variable_159 != 0) { ldv_main_exported_159(); } else { } goto ldv_53305; case 82: ; if (ldv_state_variable_160 != 0) { ldv_main_exported_160(); } else { } goto ldv_53305; case 83: ; if (ldv_state_variable_176 != 0) { ldv_main_exported_176(); } else { } goto ldv_53305; case 84: ; if (ldv_state_variable_47 != 0) { ldv_main_exported_47(); } else { } goto ldv_53305; case 85: ; goto ldv_53305; case 86: ; if (ldv_state_variable_98 != 0) { ldv_main_exported_98(); } else { } goto ldv_53305; case 87: ; if (ldv_state_variable_37 != 0) { ldv_main_exported_37(); } else { } goto ldv_53305; case 88: ; if (ldv_state_variable_117 != 0) { ldv_main_exported_117(); } else { } goto ldv_53305; case 89: ; if (ldv_state_variable_43 != 0) { ldv_main_exported_43(); } else { } goto ldv_53305; case 90: ; goto ldv_53305; case 91: ; if (ldv_state_variable_170 != 0) { ldv_main_exported_170(); } else { } goto ldv_53305; case 92: ; if (ldv_state_variable_33 != 0) { ldv_main_exported_33(); } else { } goto ldv_53305; 
case 93: ; goto ldv_53305; case 94: ; if (ldv_state_variable_63 != 0) { ldv_main_exported_63(); } else { } goto ldv_53305; case 95: ; goto ldv_53305; case 96: ; if (ldv_state_variable_26 != 0) { ldv_main_exported_26(); } else { } goto ldv_53305; case 97: ; if (ldv_state_variable_80 != 0) { ldv_main_exported_80(); } else { } goto ldv_53305; case 98: ; if (ldv_state_variable_119 != 0) { ldv_main_exported_119(); } else { } goto ldv_53305; case 99: ; if (ldv_state_variable_180 != 0) { ldv_main_exported_180(); } else { } goto ldv_53305; case 100: ; if (ldv_state_variable_99 != 0) { ldv_main_exported_99(); } else { } goto ldv_53305; case 101: ; if (ldv_state_variable_179 != 0) { ldv_main_exported_179(); } else { } goto ldv_53305; case 102: ; if (ldv_state_variable_162 != 0) { ldv_main_exported_162(); } else { } goto ldv_53305; case 103: ; if (ldv_state_variable_72 != 0) { ldv_main_exported_72(); } else { } goto ldv_53305; case 104: ; if (ldv_state_variable_74 != 0) { ldv_main_exported_74(); } else { } goto ldv_53305; case 105: ; if (ldv_state_variable_182 != 0) { ldv_main_exported_182(); } else { } goto ldv_53305; case 106: ; if (ldv_state_variable_61 != 0) { ldv_main_exported_61(); } else { } goto ldv_53305; case 107: ; if (ldv_state_variable_108 != 0) { ldv_main_exported_108(); } else { } goto ldv_53305; case 108: ; if (ldv_state_variable_115 != 0) { ldv_main_exported_115(); } else { } goto ldv_53305; case 109: ; if (ldv_state_variable_92 != 0) { ldv_main_exported_92(); } else { } goto ldv_53305; case 110: ; if (ldv_state_variable_103 != 0) { ldv_main_exported_103(); } else { } goto ldv_53305; case 111: ; goto ldv_53305; case 112: ; if (ldv_state_variable_113 != 0) { ldv_main_exported_113(); } else { } goto ldv_53305; case 113: ; if (ldv_state_variable_152 != 0) { ldv_main_exported_152(); } else { } goto ldv_53305; case 114: ; if (ldv_state_variable_189 != 0) { tmp___16 = __VERIFIER_nondet_int(); switch (tmp___16) { case 0: ; if (ldv_state_variable_189 == 2) { 
drm_read(i915_driver_fops_group2, ldvarg329, ldvarg328, ldvarg327); ldv_state_variable_189 = 2; } else { } goto ldv_53460; case 1: ; if (ldv_state_variable_189 == 2) { drm_poll(i915_driver_fops_group2, ldvarg326); ldv_state_variable_189 = 2; } else { } if (ldv_state_variable_189 == 1) { drm_poll(i915_driver_fops_group2, ldvarg326); ldv_state_variable_189 = 1; } else { } goto ldv_53460; case 2: ; if (ldv_state_variable_189 == 2) { i915_compat_ioctl(i915_driver_fops_group2, ldvarg325, ldvarg324); ldv_state_variable_189 = 2; } else { } goto ldv_53460; case 3: ; if (ldv_state_variable_189 == 1) { ldv_retval_36 = drm_open(i915_driver_fops_group1, i915_driver_fops_group2); if (ldv_retval_36 == 0) { ldv_state_variable_189 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_53460; case 4: ; if (ldv_state_variable_189 == 2) { drm_gem_mmap(i915_driver_fops_group2, ldvarg323); ldv_state_variable_189 = 2; } else { } if (ldv_state_variable_189 == 1) { drm_gem_mmap(i915_driver_fops_group2, ldvarg323); ldv_state_variable_189 = 1; } else { } goto ldv_53460; case 5: ; if (ldv_state_variable_189 == 2) { drm_release(i915_driver_fops_group1, i915_driver_fops_group2); ldv_state_variable_189 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_53460; case 6: ; if (ldv_state_variable_189 == 2) { noop_llseek(i915_driver_fops_group2, ldvarg322, ldvarg321); ldv_state_variable_189 = 2; } else { } goto ldv_53460; case 7: ; if (ldv_state_variable_189 == 2) { drm_ioctl(i915_driver_fops_group2, ldvarg320, ldvarg319); ldv_state_variable_189 = 2; } else { } goto ldv_53460; default: ldv_stop(); } ldv_53460: ; } else { } goto ldv_53305; case 115: ; if (ldv_state_variable_142 != 0) { ldv_main_exported_142(); } else { } goto ldv_53305; case 116: ; if (ldv_state_variable_91 != 0) { ldv_main_exported_91(); } else { } goto ldv_53305; case 117: ; if (ldv_state_variable_167 != 0) { ldv_main_exported_167(); } else { } goto ldv_53305; case 118: ; if (ldv_state_variable_48 != 0) { ldv_main_exported_48(); } 
else { } goto ldv_53305; case 119: ; if (ldv_state_variable_107 != 0) { ldv_main_exported_107(); } else { } goto ldv_53305; case 120: ; if (ldv_state_variable_87 != 0) { ldv_main_exported_87(); } else { } goto ldv_53305; case 121: ; if (ldv_state_variable_174 != 0) { ldv_main_exported_174(); } else { } goto ldv_53305; case 122: ; if (ldv_state_variable_77 != 0) { ldv_main_exported_77(); } else { } goto ldv_53305; case 123: ; if (ldv_state_variable_133 != 0) { ldv_main_exported_133(); } else { } goto ldv_53305; case 124: ; if (ldv_state_variable_149 != 0) { ldv_main_exported_149(); } else { } goto ldv_53305; case 125: ; if (ldv_state_variable_123 != 0) { ldv_main_exported_123(); } else { } goto ldv_53305; case 126: ; if (ldv_state_variable_50 != 0) { ldv_main_exported_50(); } else { } goto ldv_53305; case 127: ; if (ldv_state_variable_39 != 0) { ldv_main_exported_39(); } else { } goto ldv_53305; case 128: ; if (ldv_state_variable_64 != 0) { ldv_main_exported_64(); } else { } goto ldv_53305; case 129: ; if (ldv_state_variable_97 != 0) { ldv_main_exported_97(); } else { } goto ldv_53305; case 130: ; goto ldv_53305; case 131: ; if (ldv_state_variable_41 != 0) { ldv_main_exported_41(); } else { } goto ldv_53305; case 132: ; if (ldv_state_variable_52 != 0) { ldv_main_exported_52(); } else { } goto ldv_53305; case 133: ; if (ldv_state_variable_173 != 0) { ldv_main_exported_173(); } else { } goto ldv_53305; case 134: ; if (ldv_state_variable_56 != 0) { ldv_main_exported_56(); } else { } goto ldv_53305; case 135: ; if (ldv_state_variable_45 != 0) { ldv_main_exported_45(); } else { } goto ldv_53305; case 136: ; if (ldv_state_variable_66 != 0) { ldv_main_exported_66(); } else { } goto ldv_53305; case 137: ; goto ldv_53305; case 138: ; if (ldv_state_variable_54 != 0) { ldv_main_exported_54(); } else { } goto ldv_53305; case 139: ; if (ldv_state_variable_70 != 0) { ldv_main_exported_70(); } else { } goto ldv_53305; case 140: ; if (ldv_state_variable_188 != 0) { tmp___17 = 
__VERIFIER_nondet_int(); switch (tmp___17) { case 0: ; if (ldv_state_variable_188 == 2) { i915_driver_lastclose(driver_group3); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_driver_lastclose(driver_group3); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_driver_lastclose(driver_group3); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 1: ; if (ldv_state_variable_188 == 2) { i915_gem_mmap_gtt(driver_group2, driver_group3, ldvarg422, ldvarg423); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_gem_mmap_gtt(driver_group2, driver_group3, ldvarg422, ldvarg423); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_gem_mmap_gtt(driver_group2, driver_group3, ldvarg422, ldvarg423); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 2: ; if (ldv_state_variable_188 == 2) { i915_gem_prime_import(driver_group3, ldvarg421); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_gem_prime_import(driver_group3, ldvarg421); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_gem_prime_import(driver_group3, ldvarg421); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 3: ; if (ldv_state_variable_188 == 2) { ldv_retval_41 = i915_suspend_legacy(driver_group3, ldvarg420); if (ldv_retval_41 == 0) { ldv_state_variable_188 = 3; } else { } } else { } goto ldv_53496; case 4: ; if (ldv_state_variable_188 == 2) { i915_debugfs_cleanup(driver_group0); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_debugfs_cleanup(driver_group0); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_debugfs_cleanup(driver_group0); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 5: ; if (ldv_state_variable_188 == 1) { ldv_retval_40 = i915_driver_open(driver_group3, driver_group2); if (ldv_retval_40 == 0) { ldv_state_variable_188 = 2; ref_cnt = 
ref_cnt + 1; } else { } } else { } goto ldv_53496; case 6: ; if (ldv_state_variable_188 == 2) { i915_gem_free_object(driver_group1); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_gem_free_object(driver_group1); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_gem_free_object(driver_group1); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 7: ; if (ldv_state_variable_188 == 2) { i915_driver_unload(driver_group3); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_driver_unload(driver_group3); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_driver_unload(driver_group3); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 8: ; if (ldv_state_variable_188 == 2) { drm_gem_prime_handle_to_fd(driver_group3, driver_group2, ldvarg417, ldvarg418, ldvarg419); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { drm_gem_prime_handle_to_fd(driver_group3, driver_group2, ldvarg417, ldvarg418, ldvarg419); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { drm_gem_prime_handle_to_fd(driver_group3, driver_group2, ldvarg417, ldvarg418, ldvarg419); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 9: ; if (ldv_state_variable_188 == 3) { ldv_retval_39 = i915_resume_legacy(driver_group3); if (ldv_retval_39 == 0) { ldv_state_variable_188 = 2; } else { } } else { } goto ldv_53496; case 10: ; if (ldv_state_variable_188 == 2) { i915_driver_load(driver_group3, ldvarg416); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_driver_load(driver_group3, ldvarg416); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_driver_load(driver_group3, ldvarg416); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 11: ; if (ldv_state_variable_188 == 2) { drm_gem_dumb_destroy(driver_group2, driver_group3, ldvarg415); ldv_state_variable_188 = 2; } else { 
} if (ldv_state_variable_188 == 1) { drm_gem_dumb_destroy(driver_group2, driver_group3, ldvarg415); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { drm_gem_dumb_destroy(driver_group2, driver_group3, ldvarg415); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 12: ; if (ldv_state_variable_188 == 2) { drm_gem_prime_fd_to_handle(driver_group3, driver_group2, ldvarg413, ldvarg414); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { drm_gem_prime_fd_to_handle(driver_group3, driver_group2, ldvarg413, ldvarg414); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { drm_gem_prime_fd_to_handle(driver_group3, driver_group2, ldvarg413, ldvarg414); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 13: ; if (ldv_state_variable_188 == 2) { i915_driver_postclose(driver_group3, driver_group2); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_driver_postclose(driver_group3, driver_group2); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_driver_postclose(driver_group3, driver_group2); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 14: ; if (ldv_state_variable_188 == 2) { drm_pci_set_busid(driver_group3, ldvarg412); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { drm_pci_set_busid(driver_group3, ldvarg412); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { drm_pci_set_busid(driver_group3, ldvarg412); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 15: ; if (ldv_state_variable_188 == 2) { i915_gem_dumb_create(driver_group2, driver_group3, ldvarg411); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_gem_dumb_create(driver_group2, driver_group3, ldvarg411); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_gem_dumb_create(driver_group2, driver_group3, ldvarg411); ldv_state_variable_188 = 3; } else { } goto 
ldv_53496; case 16: ; if (ldv_state_variable_188 == 2) { i915_gem_prime_export(driver_group3, driver_group1, ldvarg410); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_gem_prime_export(driver_group3, driver_group1, ldvarg410); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_gem_prime_export(driver_group3, driver_group1, ldvarg410); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 17: ; if (ldv_state_variable_188 == 2) { i915_debugfs_init(driver_group0); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_debugfs_init(driver_group0); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_debugfs_init(driver_group0); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 18: ; if (ldv_state_variable_188 == 2) { i915_driver_preclose(driver_group3, driver_group2); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_driver_preclose(driver_group3, driver_group2); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_driver_preclose(driver_group3, driver_group2); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 19: ; if (ldv_state_variable_188 == 2) { i915_driver_device_is_agp(driver_group3); ldv_state_variable_188 = 2; } else { } if (ldv_state_variable_188 == 1) { i915_driver_device_is_agp(driver_group3); ldv_state_variable_188 = 1; } else { } if (ldv_state_variable_188 == 3) { i915_driver_device_is_agp(driver_group3); ldv_state_variable_188 = 3; } else { } goto ldv_53496; case 20: ; if (ldv_state_variable_188 == 2) { ldv_release_188(); ldv_state_variable_188 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_188 == 3) { ldv_release_188(); ldv_state_variable_188 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_53496; default: ldv_stop(); } ldv_53496: ; } else { } goto ldv_53305; case 141: ; if (ldv_state_variable_68 != 0) { ldv_main_exported_68(); } else { } goto 
ldv_53305; case 142: ; if (ldv_state_variable_166 != 0) { ldv_main_exported_166(); } else { } goto ldv_53305; case 143: ; goto ldv_53305; case 144: ; if (ldv_state_variable_136 != 0) { ldv_main_exported_136(); } else { } goto ldv_53305; case 145: ; if (ldv_state_variable_88 != 0) { ldv_main_exported_88(); } else { } goto ldv_53305; case 146: ; if (ldv_state_variable_116 != 0) { ldv_main_exported_116(); } else { } goto ldv_53305; case 147: ; if (ldv_state_variable_144 != 0) { ldv_main_exported_144(); } else { } goto ldv_53305; case 148: ; if (ldv_state_variable_141 != 0) { ldv_main_exported_141(); } else { } goto ldv_53305; case 149: ; if (ldv_state_variable_30 != 0) { ldv_main_exported_30(); } else { } goto ldv_53305; case 150: ; if (ldv_state_variable_100 != 0) { ldv_main_exported_100(); } else { } goto ldv_53305; case 151: ; if (ldv_state_variable_25 != 0) { ldv_main_exported_25(); } else { } goto ldv_53305; case 152: ; if (ldv_state_variable_128 != 0) { ldv_main_exported_128(); } else { } goto ldv_53305; case 153: ; if (ldv_state_variable_28 != 0) { ldv_main_exported_28(); } else { } goto ldv_53305; case 154: ; if (ldv_state_variable_120 != 0) { ldv_main_exported_120(); } else { } goto ldv_53305; case 155: ; if (ldv_state_variable_156 != 0) { ldv_main_exported_156(); } else { } goto ldv_53305; case 156: ; if (ldv_state_variable_134 != 0) { ldv_main_exported_134(); } else { } goto ldv_53305; case 157: ; if (ldv_state_variable_40 != 0) { ldv_main_exported_40(); } else { } goto ldv_53305; case 158: ; if (ldv_state_variable_75 != 0) { ldv_main_exported_75(); } else { } goto ldv_53305; case 159: ; if (ldv_state_variable_83 != 0) { ldv_main_exported_83(); } else { } goto ldv_53305; case 160: ; if (ldv_state_variable_59 != 0) { ldv_main_exported_59(); } else { } goto ldv_53305; case 161: ; if (ldv_state_variable_177 != 0) { ldv_main_exported_177(); } else { } goto ldv_53305; case 162: ; if (ldv_state_variable_150 != 0) { ldv_main_exported_150(); } else { } goto 
ldv_53305; case 163: ; if (ldv_state_variable_155 != 0) { ldv_main_exported_155(); } else { } goto ldv_53305; case 164: ; if (ldv_state_variable_130 != 0) { ldv_main_exported_130(); } else { } goto ldv_53305; case 165: ; if (ldv_state_variable_53 != 0) { ldv_main_exported_53(); } else { } goto ldv_53305; case 166: ; if (ldv_state_variable_122 != 0) { ldv_main_exported_122(); } else { } goto ldv_53305; case 167: ; if (ldv_state_variable_143 != 0) { ldv_main_exported_143(); } else { } goto ldv_53305; case 168: ; if (ldv_state_variable_158 != 0) { ldv_main_exported_158(); } else { } goto ldv_53305; case 169: ; if (ldv_state_variable_42 != 0) { ldv_main_exported_42(); } else { } goto ldv_53305; case 170: ; goto ldv_53305; case 171: ; if (ldv_state_variable_46 != 0) { ldv_main_exported_46(); } else { } goto ldv_53305; case 172: ; goto ldv_53305; case 173: ; if (ldv_state_variable_105 != 0) { ldv_main_exported_105(); } else { } goto ldv_53305; case 174: ; goto ldv_53305; case 175: ; if (ldv_state_variable_85 != 0) { ldv_main_exported_85(); } else { } goto ldv_53305; case 176: ; if (ldv_state_variable_185 != 0) { ldv_main_exported_185(); } else { } goto ldv_53305; case 177: ; if (ldv_state_variable_36 != 0) { ldv_main_exported_36(); } else { } goto ldv_53305; case 178: ; goto ldv_53305; case 179: ; if (ldv_state_variable_183 != 0) { ldv_main_exported_183(); } else { } goto ldv_53305; case 180: ; if (ldv_state_variable_94 != 0) { ldv_main_exported_94(); } else { } goto ldv_53305; case 181: ; if (ldv_state_variable_146 != 0) { ldv_main_exported_146(); } else { } goto ldv_53305; case 182: ; if (ldv_state_variable_51 != 0) { ldv_main_exported_51(); } else { } goto ldv_53305; case 183: ; goto ldv_53305; case 184: ; if (ldv_state_variable_111 != 0) { ldv_main_exported_111(); } else { } goto ldv_53305; case 185: ; if (ldv_state_variable_38 != 0) { ldv_main_exported_38(); } else { } goto ldv_53305; case 186: ; goto ldv_53305; case 187: ; if (ldv_state_variable_34 != 0) { 
/* --- tail of the CIL-generated nondeterministic environment main(): the
 * remaining switch cases dispatch ldv_main_exported_* entry points for the
 * registered interface state machines, then fall through to the final-state
 * check (ldv_check_final_state).  Generated code: kept byte-identical,
 * comments only. --- */
ldv_main_exported_34(); } else { } goto ldv_53305; case 188: ; if (ldv_state_variable_169 != 0) { ldv_main_exported_169(); } else { } goto ldv_53305; case 189: ; if (ldv_state_variable_164 != 0) { ldv_main_exported_164(); } else { } goto ldv_53305; case 190: ; if (ldv_state_variable_132 != 0) { ldv_main_exported_132(); } else { } goto ldv_53305; case 191: ; if (ldv_state_variable_171 != 0) { ldv_main_exported_171(); } else { } goto ldv_53305; default: ldv_stop(); } ldv_53305: ; goto ldv_53570; ldv_final: ldv_check_final_state(); return 0; } }
/* LDV environment-model wrapper around queue_work_on(): forwards the call,
 * then records the work item as activated in the LDV work model via
 * activate_work_18 (the constant 2 is a model state tag -- presumably
 * "queued/pending"; confirm against the LDV rule that generated this
 * harness).  Returns whatever queue_work_on() returned. */
bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* Same model wrapper for queue_delayed_work_on(): it is the embedded
 * work_struct of the delayed_work (&arg->work) that gets activated in the
 * model. */
bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Another generated instance of the queue_work_on() wrapper; body is
 * identical to ldv_queue_work_on_5 apart from the generated return-type
 * alias (ldv_func_ret_type___1). */
bool ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* flush_workqueue() wrapper: after the real flush, call_and_disable_all_18(2)
 * updates the model -- presumably running and disabling all pending modeled
 * work items in state 2; verify against the LDV work model. */
void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* queue_delayed_work_on() wrapper instance 9; the definition continues on
 * the next source line. */
bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2,
/* (continuation of ldv_queue_delayed_work_on_9 from the previous line:
 * forward the call, then activate the embedded work_struct in the model) */
ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* cancel_work_sync() wrapper: forwards the call and marks the work item
 * disabled in the LDV work model (disable_work_18). */
bool ldv_cancel_work_sync_10(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(ldv_func_arg1); return (ldv_func_res); } }
/* Second generated instance of the cancel_work_sync() wrapper; identical
 * body apart from the generated return-type alias. */
bool ldv_cancel_work_sync_11(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(ldv_func_arg1); return (ldv_func_res); } }
/* cancel_delayed_work_sync() wrapper: disables the embedded work_struct
 * (&arg->work) in the model. */
bool ldv_cancel_delayed_work_sync_12(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
/* Second generated instance of the cancel_delayed_work_sync() wrapper. */
bool ldv_cancel_delayed_work_sync_13(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
/* Forward declarations for the next group of generated workqueue wrappers
 * (defined further down in the file). */
bool ldv_queue_work_on_27(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_29(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_28(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_31(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_30(struct workqueue_struct *ldv_func_arg1 ) ;
/* Global i915 module-parameter block (struct i915_params), initialized with
 * the driver's built-in defaults; the meaning of each field is fixed by the
 * struct declaration elsewhere in this file -- consult i915_params there. */
struct i915_params i915 = {-1, 1, -1, 0U, 0, -1, -1, -1, -1, -1, -1, 0, 1U, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0};
/* queue_work_on() wrapper instance 27; the definition continues on the next
 * source line. */
bool ldv_queue_work_on_27(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct
/* (continuation of ldv_queue_work_on_27 from the previous line: forward to
 * queue_work_on(), then activate the work item in the LDV work model) */
*ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* queue_delayed_work_on() wrapper instance 28: the embedded work_struct of
 * the delayed_work is what gets activated in the model. */
bool ldv_queue_delayed_work_on_28(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* queue_work_on() wrapper instance 29; identical body to instance 27 apart
 * from the generated return-type alias. */
bool ldv_queue_work_on_29(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* flush_workqueue() wrapper instance 30: real flush, then the model runs
 * and disables all pending work items tagged 2 (see call_and_disable_all_18). */
void ldv_flush_workqueue_30(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* queue_delayed_work_on() wrapper instance 31. */
bool ldv_queue_delayed_work_on_31(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Forward declarations for wrapper instances 41/43/42/45 (defined later);
 * the last declaration continues on the next source line. */
bool ldv_queue_work_on_41(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_43(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_42(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_45(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work
/* PCI config-space helpers and the display-register save path.
 *
 * pci_read_config_word()/pci_write_config_word() are inline delegations to
 * pci_bus_read_config_word()/pci_bus_write_config_word() using dev->bus and
 * dev->devfn, exactly as in the upstream kernel.
 *
 * i915_save_display(dev) snapshots display registers into
 * dev_priv->regfile.* through the uncore mmio_readl hook:
 *   - saveDSPARB      on gen <= 4 (offset display_mmio_offset + 0x70030 = 458800);
 *   - saveLVDS        from 921984 (0xE1180, PCH LVDS) when pch_type is 1 or 2,
 *                     else from 397696 (0x61180) on gen <= 4 mobile parts whose
 *                     device_id != 13687 (the byte tests at __p + 44UL appear to
 *                     be CIL-lowered bitfield reads of the device-info flags —
 *                     confirm against struct intel_device_info layout);
 *   - savePP_CONTROL/ON_DELAYS/OFF_DELAYS/DIVISOR from the PCH panel-power
 *     block (815620..815632) when a PCH is present, else from the legacy block
 *     (397828..397840);
 *   - saveFBC_CONTROL from 12808 (0x3208) on non-PCH gen <= 4 non-mobile parts.
 * All register offsets are numeric because CIL constant-folded the i915
 * register macros.  Generated code — do not hand-edit. */
*ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_44(struct workqueue_struct *ldv_func_arg1 ) ; extern int pci_bus_read_config_word(struct pci_bus * , unsigned int , int , u16 * ) ; extern int pci_bus_write_config_word(struct pci_bus * , unsigned int , int , u16 ) ; __inline static int pci_read_config_word(struct pci_dev const *dev , int where , u16 *val ) { int tmp ; { tmp = pci_bus_read_config_word(dev->bus, dev->devfn, where, val); return (tmp); } } __inline static int pci_write_config_word(struct pci_dev const *dev , int where , u16 val ) { int tmp ; { tmp = pci_bus_write_config_word(dev->bus, dev->devfn, where, (int )val); return (tmp); } } void intel_i2c_reset(struct drm_device *dev ) ; void i915_redisable_vga(struct drm_device *dev ) ; void intel_fbc_disable(struct drm_device *dev ) ; static void i915_save_display(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { dev_priv->regfile.saveDSPARB = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458800U), 1); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type == 1U) { dev_priv->regfile.saveLVDS = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 921984L, 1); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___4->pch_type == 2U) { dev_priv->regfile.saveLVDS = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 921984L, 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 4U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) != 13687U) { dev_priv->regfile.saveLVDS = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397696L, 1); } else { } } else { } } else { } } } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___6->pch_type != 0U) { dev_priv->regfile.savePP_CONTROL = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815620L, 1); dev_priv->regfile.savePP_ON_DELAYS = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815624L, 1); dev_priv->regfile.savePP_OFF_DELAYS = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815628L, 1); dev_priv->regfile.savePP_DIVISOR = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815632L, 1); } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { dev_priv->regfile.savePP_CONTROL = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397828L, 1); dev_priv->regfile.savePP_ON_DELAYS = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397832L, 1); dev_priv->regfile.savePP_OFF_DELAYS = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397836L, 1); dev_priv->regfile.savePP_DIVISOR = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397840L, 1); } else { } } __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) <= 4U) { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 44UL) == 0U) { dev_priv->regfile.saveFBC_CONTROL = 
/* Tail of i915_save_display() (the saveFBC_CONTROL read from 12808/0x3208),
 * then i915_restore_display(dev): the mirror of the save path.  It writes the
 * saved regfile values back through the uncore mmio_writel hook:
 *   - saveDSPARB on gen <= 4;
 *   - saveLVDS masked with 2147483647 (0x7FFFFFFF — clears the top bit,
 *     presumably the LVDS port-enable bit, so the panel is not turned on here;
 *     confirm against the LVDS register definition) to the same per-PCH /
 *     legacy offsets chosen in the save path;
 *   - panel-power PP_ON_DELAYS/OFF_DELAYS/DIVISOR are restored *before*
 *     PP_CONTROL, for both the PCH (815624..815620) and legacy (397832..397828)
 *     blocks — ordering looks deliberate (program delays before re-enabling);
 *   - calls intel_fbc_disable(), conditionally restores saveFBC_CONTROL
 *     (12808) on non-PCH gen <= 4 non-mobile parts, then i915_redisable_vga().
 * Also begins i915_save_state(dev), which takes dev->struct_mutex, calls
 * i915_save_display(), and on gen == 4 saves PCI config word 204 (0xCC,
 * GCDGMBUS) into regfile.saveGCDGMBUS.  Generated code — do not hand-edit. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12808L, 1); } else { } } else { } } else { } return; } } static void i915_restore_display(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 mask ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mask = 4294967295U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458800U), dev_priv->regfile.saveDSPARB, 1); } else { } mask = 2147483647U; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type == 1U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 921984L, dev_priv->regfile.saveLVDS & mask, 1); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___4->pch_type == 2U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 921984L, dev_priv->regfile.saveLVDS & mask, 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 4U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) != 13687U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397696L, dev_priv->regfile.saveLVDS & mask, 1); } else { } } else { } } else { } } } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___6->pch_type != 0U) { 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 815624L, dev_priv->regfile.savePP_ON_DELAYS, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 815628L, dev_priv->regfile.savePP_OFF_DELAYS, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 815632L, dev_priv->regfile.savePP_DIVISOR, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 815620L, dev_priv->regfile.savePP_CONTROL, 1); } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397832L, dev_priv->regfile.savePP_ON_DELAYS, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397836L, dev_priv->regfile.savePP_OFF_DELAYS, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397840L, dev_priv->regfile.savePP_DIVISOR, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397828L, dev_priv->regfile.savePP_CONTROL, 1); } else { } } intel_fbc_disable(dev); __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) <= 4U) { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 44UL) == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12808L, dev_priv->regfile.saveFBC_CONTROL, 1); } else { } } else { } } else { } i915_redisable_vga(dev); return; } } int i915_save_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev->struct_mutex, 0U); i915_save_display(dev); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 4U) { pci_read_config_word((struct pci_dev const *)dev->pdev, 204, & dev_priv->regfile.saveGCDGMBUS); } else { } __p___0 = to_i915((struct drm_device const 
/* Remainder of i915_save_state() and all of i915_restore_state(), plus the
 * definitions of ldv wrappers 41-44 declared earlier.
 *
 * i915_save_state() (continued): on gen <= 6 saves CACHE_MODE_0 (8480/0x2120),
 * always saves MI_ARB_STATE (8420/0x20E4), then two CIL-lowered loops (labels
 * ldv_48135/48136 and ldv_48138/48139 are the loop body/test of what was a
 * for-loop) save the SWF0[0..15]/SWF1[0..15] scratch registers at
 * display_mmio_offset + (i<<2) + 463888/459792 and SWF2[0..2] at +467988,
 * then releases dev->struct_mutex and returns 0.
 *
 * i915_restore_state() mirrors this under the same mutex: restores fences via
 * i915_gem_restore_fences(), writes back GCDGMBUS (PCI word 204) on gen == 4,
 * calls i915_restore_display(), then writes CACHE_MODE_0 and MI_ARB_STATE
 * OR'ed with 4294901760U (0xFFFF0000 — the upper-16 write-mask bits these
 * masked registers require; confirm against the register spec), restores the
 * SWF arrays, unlocks, resets GMBUS via intel_i2c_reset(), returns 0.
 *
 * The trailing ldv_queue_work_on_41/ldv_queue_delayed_work_on_42/
 * ldv_queue_work_on_43/ldv_flush_workqueue_44 are further LDV wrappers
 * identical in shape to 27-31 above.  Generated code — do not hand-edit. */
*)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 6U) { dev_priv->regfile.saveCACHE_MODE_0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8480L, 1); } else { } dev_priv->regfile.saveMI_ARB_STATE = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8420L, 1); i = 0; goto ldv_48136; ldv_48135: dev_priv->regfile.saveSWF0[i] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )(i << 2)) + 463888U), 1); dev_priv->regfile.saveSWF1[i] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )(i << 2)) + 459792U), 1); i = i + 1; ldv_48136: ; if (i <= 15) { goto ldv_48135; } else { } i = 0; goto ldv_48139; ldv_48138: dev_priv->regfile.saveSWF2[i] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )(i << 2)) + 467988U), 1); i = i + 1; ldv_48139: ; if (i <= 2) { goto ldv_48138; } else { } mutex_unlock(& dev->struct_mutex); return (0); } } int i915_restore_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev->struct_mutex, 0U); i915_gem_restore_fences(dev); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 4U) { pci_write_config_word((struct pci_dev const *)dev->pdev, 204, (int )dev_priv->regfile.saveGCDGMBUS); } else { } i915_restore_display(dev); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 6U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, dev_priv->regfile.saveCACHE_MODE_0 | 4294901760U, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8420L, dev_priv->regfile.saveMI_ARB_STATE | 4294901760U, 1); i = 0; goto ldv_48159; ldv_48158: 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )(i << 2)) + 463888U), dev_priv->regfile.saveSWF0[i], 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )(i << 2)) + 459792U), dev_priv->regfile.saveSWF1[i], 1); i = i + 1; ldv_48159: ; if (i <= 15) { goto ldv_48158; } else { } i = 0; goto ldv_48162; ldv_48161: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )(i << 2)) + 467988U), dev_priv->regfile.saveSWF2[i], 1); i = i + 1; ldv_48162: ; if (i <= 2) { goto ldv_48161; } else { } mutex_unlock(& dev->struct_mutex); intel_i2c_reset(dev); return (0); } } bool ldv_queue_work_on_41(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_42(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_43(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_44(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool 
/* ldv_queue_delayed_work_on_45() — last of the LDV workqueue wrappers — and
 * the CIL-flattened prelude of the next translation unit (the i915 sysfs
 * code): extern declarations for kstrtouint/snprintf/memcpy/mutex/sysfs/
 * kobject/kfree helpers, more numbered ldv wrapper prototypes (55-65),
 * plus small inline delegations:
 *   - kstrtou32() forwards to kstrtouint() (u32 and unsigned int are the
 *     same width here);
 *   - kmalloc() forwards to __kmalloc(); kzalloc() calls kmalloc() with
 *     flags | 32768U (0x8000 — presumably __GFP_ZERO; confirm against the
 *     gfp.h value for this kernel version);
 *   - i915_error_state_buf_release() just kfree()s eb->buf.
 * Generated code — do not hand-edit. */
ldv_queue_delayed_work_on_45(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern int kstrtouint(char const * , unsigned int , unsigned int * ) ; __inline static int kstrtou32(char const *s , unsigned int base , u32 *res ) { int tmp ; { tmp = kstrtouint(s, base, res); return (tmp); } } extern int snprintf(char * , size_t , char const * , ...) ; extern void *memcpy(void * , void const * , size_t ) ; extern int mutex_lock_interruptible_nested(struct mutex * , unsigned int ) ; bool ldv_queue_work_on_55(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_57(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_56(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_59(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_58(struct workqueue_struct *ldv_func_arg1 ) ; extern bool flush_delayed_work(struct delayed_work * ) ; bool ldv_flush_delayed_work_60(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_61(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_62(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_63(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_64(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_65(struct delayed_work *ldv_func_arg1 ) ; extern char const 
power_group_name[] ; extern int sysfs_create_files(struct kobject * , struct attribute const ** ) ; extern void sysfs_remove_files(struct kobject * , struct attribute const ** ) ; extern int sysfs_create_bin_file(struct kobject * , struct bin_attribute const * ) ; extern void sysfs_remove_bin_file(struct kobject * , struct bin_attribute const * ) ; extern int sysfs_merge_group(struct kobject * , struct attribute_group const * ) ; extern void sysfs_unmerge_group(struct kobject * , struct attribute_group const * ) ; extern void kfree(void const * ) ; extern void *__kmalloc(size_t , gfp_t ) ; __inline static void *kmalloc(size_t size , gfp_t flags ) { void *tmp___2 ; { tmp___2 = __kmalloc(size, flags); return (tmp___2); } } __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc(size, flags | 32768U); return (tmp); } } void call_and_disable_work_18(struct work_struct *work ) ; extern int device_create_bin_file(struct device * , struct bin_attribute const * ) ; extern void device_remove_bin_file(struct device * , struct bin_attribute const * ) ; int i915_mutex_lock_interruptible(struct drm_device *dev ) ; int i915_gpu_idle(struct drm_device *dev ) ; int i915_error_state_to_str(struct drm_i915_error_state_buf *m , struct i915_error_state_file_priv const *error_priv ) ; int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf , struct drm_i915_private *i915___0 , size_t count , loff_t pos ) ; __inline static void i915_error_state_buf_release(struct drm_i915_error_state_buf *eb ) { { kfree((void const *)eb->buf); return; } } void i915_error_state_get(struct drm_device *dev , struct i915_error_state_file_priv *error_priv ) ; void i915_error_state_put(struct i915_error_state_file_priv *error_priv ) ; void i915_destroy_error_state(struct drm_device *dev ) ; void i915_setup_sysfs(struct drm_device *dev ) ; void i915_teardown_sysfs(struct drm_device *dev ) ; void intel_set_rps(struct drm_device *dev , u8 val ) ; u32 
/* calc_residency(dev, reg): converts a raw RC6-style residency counter
 * register into a time value for the sysfs show handlers.  Returns 0 when
 * intel_enable_rc6() reports RC6 disabled.  Otherwise it takes a runtime-PM
 * reference, and:
 *   - defaults: units = 128, div = 100000, bias = 100 (the non-VLV scaling
 *     constants; their exact meaning comes from the i915 RC6 counter spec —
 *     not derivable from this file);
 *   - on the path guarded by the byte test at __p___3 + 45UL (a CIL-lowered
 *     device-info flag, presumably is_valleyview — confirm), it reads the
 *     CZ clock register (1052928 on gen 8, else 1052932), extracts
 *     czcount_30ns = reg >> 28, and WARNs + returns 0 via the `out` label if
 *     that field is 0 ("bogus CZ count value", warn_slowpath_fmt with the
 *     harness source path/line);
 *   - gen-8 special case: czcount_30ns == 1 selects div = 10000000 /
 *     units = 3125, otherwise czcount_30ns is incremented;
 *   - the DIV_ROUND_UP idioms were lowered by CIL into explicit
 *     (_tmp = x + d - 1; _tmp / d) sequences with dead __rem remainders;
 *   - bit 32768 (0x8000) of register 1278212 shifts units left by 8.
 * Finally raw_time = readl(reg) * units, ret = DIV_ROUND_UP(raw_time, div),
 * runtime-PM reference dropped, ret returned.
 *
 * show_rc6_mask(): sysfs show handler printing intel_enable_rc6()'s mask as
 * "%x\n" into the PAGE_SIZE (4096) buffer.  Generated code — do not hand-edit. */
vlv_punit_read(struct drm_i915_private *dev_priv , u32 addr ) ; int intel_gpu_freq(struct drm_i915_private *dev_priv , int val ) ; int intel_freq_opcode(struct drm_i915_private *dev_priv , int val ) ; void intel_runtime_pm_get(struct drm_i915_private *dev_priv ) ; void intel_runtime_pm_put(struct drm_i915_private *dev_priv ) ; static u32 calc_residency(struct drm_device *dev , u32 const reg ) { struct drm_i915_private *dev_priv ; u64 raw_time ; u64 units ; u64 div ; u64 bias ; u32 ret ; int tmp ; u32 clk_reg ; u32 czcount_30ns ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp___0 ; int __ret_warn_on ; long tmp___1 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; unsigned long long _tmp ; uint32_t __base ; uint32_t __rem ; uint32_t tmp___2 ; struct drm_i915_private *__p___3 ; uint32_t tmp___3 ; unsigned long long _tmp___0 ; uint32_t __base___0 ; uint32_t __rem___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; units = 128ULL; div = 100000ULL; bias = 100ULL; tmp = intel_enable_rc6((struct drm_device const *)dev); if (tmp == 0) { return (0U); } else { } intel_runtime_pm_get(dev_priv); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { clk_reg = 1052928U; } else { clk_reg = 1052932U; } } else { clk_reg = 1052932U; } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )clk_reg, 1); czcount_30ns = tmp___0 >> 28; if (czcount_30ns == 0U) { __ret_warn_on = czcount_30ns == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_sysfs.c", 62, "bogus CZ count value"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = 0U; goto out; } else { } units = 0ULL; div = 1000000ULL; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { if (czcount_30ns == 1U) { div = 10000000ULL; units = 3125ULL; } else { czcount_30ns = czcount_30ns + 1U; } } else { } } else { } if (units == 0ULL) { _tmp = (bias * 30ULL + (unsigned long long )czcount_30ns) - 1ULL; __base = czcount_30ns; __rem = (uint32_t )(_tmp % (unsigned long long )__base); _tmp = _tmp / (unsigned long long )__base; units = _tmp; } else { } tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278212L, 1); if ((tmp___2 & 32768U) != 0U) { units = units << 8; } else { } div = div * bias; } else { } tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); raw_time = (u64 )tmp___3 * units; _tmp___0 = (raw_time + div) - 1ULL; __base___0 = (uint32_t )div; __rem___0 = (uint32_t )(_tmp___0 % (unsigned long long )__base___0); _tmp___0 = _tmp___0 / (unsigned long long )__base___0; ret = (u32 )_tmp___0; out: intel_runtime_pm_put(dev_priv); return (ret); } } static ssize_t show_rc6_mask(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *dminor ; void *tmp ; int tmp___0 ; int tmp___1 ; { tmp = dev_get_drvdata((struct device const *)kdev); dminor = (struct drm_minor *)tmp; tmp___0 = intel_enable_rc6((struct drm_device const *)dminor->dev); tmp___1 = snprintf(buf, 4096UL, "%x\n", tmp___0); return ((ssize_t )tmp___1); } } static ssize_t show_rc6_ms(struct device *kdev , struct device_attribute *attr , 
/* RC6 residency sysfs show handlers and their static attribute tables.
 *
 * show_rc6_ms / show_rc6p_ms / show_rc6pp_ms / show_media_rc6_ms all follow
 * the same shape: fetch the drm_minor from dev_get_drvdata(), call
 * calc_residency() on a hard-coded counter register (1278216, 1278220,
 * 1278224, and — for media — 1278220 again; these are the CIL-folded
 * GEN6_GT_GFX_RC6 family offsets), and snprintf the result as "%u\n" into
 * the 4096-byte sysfs page.  Note show_media_rc6_ms reads the same register
 * as show_rc6p_ms — this mirrors the upstream driver, where the VLV media
 * RC6 counter shares that offset (presumably; confirm against i915_reg.h).
 *
 * The dev_attr_* initializers are expanded DEVICE_ATTR() macros: name,
 * mode 292U (0444|0040 -> 0444? actually 292 = 0o444), no store hook.
 * rc6_attrs/rc6p_attrs/media_rc6_attrs are NULL-terminated attribute arrays
 * grouped under power_group_name via the *_attr_group structs so they merge
 * into the device's "power" sysfs group.  Generated code — do not hand-edit. */
char *buf ) { struct drm_minor *dminor ; void *tmp ; u32 rc6_residency ; u32 tmp___0 ; int tmp___1 ; { tmp = dev_get_drvdata((struct device const *)kdev); dminor = (struct drm_minor *)tmp; tmp___0 = calc_residency(dminor->dev, 1278216U); rc6_residency = tmp___0; tmp___1 = snprintf(buf, 4096UL, "%u\n", rc6_residency); return ((ssize_t )tmp___1); } } static ssize_t show_rc6p_ms(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *dminor ; void *tmp ; u32 rc6p_residency ; u32 tmp___0 ; int tmp___1 ; { tmp = dev_get_drvdata((struct device const *)kdev); dminor = (struct drm_minor *)tmp; tmp___0 = calc_residency(dminor->dev, 1278220U); rc6p_residency = tmp___0; tmp___1 = snprintf(buf, 4096UL, "%u\n", rc6p_residency); return ((ssize_t )tmp___1); } } static ssize_t show_rc6pp_ms(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *dminor ; void *tmp ; u32 rc6pp_residency ; u32 tmp___0 ; int tmp___1 ; { tmp = dev_get_drvdata((struct device const *)kdev); dminor = (struct drm_minor *)tmp; tmp___0 = calc_residency(dminor->dev, 1278224U); rc6pp_residency = tmp___0; tmp___1 = snprintf(buf, 4096UL, "%u\n", rc6pp_residency); return ((ssize_t )tmp___1); } } static ssize_t show_media_rc6_ms(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *dminor ; void *tmp ; u32 rc6_residency ; u32 tmp___0 ; int tmp___1 ; { tmp = dev_get_drvdata((struct device const *)kdev); dminor = (struct drm_minor *)tmp; tmp___0 = calc_residency(dminor->dev, 1278220U); rc6_residency = tmp___0; tmp___1 = snprintf(buf, 4096UL, "%u\n", rc6_residency); return ((ssize_t )tmp___1); } } static struct device_attribute dev_attr_rc6_enable = {{"rc6_enable", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & show_rc6_mask, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute 
dev_attr_rc6_residency_ms = {{"rc6_residency_ms", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & show_rc6_ms, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_rc6p_residency_ms = {{"rc6p_residency_ms", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & show_rc6p_ms, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_rc6pp_residency_ms = {{"rc6pp_residency_ms", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & show_rc6pp_ms, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_media_rc6_residency_ms = {{"media_rc6_residency_ms", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & show_media_rc6_ms, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct attribute *rc6_attrs[3U] = { & dev_attr_rc6_enable.attr, & dev_attr_rc6_residency_ms.attr, (struct attribute *)0}; static struct attribute_group rc6_attr_group = {(char const *)(& power_group_name), 0, (struct attribute **)(& rc6_attrs), 0}; static struct attribute *rc6p_attrs[3U] = { & dev_attr_rc6p_residency_ms.attr, & dev_attr_rc6pp_residency_ms.attr, (struct attribute *)0}; static struct attribute_group rc6p_attr_group = {(char const *)(& power_group_name), 0, (struct attribute **)(& rc6p_attrs), 0}; static struct attribute *media_rc6_attrs[2U] = { & dev_attr_media_rc6_residency_ms.attr, (struct attribute *)0}; static struct attribute_group media_rc6_attr_group = {(char const *)(& power_group_name), 0, (struct attribute **)(& media_rc6_attrs), 0}; static int l3_access_valid(struct drm_device 
/* L3-parity remap binary sysfs interface (read side).
 *
 * l3_access_valid(dev, offset): gatekeeper for the 128-byte l3_parity blob —
 * returns -1 when neither of the two device-info flag bytes (__p + 45UL;
 * CIL-lowered bitfield reads, both branches test the same byte — this mirrors
 * the upstream HAS_L3_DPF check, presumably is_ivybridge/is_haswell; confirm),
 * -22 (-EINVAL) for offsets not 4-byte aligned, -6 (-ENXIO) for offsets past
 * 127, else 0.
 *
 * i915_l3_read(): bin_attribute read handler.  Recovers the struct device
 * from the kobject via the container_of idiom (__mptr - 0x10), picks the
 * slice index out of attr->private, rounds count down to a multiple of 4,
 * validates via l3_access_valid(), clamps count to 128 - offset, then under
 * i915_mutex_lock_interruptible() copies from
 * dev_priv->l3_parity.remap_info[slice] + offset/4 (note: pointer arithmetic
 * is in u32 units, hence offset/4) or memsets zeros when no remap table has
 * been allocated yet.  Returns bytes read.  The trailing fragment is the head
 * of i915_l3_write(), which continues on the next lines: it rejects gen <= 5
 * (-ENXIO), validates, and lazily kzalloc()s a 128-byte remap table.
 * Generated code — do not hand-edit. */
*dev , loff_t offset ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return (-1); } else { } } else { } if (((unsigned long )offset & 3UL) != 0UL) { return (-22); } else { } if (offset > 127LL) { return (-6); } else { } return (0); } } static ssize_t i915_l3_read(struct file *filp , struct kobject *kobj , struct bin_attribute *attr , char *buf , loff_t offset , size_t count ) { struct device *dev ; struct kobject const *__mptr ; struct drm_minor *dminor ; void *tmp ; struct drm_device *drm_dev ; struct drm_i915_private *dev_priv ; int slice ; int ret ; size_t __min1 ; size_t __min2 ; { __mptr = (struct kobject const *)kobj; dev = (struct device *)__mptr + 0xfffffffffffffff0UL; tmp = dev_get_drvdata((struct device const *)dev); dminor = (struct drm_minor *)tmp; drm_dev = dminor->dev; dev_priv = (struct drm_i915_private *)drm_dev->dev_private; slice = (int )((long )attr->private); count = count & 0xfffffffffffffffcUL; ret = l3_access_valid(drm_dev, offset); if (ret != 0) { return ((ssize_t )ret); } else { } __min1 = (size_t )(128LL - offset); __min2 = count; count = __min1 < __min2 ? 
__min1 : __min2; ret = i915_mutex_lock_interruptible(drm_dev); if (ret != 0) { return ((ssize_t )ret); } else { } if ((unsigned long )dev_priv->l3_parity.remap_info[slice] != (unsigned long )((u32 *)0U)) { memcpy((void *)buf, (void const *)dev_priv->l3_parity.remap_info[slice] + (unsigned long )(offset / 4LL), count); } else { memset((void *)buf, 0, count); } mutex_unlock(& drm_dev->struct_mutex); return ((ssize_t )count); } } static ssize_t i915_l3_write(struct file *filp , struct kobject *kobj , struct bin_attribute *attr , char *buf , loff_t offset , size_t count ) { struct device *dev ; struct kobject const *__mptr ; struct drm_minor *dminor ; void *tmp ; struct drm_device *drm_dev ; struct drm_i915_private *dev_priv ; struct intel_context *ctx ; u32 *temp ; int slice ; int ret ; struct drm_i915_private *__p ; void *tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { __mptr = (struct kobject const *)kobj; dev = (struct device *)__mptr + 0xfffffffffffffff0UL; tmp = dev_get_drvdata((struct device const *)dev); dminor = (struct drm_minor *)tmp; drm_dev = dminor->dev; dev_priv = (struct drm_i915_private *)drm_dev->dev_private; temp = (u32 *)0U; slice = (int )((long )attr->private); __p = to_i915((struct drm_device const *)drm_dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (-6L); } else { } ret = l3_access_valid(drm_dev, offset); if (ret != 0) { return ((ssize_t )ret); } else { } ret = i915_mutex_lock_interruptible(drm_dev); if (ret != 0) { return ((ssize_t )ret); } else { } if ((unsigned long )dev_priv->l3_parity.remap_info[slice] == (unsigned long )((u32 *)0U)) { tmp___0 = kzalloc(128UL, 208U); temp = (u32 *)tmp___0; if ((unsigned long )temp == (unsigned long )((u32 *)0U)) { mutex_unlock(& drm_dev->struct_mutex); return (-12L); } else { } } else { } ret = i915_gpu_idle(drm_dev); if (ret != 0) { kfree((void const *)temp); mutex_unlock(& drm_dev->struct_mutex); return ((ssize_t )ret); } else { } if 
/* Tail of i915_l3_write(), the l3_parity bin_attribute tables, and the first
 * two GT-frequency show handlers.
 *
 * i915_l3_write() (continued): installs the freshly-allocated remap table if
 * one was created, memcpy()s the user bytes in at offset/4 (u32-unit pointer
 * arithmetic), then walks dev_priv->context_list (a CIL-lowered
 * list_for_each_entry: container_of offset 0xff...f30, advancing via
 * ctx->link.next) OR-ing (1 << slice) into each context's remap_slice so the
 * new remap is applied on next context restore; unlocks and returns count.
 *
 * dpf_attrs / dpf_attrs_1: bin_attributes "l3_parity" / "l3_parity_slice_1",
 * mode 384U (0600), size 128, private = slice index 0/1, wired to
 * i915_l3_read/i915_l3_write.
 *
 * gt_act_freq_mhz_show(): flushes the deferred RPS resume work
 * (ldv_flush_delayed_work_60 — LDV wrapper for flush_delayed_work), takes
 * runtime PM + rps.hw_lock, then reads the *actual* frequency: on the
 * byte-flag path (presumably VLV — confirm) via vlv_punit_read(216U) bits
 * 15:8; otherwise from GEN6_RPSTAT1 (40988/0xA01C) with a per-gen bitfield
 * (gen 9: bits 31:23; the flagged path and gen 8: bits 13:7; else bits 14:8),
 * converts with intel_gpu_freq() and prints "%d\n".
 *
 * gt_cur_freq_mhz_show(): same locking dance but reports the *requested*
 * frequency dev_priv->rps.cur_freq via intel_gpu_freq().
 * Generated code — do not hand-edit. */
((unsigned long )temp != (unsigned long )((u32 *)0U)) { dev_priv->l3_parity.remap_info[slice] = temp; } else { } memcpy((void *)dev_priv->l3_parity.remap_info[slice] + (unsigned long )(offset / 4LL), (void const *)buf, count); __mptr___0 = (struct list_head const *)dev_priv->context_list.next; ctx = (struct intel_context *)__mptr___0 + 0xffffffffffffff30UL; goto ldv_48225; ldv_48224: ctx->remap_slice = (uint8_t )((int )((signed char )ctx->remap_slice) | (int )((signed char )(1 << slice))); __mptr___1 = (struct list_head const *)ctx->link.next; ctx = (struct intel_context *)__mptr___1 + 0xffffffffffffff30UL; ldv_48225: ; if ((unsigned long )(& ctx->link) != (unsigned long )(& dev_priv->context_list)) { goto ldv_48224; } else { } mutex_unlock(& drm_dev->struct_mutex); return ((ssize_t )count); } } static struct bin_attribute dpf_attrs = {{"l3_parity", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 128UL, (void *)0, & i915_l3_read, & i915_l3_write, (int (*)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ))0}; static struct bin_attribute dpf_attrs_1 = {{"l3_parity_slice_1", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 128UL, (void *)1, & i915_l3_read, & i915_l3_write, (int (*)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ))0}; static ssize_t gt_act_freq_mhz_show(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; u32 freq ; u32 rpstat ; uint32_t tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; int tmp___1 ; { tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; 
dev_priv = (struct drm_i915_private *)dev->dev_private; ldv_flush_delayed_work_60(& dev_priv->rps.delayed_resume_work); intel_runtime_pm_get(dev_priv); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); __p___3 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { freq = vlv_punit_read(dev_priv, 216U); ret = intel_gpu_freq(dev_priv, (int )(freq >> 8) & 255); } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40988L, 1); rpstat = tmp___0; __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { ret = (int )(rpstat >> 23); } else { __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ret = (int )((rpstat & 16256U) >> 7); } else { __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = dev_priv; if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { ret = (int )((rpstat & 16256U) >> 7); } else { ret = (int )((rpstat & 32512U) >> 8); } } else { ret = (int )((rpstat & 32512U) >> 8); } } } ret = intel_gpu_freq(dev_priv, ret); } mutex_unlock(& dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); tmp___1 = snprintf(buf, 4096UL, "%d\n", ret); return ((ssize_t )tmp___1); } } static ssize_t gt_cur_freq_mhz_show(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; int tmp___0 ; { tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ldv_flush_delayed_work_61(& dev_priv->rps.delayed_resume_work); intel_runtime_pm_get(dev_priv); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = intel_gpu_freq(dev_priv, (int )dev_priv->rps.cur_freq); mutex_unlock(& dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); tmp___0 = snprintf(buf, 4096UL, "%d\n", ret); return ((ssize_t )tmp___0); } } 
/* vlv_rpe_freq_mhz_show(): sysfs read handler for "vlv_rpe_freq_mhz".
 * Reports the RPS "efficient" frequency (rps.efficient_freq) in MHz.
 * Note: takes no lock and no runtime-PM reference — it only reads a cached
 * software value. */
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int tmp___0 ; int tmp___1 ; { tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.efficient_freq); tmp___1 = snprintf(buf, 4096UL, "%d\n", tmp___0); return ((ssize_t )tmp___1); } }
/* gt_max_freq_mhz_show(): sysfs read handler for "gt_max_freq_mhz".
 * Flushes delayed RPS resume work, then reports the user-settable maximum
 * soft limit (rps.max_freq_softlimit) in MHz under rps.hw_lock. */
static ssize_t gt_max_freq_mhz_show(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; int tmp___0 ; { tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ldv_flush_delayed_work_62(& dev_priv->rps.delayed_resume_work); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq_softlimit); mutex_unlock(& dev_priv->rps.hw_lock); tmp___0 = snprintf(buf, 4096UL, "%d\n", ret); return ((ssize_t )tmp___0); } }
/* gt_max_freq_mhz_store(): sysfs write handler for "gt_max_freq_mhz".
 * Parses a u32 MHz value, converts it to an RPS opcode with
 * intel_freq_opcode(), and rejects (-EINVAL == -22) values outside
 * [rps.min_freq, rps.max_freq] or below the current min soft limit.  Values
 * above rps.rp0_freq emit an "overclocking" debug message when drm_debug is
 * enabled.  Stores the new max soft limit, then re-clamps the current
 * frequency to clamp(cur_freq, min_freq_softlimit, max_freq_softlimit)
 * (the __max1/__min1 dance is a CIL-expanded clamp macro) and applies it via
 * intel_set_rps().  All under rps.hw_lock. */
static ssize_t gt_max_freq_mhz_store(struct device *kdev , struct device_attribute *attr , char const *buf , size_t count ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; ssize_t ret ; int tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; int __min1 ; int __max1 ; int __max2 ; int __min2 ; { tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = kstrtou32(buf, 0U, & val); ret = (ssize_t )tmp___0; if (ret != 0L) { return (ret); } else { } ldv_flush_delayed_work_63(& dev_priv->rps.delayed_resume_work); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp___1 = intel_freq_opcode(dev_priv, (int )val); val = (u32 )tmp___1; if (((u32 )dev_priv->rps.min_freq > val || (u32 )dev_priv->rps.max_freq < val) || (u32 )dev_priv->rps.min_freq_softlimit > val) { mutex_unlock(& dev_priv->rps.hw_lock); return (-22L); } else { } if ((u32 )dev_priv->rps.rp0_freq < val) { tmp___3 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = intel_gpu_freq(dev_priv, (int )val); drm_ut_debug_printk("gt_max_freq_mhz_store", "User requested overclocking to %d\n", tmp___2); } else { } } else { } dev_priv->rps.max_freq_softlimit = (u8 )val; __max1 = (int )dev_priv->rps.cur_freq; __max2 = (int )dev_priv->rps.min_freq_softlimit; __min1 = __max1 > __max2 ? __max1 : __max2; __min2 = (int )dev_priv->rps.max_freq_softlimit; val = (u32 )(__min1 < __min2 ? __min1 : __min2); intel_set_rps(dev, (int )((u8 )val)); mutex_unlock(& dev_priv->rps.hw_lock); return ((ssize_t )count); } }
/* gt_min_freq_mhz_show(): sysfs read handler for "gt_min_freq_mhz" —
 * reports rps.min_freq_softlimit in MHz under rps.hw_lock. */
static ssize_t gt_min_freq_mhz_show(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; int tmp___0 ; { tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ldv_flush_delayed_work_64(& dev_priv->rps.delayed_resume_work); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq_softlimit); mutex_unlock(& dev_priv->rps.hw_lock); tmp___0 = snprintf(buf, 4096UL, "%d\n", ret); return ((ssize_t )tmp___0); } }
/* gt_min_freq_mhz_store(): sysfs write handler for "gt_min_freq_mhz";
 * mirrors gt_max_freq_mhz_store() for the minimum soft limit.
 * (Definition continues past this chunk boundary.) */
static ssize_t gt_min_freq_mhz_store(struct device *kdev , struct device_attribute *attr , char const *buf , size_t count ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; ssize_t ret ; int tmp___0 ; int tmp___1 ; int __min1 ; int __max1 ; int __max2 ; int __min2 ; { tmp = dev_get_drvdata((struct device const
/* Continuation of gt_min_freq_mhz_store(): parse u32, convert MHz ->
 * opcode, reject values outside [min_freq, max_freq] or above the max soft
 * limit (-22 == -EINVAL), store the new min soft limit, then clamp and apply
 * the current frequency via intel_set_rps() — all under rps.hw_lock. */
*)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = kstrtou32(buf, 0U, & val); ret = (ssize_t )tmp___0; if (ret != 0L) { return (ret); } else { } ldv_flush_delayed_work_65(& dev_priv->rps.delayed_resume_work); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp___1 = intel_freq_opcode(dev_priv, (int )val); val = (u32 )tmp___1; if (((u32 )dev_priv->rps.min_freq > val || (u32 )dev_priv->rps.max_freq < val) || (u32 )dev_priv->rps.max_freq_softlimit < val) { mutex_unlock(& dev_priv->rps.hw_lock); return (-22L); } else { } dev_priv->rps.min_freq_softlimit = (u8 )val; __max1 = (int )dev_priv->rps.cur_freq; __max2 = (int )dev_priv->rps.min_freq_softlimit; __min1 = __max1 > __max2 ? __max1 : __max2; __min2 = (int )dev_priv->rps.max_freq_softlimit; val = (u32 )(__min1 < __min2 ? __min1 : __min2); intel_set_rps(dev, (int )((u8 )val)); mutex_unlock(& dev_priv->rps.hw_lock); return ((ssize_t )count); } }
/* Statically-initialized device attributes (CIL-expanded DEVICE_ATTR):
 * mode 292U (0444: read-only) entries have a NULL store callback; mode 420U
 * (0644: read-write) entries wire both show and store. */
static struct device_attribute dev_attr_gt_act_freq_mhz = {{"gt_act_freq_mhz", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & gt_act_freq_mhz_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0};
static struct device_attribute dev_attr_gt_cur_freq_mhz = {{"gt_cur_freq_mhz", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & gt_cur_freq_mhz_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0};
static struct device_attribute dev_attr_gt_max_freq_mhz = {{"gt_max_freq_mhz", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & gt_max_freq_mhz_show, & gt_max_freq_mhz_store};
static struct device_attribute dev_attr_gt_min_freq_mhz = {{"gt_min_freq_mhz", 420U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & gt_min_freq_mhz_show, & gt_min_freq_mhz_store};
static struct device_attribute dev_attr_vlv_rpe_freq_mhz = {{"vlv_rpe_freq_mhz", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & vlv_rpe_freq_mhz_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0};
/* Forward declaration: one show handler shared by the RP0/RP1/RPn
 * attributes below; it distinguishes them by the attr pointer. */
static ssize_t gt_rp_mhz_show(struct device *kdev , struct device_attribute *attr , char *buf ) ;
static struct device_attribute dev_attr_gt_RP0_freq_mhz = {{"gt_RP0_freq_mhz", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & gt_rp_mhz_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0};
static struct device_attribute dev_attr_gt_RP1_freq_mhz = {{"gt_RP1_freq_mhz", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & gt_rp_mhz_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0};
static struct device_attribute dev_attr_gt_RPn_freq_mhz = {{"gt_RPn_freq_mhz", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & gt_rp_mhz_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0};
/* gt_rp_mhz_show(): shared read handler for gt_RP0/RP1/RPn_freq_mhz.
 * Selects rps.rp0_freq, rps.rp1_freq or rps.min_freq based on which
 * attribute was passed; any other attr pointer hits a CIL-expanded BUG()
 * (the inline asm ud2 + infinite goto below). */
static ssize_t gt_rp_mhz_show(struct device *kdev , struct device_attribute *attr , char *buf ) { struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )attr == (unsigned long )(& dev_attr_gt_RP0_freq_mhz)) { tmp___0 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.rp0_freq); val = (u32 )tmp___0; } else if ((unsigned long )attr == (unsigned long )(& dev_attr_gt_RP1_freq_mhz)) { tmp___1 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.rp1_freq); val = (u32 )tmp___1; } else if ((unsigned long )attr == (unsigned long )(& dev_attr_gt_RPn_freq_mhz)) { tmp___2 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq); val = (u32 )tmp___2; } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_sysfs.c"), "i" (520), "i" (12UL)); ldv_48457: ; goto ldv_48457; } tmp___3 = snprintf(buf, 4096UL, "%d\n", val); return ((ssize_t )tmp___3); } }
/* NULL-terminated attribute lists: gen6_attrs for gen6+ hardware,
 * vlv_attrs adds the vlv_rpe_freq_mhz entry on the VLV-flagged path. */
static struct attribute const *gen6_attrs[8U] = { (struct attribute const *)(& dev_attr_gt_act_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_cur_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_max_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_min_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_RP0_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_RP1_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_RPn_freq_mhz.attr), (struct attribute const *)0};
static struct attribute const *vlv_attrs[9U] = { (struct attribute const *)(& dev_attr_gt_act_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_cur_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_max_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_min_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_RP0_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_RP1_freq_mhz.attr), (struct attribute const *)(& dev_attr_gt_RPn_freq_mhz.attr), (struct attribute const *)(& dev_attr_vlv_rpe_freq_mhz.attr), (struct attribute const *)0};
/* error_state_read(): sysfs binary-attribute read for "error"; signature
 * continues past this chunk boundary. */
static ssize_t error_state_read(struct file *filp , struct kobject *kobj , struct bin_attribute *attr , char *buf ,
/* Continuation of error_state_read(): recovers the struct device from the
 * kobject (container_of, CIL-expanded as the -16 byte offset), formats the
 * captured GPU error state into a string buffer with
 * i915_error_state_to_str(), copies up to `count` bytes into the sysfs
 * buffer and releases the error-state references on all paths (goto-cleanup
 * via the `out` label). */
loff_t off , size_t count ) { struct device *kdev ; struct kobject const *__mptr ; struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; struct i915_error_state_file_priv error_priv ; struct drm_i915_error_state_buf error_str ; ssize_t ret_count ; int ret ; struct drm_i915_private *tmp___0 ; { __mptr = (struct kobject const *)kobj; kdev = (struct device *)__mptr + 0xfffffffffffffff0UL; tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; ret_count = 0L; memset((void *)(& error_priv), 0, 16UL); tmp___0 = to_i915((struct drm_device const *)dev); ret = i915_error_state_buf_init(& error_str, tmp___0, count, off); if (ret != 0) { return ((ssize_t )ret); } else { } error_priv.dev = dev; i915_error_state_get(dev, & error_priv); ret = i915_error_state_to_str(& error_str, (struct i915_error_state_file_priv const *)(& error_priv)); if (ret != 0) { goto out; } else { } ret_count = (ssize_t )(count < (size_t )error_str.bytes ? count : (size_t )error_str.bytes); memcpy((void *)buf, (void const *)error_str.buf, (size_t )ret_count); out: i915_error_state_put(& error_priv); i915_error_state_buf_release(& error_str); return (ret != 0 ? (ssize_t )ret : ret_count); } }
/* error_state_write(): any write to the "error" binary attribute resets
 * (destroys) the captured error state under dev->struct_mutex; emits a
 * debug message when DRM_UT_DRIVER (drm_debug & 2) is set. */
static ssize_t error_state_write(struct file *file , struct kobject *kobj , struct bin_attribute *attr , char *buf , loff_t off , size_t count ) { struct device *kdev ; struct kobject const *__mptr ; struct drm_minor *minor ; void *tmp ; struct drm_device *dev ; int ret ; long tmp___0 ; { __mptr = (struct kobject const *)kobj; kdev = (struct device *)__mptr + 0xfffffffffffffff0UL; tmp = dev_get_drvdata((struct device const *)kdev); minor = (struct drm_minor *)tmp; dev = minor->dev; tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("error_state_write", "Resetting error state\n"); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return ((ssize_t )ret); } else { } i915_destroy_error_state(dev); mutex_unlock(& dev->struct_mutex); return ((ssize_t )count); } }
/* "error" binary sysfs attribute, mode 384U (0600), wired to the
 * read/write handlers above. */
static struct bin_attribute error_state_attr = {{"error", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & error_state_read, & error_state_write, 0};
/* i915_setup_sysfs(): registers all of the sysfs groups/files above on the
 * primary DRM minor's device, choosing groups by hardware generation and
 * CIL-flattened platform-flag byte tests (the `+ 45UL` peeks — presumably
 * IS_VALLEYVIEW()/similar, TODO confirm).  Registration failures are only
 * logged, never fatal.  (Definition continues past this chunk boundary.) */
void i915_setup_sysfs(struct drm_device *dev ) { int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; int tmp ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { ret = sysfs_merge_group(& ((dev->primary)->kdev)->kobj, (struct attribute_group const *)(& rc6_attr_group)); if (ret != 0) { drm_err("RC6 residency sysfs setup failed\n"); } else { } } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char
/* Continuation of i915_setup_sysfs(): merges the RC6p group (gen6 or the
 * flagged platform), the media-RC6 group, creates the l3-parity binary
 * files (dpf_attrs, and dpf_attrs_1 for a second slice on a device-id
 * subset), then creates the RPS attribute set (vlv_attrs vs gen6_attrs) and
 * the "error" state binary file.  NOTE(review): `tmp > 1` below can never be
 * true since tmp is only ever assigned 0 or 1 — this mirrors the CIL
 * flattening of the original NUM_L3_SLICES-style check; verify against the
 * original i915_sysfs.c before changing. */
)__p___0->info.gen) == 6U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { _L: /* CIL Label */ ret = sysfs_merge_group(& ((dev->primary)->kdev)->kobj, (struct attribute_group const *)(& rc6p_attr_group)); if (ret != 0) { drm_err("RC6p residency sysfs setup failed\n"); } else { } } else { } } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { ret = sysfs_merge_group(& ((dev->primary)->kdev)->kobj, (struct attribute_group const *)(& media_rc6_attr_group)); if (ret != 0) { drm_err("Media RC6 residency sysfs setup failed\n"); } else { } } else { } __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { goto _L___2; } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { _L___2: /* CIL Label */ ret = device_create_bin_file((dev->primary)->kdev, (struct bin_attribute const *)(& dpf_attrs)); if (ret != 0) { drm_err("l3 parity sysfs setup failed\n"); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if (((int )__p___4->info.device_id & 240) == 32) { goto _L___0; } else { goto _L___1; } } else { _L___1: /* CIL Label */ __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { tmp = 1; } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { tmp = 1; } else { tmp = 0; } } if (tmp > 1) { _L___0: /* CIL Label */ ret = device_create_bin_file((dev->primary)->kdev, (struct bin_attribute const *)(& dpf_attrs_1)); if (ret != 0) { drm_err("l3 parity slice 1 setup failed\n"); } else { } } else { } } } else { } } ret = 0; __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) != 0U) { ret = sysfs_create_files(& ((dev->primary)->kdev)->kobj, (struct attribute const **)(& vlv_attrs)); } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___9->info.gen) > 5U) { ret = sysfs_create_files(& ((dev->primary)->kdev)->kobj, (struct attribute const **)(& gen6_attrs)); } else { } } if (ret != 0) { drm_err("RPS sysfs setup failed\n"); } else { } ret = sysfs_create_bin_file(& ((dev->primary)->kdev)->kobj, (struct bin_attribute const *)(& error_state_attr)); if (ret != 0) { drm_err("error_state sysfs setup failed\n"); } else { } return; } }
/* i915_teardown_sysfs(): removes everything i915_setup_sysfs() may have
 * registered; removal calls are unconditional (they tolerate files that
 * were never created on this platform). */
void i915_teardown_sysfs(struct drm_device *dev ) { struct drm_i915_private *__p ; { sysfs_remove_bin_file(& ((dev->primary)->kdev)->kobj, (struct bin_attribute const *)(& error_state_attr)); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { sysfs_remove_files(& ((dev->primary)->kdev)->kobj, (struct attribute const **)(& vlv_attrs)); } else { sysfs_remove_files(& ((dev->primary)->kdev)->kobj, (struct attribute const **)(& gen6_attrs)); } device_remove_bin_file((dev->primary)->kdev, (struct bin_attribute const *)(& dpf_attrs_1)); device_remove_bin_file((dev->primary)->kdev, (struct bin_attribute const *)(& dpf_attrs)); sysfs_unmerge_group(& ((dev->primary)->kdev)->kobj, (struct attribute_group const *)(& rc6_attr_group)); sysfs_unmerge_group(& ((dev->primary)->kdev)->kobj, (struct attribute_group const *)(& rc6p_attr_group)); return; } }
/* LDV verifier environment-model hooks (probe/release state transitions). */
extern int ldv_release_181(void) ; extern int ldv_probe_180(void) ; extern int ldv_release_180(void) ; extern int ldv_probe_181(void) ; extern int ldv_probe_171(void) ; extern int ldv_release_171(void) ;
/* ldv_initialize_bin_attribute_171(): allocates verifier stand-ins for the
 * error_state_attr callback arguments (kobject/file/bin_attribute).
 * (Definition continues past this chunk boundary.) */
void ldv_initialize_bin_attribute_171(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); error_state_attr_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer();
/* Continuation of ldv_initialize_bin_attribute_171(). */
error_state_attr_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); error_state_attr_group2 = (struct bin_attribute *)tmp___1; return; } }
/* ldv_initialize_bin_attribute_180(): verifier stand-ins for the
 * dpf_attrs_1 (l3 parity slice 1) callback arguments. */
void ldv_initialize_bin_attribute_180(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); dpf_attrs_1_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); dpf_attrs_1_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); dpf_attrs_1_group2 = (struct bin_attribute *)tmp___1; return; } }
/* ldv_initialize_device_attribute_177(): stand-ins for the
 * gt_max_freq_mhz attribute callbacks. */
void ldv_initialize_device_attribute_177(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); dev_attr_gt_max_freq_mhz_group0 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); dev_attr_gt_max_freq_mhz_group1 = (struct device *)tmp___0; return; } }
/* ldv_initialize_device_attribute_176(): stand-ins for the
 * gt_min_freq_mhz attribute callbacks. */
void ldv_initialize_device_attribute_176(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); dev_attr_gt_min_freq_mhz_group0 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); dev_attr_gt_min_freq_mhz_group1 = (struct device *)tmp___0; return; } }
/* ldv_initialize_bin_attribute_181(): stand-ins for the dpf_attrs
 * (l3 parity) callback arguments. */
void ldv_initialize_bin_attribute_181(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); dpf_attrs_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); dpf_attrs_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); dpf_attrs_group2 = (struct bin_attribute *)tmp___1; return; } }
/* ldv_main_exported_181(): verifier state machine that nondeterministically
 * drives the dpf_attrs (i915_l3_read/i915_l3_write) callbacks plus
 * probe/release, tracking open/closed state in ldv_state_variable_181.
 * (Definition continues past this chunk boundary.) */
void ldv_main_exported_181(void) { char *ldvarg192 ; void *tmp ; size_t ldvarg188 ; loff_t ldvarg193 ; char *ldvarg189 ; void *tmp___0 ; loff_t ldvarg190 ; size_t ldvarg191 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg192 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg189 = (char *)tmp___0; ldv_memset((void *)(& ldvarg188), 0, 8UL); ldv_memset((void *)(& ldvarg193), 0, 8UL); ldv_memset((void *)(& ldvarg190), 0, 8UL); ldv_memset((void *)(& ldvarg191), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_181 == 2) { i915_l3_write(dpf_attrs_group1, dpf_attrs_group0, dpf_attrs_group2, ldvarg192, ldvarg193, ldvarg191); ldv_state_variable_181 = 2; } else { } goto ldv_48616; case 1: ; if (ldv_state_variable_181 == 2) { i915_l3_read(dpf_attrs_group1, dpf_attrs_group0, dpf_attrs_group2, ldvarg189, ldvarg190, ldvarg188); ldv_state_variable_181 = 2; } else { } goto ldv_48616; case 2: ; if (ldv_state_variable_181 == 2) { ldv_release_181(); ldv_state_variable_181 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48616; case 3: ; if (ldv_state_variable_181 == 1) { ldv_probe_181(); ldv_state_variable_181 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48616; default: ldv_stop(); } ldv_48616: ; return; } }
/* ldv_main_exported_180(): same state machine for dpf_attrs_1
 * (second l3-parity slice). */
void ldv_main_exported_180(void) { loff_t ldvarg270 ; loff_t ldvarg273 ; size_t ldvarg268 ; char *ldvarg269 ; void *tmp ; size_t ldvarg271 ; char *ldvarg272 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg269 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg272 = (char *)tmp___0; ldv_memset((void *)(& ldvarg270), 0, 8UL); ldv_memset((void *)(& ldvarg273), 0, 8UL); ldv_memset((void *)(& ldvarg268), 0, 8UL); ldv_memset((void *)(& ldvarg271), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_180 == 2) { i915_l3_write(dpf_attrs_1_group1, dpf_attrs_1_group0, dpf_attrs_1_group2, ldvarg272, ldvarg273, ldvarg271); ldv_state_variable_180 = 2; } else { } goto ldv_48631; case 1: ; if (ldv_state_variable_180 == 2) { i915_l3_read(dpf_attrs_1_group1, dpf_attrs_1_group0, dpf_attrs_1_group2, ldvarg269, ldvarg270, ldvarg268); ldv_state_variable_180 = 2; } else { } goto ldv_48631; case 2: ; if (ldv_state_variable_180 == 2) { ldv_release_180(); ldv_state_variable_180 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48631; case 3: ; if (ldv_state_variable_180 == 1) { ldv_probe_180(); ldv_state_variable_180 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48631; default: ldv_stop(); } ldv_48631: ; return; } }
/* ldv_main_exported_174(): driver for gt_rp_mhz_show with fresh stand-in
 * arguments.  (Definition continues past this chunk boundary.) */
void ldv_main_exported_174(void) {
/* Continuation of ldv_main_exported_174(): nondeterministically invokes
 * gt_rp_mhz_show() with allocated stand-in device/attribute/buffer. */
struct device_attribute *ldvarg345 ; void *tmp ; char *ldvarg344 ; void *tmp___0 ; struct device *ldvarg343 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg345 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg344 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg343 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_174 == 1) { gt_rp_mhz_show(ldvarg343, ldvarg345, ldvarg344); ldv_state_variable_174 = 1; } else { } goto ldv_48643; default: ldv_stop(); } ldv_48643: ; return; } }
/* ldv_main_exported_179(): driver for gt_act_freq_mhz_show. */
void ldv_main_exported_179(void) { struct device *ldvarg276 ; void *tmp ; char *ldvarg277 ; void *tmp___0 ; struct device_attribute *ldvarg278 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg276 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg277 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg278 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_179 == 1) { gt_act_freq_mhz_show(ldvarg276, ldvarg278, ldvarg277); ldv_state_variable_179 = 1; } else { } goto ldv_48652; default: ldv_stop(); } ldv_48652: ; return; } }
/* ldv_main_exported_186(): driver for show_rc6_mask. */
void ldv_main_exported_186(void) { struct device *ldvarg139 ; void *tmp ; struct device_attribute *ldvarg141 ; void *tmp___0 ; char *ldvarg140 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg139 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg141 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg140 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_186 == 1) { show_rc6_mask(ldvarg139, ldvarg141, ldvarg140); ldv_state_variable_186 = 1; } else { } goto ldv_48661; default: ldv_stop(); } ldv_48661: ; return; } }
/* ldv_main_exported_182(): driver for show_media_rc6_ms.  (Definition
 * continues past this chunk boundary.) */
void ldv_main_exported_182(void) { struct device_attribute *ldvarg294 ; void *tmp ; struct device *ldvarg292 ; void *tmp___0 ; char *ldvarg293 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg294 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg292 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg293 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_182 == 1) { show_media_rc6_ms(ldvarg292, ldvarg294, ldvarg293); ldv_state_variable_182 = 1; } else { } goto ldv_48670; default: ldv_stop(); } ldv_48670: ; return; } }
/* ldv_main_exported_185(): driver for show_rc6_ms. */
void ldv_main_exported_185(void) { struct device_attribute *ldvarg537 ; void *tmp ; char *ldvarg536 ; void *tmp___0 ; struct device *ldvarg538 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg537 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg536 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg538 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_185 == 1) { show_rc6_ms(ldvarg538, ldvarg537, ldvarg536); ldv_state_variable_185 = 1; } else { } goto ldv_48679; default: ldv_stop(); } ldv_48679: ; return; } }
/* ldv_main_exported_183(): driver for show_rc6pp_ms. */
void ldv_main_exported_183(void) { struct device *ldvarg542 ; void *tmp ; char *ldvarg540 ; void *tmp___0 ; struct device_attribute *ldvarg541 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg542 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg540 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg541 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_183 == 1) { show_rc6pp_ms(ldvarg542, ldvarg541, ldvarg540); ldv_state_variable_183 = 1; } else { } goto ldv_48688; default: ldv_stop(); } ldv_48688: ; return; } }
/* ldv_main_exported_176(): driver for the gt_min_freq_mhz show/store pair.
 * (Definition continues past this chunk boundary.) */
void ldv_main_exported_176(void) { char *ldvarg226 ; void *tmp ; char *ldvarg224 ; void *tmp___0 ; size_t ldvarg225 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg226 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL);
/* Continuation of ldv_main_exported_176(): nondeterministically calls
 * gt_min_freq_mhz_store() or gt_min_freq_mhz_show() with the pre-allocated
 * stand-in device/attribute globals. */
ldvarg224 = (char *)tmp___0; ldv_memset((void *)(& ldvarg225), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_176 == 1) { gt_min_freq_mhz_store(dev_attr_gt_min_freq_mhz_group1, dev_attr_gt_min_freq_mhz_group0, (char const *)ldvarg226, ldvarg225); ldv_state_variable_176 = 1; } else { } goto ldv_48697; case 1: ; if (ldv_state_variable_176 == 1) { gt_min_freq_mhz_show(dev_attr_gt_min_freq_mhz_group1, dev_attr_gt_min_freq_mhz_group0, ldvarg224); ldv_state_variable_176 = 1; } else { } goto ldv_48697; default: ldv_stop(); } ldv_48697: ; return; } }
/* ldv_main_exported_184(): driver for show_rc6p_ms. */
void ldv_main_exported_184(void) { struct device *ldvarg155 ; void *tmp ; struct device_attribute *ldvarg157 ; void *tmp___0 ; char *ldvarg156 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg155 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg157 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg156 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_184 == 1) { show_rc6p_ms(ldvarg155, ldvarg157, ldvarg156); ldv_state_variable_184 = 1; } else { } goto ldv_48707; default: ldv_stop(); } ldv_48707: ; return; } }
/* ldv_main_exported_172(): driver for gt_rp_mhz_show (second attribute
 * instance sharing the same handler). */
void ldv_main_exported_172(void) { struct device *ldvarg163 ; void *tmp ; struct device_attribute *ldvarg165 ; void *tmp___0 ; char *ldvarg164 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg163 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg165 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg164 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_172 == 1) { gt_rp_mhz_show(ldvarg163, ldvarg165, ldvarg164); ldv_state_variable_172 = 1; } else { } goto ldv_48716; default: ldv_stop(); } ldv_48716: ; return; } }
/* ldv_main_exported_173(): driver for gt_rp_mhz_show (third attribute
 * instance).  (Definition continues past this chunk boundary.) */
void ldv_main_exported_173(void) { char *ldvarg386 ; void *tmp ; struct device_attribute *ldvarg387 ; void *tmp___0 ; struct device *ldvarg385 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg386 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg387 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg385 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_173 == 1) { gt_rp_mhz_show(ldvarg385, ldvarg387, ldvarg386); ldv_state_variable_173 = 1; } else { } goto ldv_48725; default: ldv_stop(); } ldv_48725: ; return; } }
/* ldv_main_exported_178(): driver for gt_cur_freq_mhz_show. */
void ldv_main_exported_178(void) { char *ldvarg176 ; void *tmp ; struct device *ldvarg175 ; void *tmp___0 ; struct device_attribute *ldvarg177 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg176 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg175 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg177 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_178 == 1) { gt_cur_freq_mhz_show(ldvarg175, ldvarg177, ldvarg176); ldv_state_variable_178 = 1; } else { } goto ldv_48734; default: ldv_stop(); } ldv_48734: ; return; } }
/* ldv_main_exported_177(): driver for the gt_max_freq_mhz show/store pair. */
void ldv_main_exported_177(void) { size_t ldvarg494 ; char *ldvarg493 ; void *tmp ; char *ldvarg492 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg493 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg492 = (char *)tmp___0; ldv_memset((void *)(& ldvarg494), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_177 == 1) { gt_max_freq_mhz_store(dev_attr_gt_max_freq_mhz_group1, dev_attr_gt_max_freq_mhz_group0, (char const *)ldvarg493, ldvarg494); ldv_state_variable_177 = 1; } else { } goto ldv_48743; case 1: ; if (ldv_state_variable_177 == 1) { gt_max_freq_mhz_show(dev_attr_gt_max_freq_mhz_group1, dev_attr_gt_max_freq_mhz_group0, ldvarg492); ldv_state_variable_177 = 1; } else { } goto ldv_48743; default: ldv_stop(); } ldv_48743: ; return; } }
/* ldv_main_exported_175(): driver for vlv_rpe_freq_mhz_show.
 * (Definition continues past this chunk boundary.) */
void ldv_main_exported_175(void) {
struct device *ldvarg62 ; void *tmp ; struct device_attribute *ldvarg64 ; void *tmp___0 ; char *ldvarg63 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg62 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg64 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg63 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_175 == 1) { vlv_rpe_freq_mhz_show(ldvarg62, ldvarg64, ldvarg63); ldv_state_variable_175 = 1; } else { } goto ldv_48753; default: ldv_stop(); } ldv_48753: ; return; } } void ldv_main_exported_171(void) { loff_t ldvarg573 ; size_t ldvarg568 ; size_t ldvarg571 ; char *ldvarg569 ; void *tmp ; char *ldvarg572 ; void *tmp___0 ; loff_t ldvarg570 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg569 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg572 = (char *)tmp___0; ldv_memset((void *)(& ldvarg573), 0, 8UL); ldv_memset((void *)(& ldvarg568), 0, 8UL); ldv_memset((void *)(& ldvarg571), 0, 8UL); ldv_memset((void *)(& ldvarg570), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_171 == 2) { error_state_write(error_state_attr_group1, error_state_attr_group0, error_state_attr_group2, ldvarg572, ldvarg573, ldvarg571); ldv_state_variable_171 = 2; } else { } goto ldv_48765; case 1: ; if (ldv_state_variable_171 == 2) { error_state_read(error_state_attr_group1, error_state_attr_group0, error_state_attr_group2, ldvarg569, ldvarg570, ldvarg568); ldv_state_variable_171 = 2; } else { } goto ldv_48765; case 2: ; if (ldv_state_variable_171 == 2) { ldv_release_171(); ldv_state_variable_171 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48765; case 3: ; if (ldv_state_variable_171 == 1) { ldv_probe_171(); ldv_state_variable_171 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48765; default: ldv_stop(); } ldv_48765: ; return; } } bool ldv_queue_work_on_55(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , 
struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; {
/* LDV harness wrapper (body; signature starts on the previous source line):
 * forward to the real queue_work_on(), then mark the work item as
 * activated (state 2) in the LDV work-state model. */
tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper: queue_delayed_work_on() + activate the embedded work item. */
bool ldv_queue_delayed_work_on_56(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* LDV wrapper: queue_work_on() + activate the work item (same model update
 * as ldv_queue_work_on_55; separate per-call-site instance). */
bool ldv_queue_work_on_57(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper: flush_workqueue(), then run-and-disable every modelled work
 * item (state 2). */
void ldv_flush_workqueue_58(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* LDV wrapper: queue_delayed_work_on() + activate the embedded work item. */
bool ldv_queue_delayed_work_on_59(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* LDV wrappers 60-65: flush_delayed_work(), then run-and-disable the
 * wrapped work item in the model. Six textually identical per-call-site
 * instances, distinguished only by the generated return-type typedef. */
bool ldv_flush_delayed_work_60(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
bool ldv_flush_delayed_work_61(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
bool ldv_flush_delayed_work_62(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
bool ldv_flush_delayed_work_63(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
bool ldv_flush_delayed_work_64(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___7 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
bool ldv_flush_delayed_work_65(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___8 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
__inline static long ldv__builtin_expect(long exp , long c ) ;
/* CIL expansion of READ_ONCE(): size-dispatched volatile load of *p into
 * *res; sizes other than 1/2/4/8 fall back to __builtin_memcpy bracketed by
 * compiler barriers. */
__inline static void __read_once_size(void const volatile *p , void *res , int size ) { { switch (size) { case 1: *((__u8 *)res) = *((__u8 volatile *)p); goto ldv_880; case 2: *((__u16 *)res) = *((__u16 volatile *)p); goto ldv_880; case 4: *((__u32 *)res) = *((__u32 volatile *)p); goto ldv_880; case 8: *((__u64 *)res) = *((__u64 volatile *)p); goto ldv_880; default: __asm__ volatile ("": : : "memory"); __builtin_memcpy(res, (void const *)p, (unsigned long )size); __asm__ volatile ("": : : "memory"); } ldv_880: ; return; } }
/* x86 find-first-set via BSFL; returns a 1-based bit index. NOTE(review):
 * presumably the "0" (-1) preload makes ffs(0) == 0 when BSF leaves the
 * destination untouched -- kernel convention, confirm against arch docs. */
__inline static int ffs(int x ) { int r ; { __asm__ ("bsfl %1,%0": "=r" (r): "rm" (x), "0" (-1)); return (r + 1); } }
/* True iff n is a nonzero power of two ((n-1) & n clears the lowest set
 * bit; zero result means exactly one bit was set). */
__inline static bool is_power_of_2(unsigned long n ) { { return ((bool )(n != 0UL && ((n - 1UL) & n) == 0UL)); } }
/* Initialise a list head to the empty (self-linked) state. */
__inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } }
extern void
__list_add(struct list_head * , struct list_head * , struct list_head * ) ;
/* Insert new immediately after head (CIL expansion of list_add()). */
__inline static void list_add(struct list_head *new , struct list_head *head ) { { __list_add(new, head, head->next); return; } }
extern void __list_del_entry(struct list_head * ) ;
/* Unlink entry, then re-initialise it as an empty (self-linked) head so a
 * later list_empty()/list_del_init() on it stays well-defined. */
__inline static void list_del_init(struct list_head *entry ) { { __list_del_entry(entry); INIT_LIST_HEAD(entry); return; } }
/* Empty test: the head's next pointer points back at the head itself. */
__inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } }
extern void warn_slowpath_null(char const * , int const ) ;
extern int memcmp(void const * , void const * , size_t ) ;
/* Paravirt arch_local_save_flags(): if pv_irq_ops.save_fl.func is NULL,
 * trap via ud2 and spin on label ldv_4860 forever (CIL rendering of BUG());
 * otherwise perform the patchable indirect call and return the flags value
 * left in __eax by the callee. */
__inline static unsigned long arch_local_save_flags___0(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } }
/* 64-by-32-bit division: returns the quotient and stores the remainder. */
__inline static u64 div_u64_rem(u64 dividend , u32 divisor , u32 *remainder ) { { *remainder = (u32 )(dividend % (u64 )divisor); return (dividend / (u64 )divisor); } }
/* Quotient-only convenience wrapper around div_u64_rem(). */
__inline static u64 div_u64(u64 dividend , u32 divisor ) { u32 remainder ; u64 tmp ; { tmp = div_u64_rem(dividend, divisor, & remainder); return (tmp); } }
extern void
__cmpxchg_wrong_size(void) ; extern void __xadd_wrong_size(void) ; __inline static int atomic_sub_and_test(int i , atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %2, %0; sete %1": "+m" (v->counter), "=qm" (c): "er" (i): "memory"); return ((int )((signed char )c) != 0); } } __inline static int atomic_dec_and_test(atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((int )((signed char )c) != 0); } } __inline static int atomic_add_return(int i , atomic_t *v ) { int __ret ; { __ret = i; switch (4UL) { case 1UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddb %b0, %1\n": "+q" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; case 2UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddw %w0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; case 4UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddl %0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; case 8UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddq %q0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5659; default: __xadd_wrong_size(); } ldv_5659: ; return (__ret + i); } } __inline static int atomic_cmpxchg(atomic_t *v , int old , int new ) { int __ret ; int __old ; int __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 volatile *__ptr___2 ; { __old = old; __new = new; switch (4UL) { case 1UL: __ptr = (u8 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - 
.\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_5679; case 2UL: __ptr___0 = (u16 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_5679; case 4UL: __ptr___1 = (u32 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_5679; case 8UL: __ptr___2 = (u64 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_5679; default: __cmpxchg_wrong_size(); } ldv_5679: ; return (__ret); } } __inline static int __atomic_add_unless(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5708: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5707; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5707; } else { } c = old; goto ldv_5708; ldv_5707: ; return (c); } } __inline static int atomic_add_unless(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless(v, a, u); return (tmp != u); } } __inline static void __preempt_count_add(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (val)); } goto ldv_6059; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ 
== -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6059; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6059; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (val)); } goto ldv_6059; default: __bad_percpu_size(); } ldv_6059: ; return; } } __inline static void __preempt_count_sub(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (- val)); } goto ldv_6071; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6071; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6071; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (- val)); } goto ldv_6071; default: __bad_percpu_size(); } ldv_6071: ; return; } } extern int debug_locks ; extern void lockdep_init_map(struct lockdep_map * , char const * , struct 
lock_class_key * , int ) ; extern int lock_is_held(struct lockdep_map * ) ; extern void lockdep_rcu_suspicious(char const * , int const , char const * ) ; __inline static int static_key_count(struct static_key *key ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& key->enabled)); return (tmp); } } __inline static bool static_key_false(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int queued_spin_is_locked(struct qspinlock *lock ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& lock->val)); return (tmp); } } extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->__annonCompField18.rlock); } } __inline static void spin_lock(spinlock_t *lock ) { { _raw_spin_lock(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { _raw_spin_unlock(& lock->__annonCompField18.rlock); return; } } extern void __mutex_init(struct mutex * , char const * , struct lock_class_key * ) ; __inline static int mutex_is_locked(struct mutex *lock ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& lock->count)); return (tmp != 1); } } extern unsigned int tsc_khz ; extern unsigned int jiffies_to_msecs(unsigned long const ) ; extern ktime_t ktime_get_raw(void) ; __inline static u64 ktime_get_raw_ns(void) { ktime_t tmp ; { tmp = ktime_get_raw(); return ((u64 )tmp.tv64); } } extern bool rcu_is_watching(void) ; extern bool rcu_lockdep_current_cpu_online(void) ; extern struct lockdep_map rcu_sched_lock_map ; extern int debug_lockdep_rcu_enabled(void) ; __inline static int rcu_read_lock_sched_held(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool 
tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___0(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } __inline static void rcu_read_lock_sched_notrace(void) { { __preempt_count_add(1); __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_unlock_sched_notrace(void) { { __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } extern void init_timer_key(struct timer_list * , unsigned int , char const * , struct lock_class_key * ) ; extern unsigned long round_jiffies_up_relative(unsigned long ) ; extern void delayed_work_timer_fn(unsigned long ) ; extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *system_wq ; bool ldv_queue_work_on_81(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_83(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_82(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_85(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void 
ldv_flush_workqueue_84(struct workqueue_struct *ldv_func_arg1 ) ;
bool ldv_flush_delayed_work_86(struct delayed_work *ldv_func_arg1 ) ;
/* queue_work() expansion, routed through LDV wrapper 81. NOTE(review): the
 * literal 8192 is presumably the CIL-expanded WORK_CPU_UNBOUND cpu argument
 * -- confirm against the kernel workqueue headers. */
__inline static bool queue_work(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_81(8192, wq, work); return (tmp); } }
/* queue_delayed_work() expansion via LDV wrapper 82 (same 8192 constant). */
__inline static bool queue_delayed_work(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_82(8192, wq, dwork, delay); return (tmp); } }
/* schedule_delayed_work(): queue dwork on the global system_wq. */
__inline static bool schedule_delayed_work(struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work(system_wq, dwork, delay); return (tmp); } }
/* kref_get(): increment the refcount and WARN (once) when the
 * post-increment value is <= 1, i.e. a reference was taken on an object
 * whose count had already dropped to zero. NOTE(review): __warned is a
 * plain local here (a `static bool` inside WARN_ONCE in the original
 * kernel source) and is read before any write on the warning path -- an
 * artifact of the CIL translation, left as generated. */
__inline static void kref_get(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } }
/* kref_sub(): WARN on a NULL release callback, subtract `count` references
 * and invoke release() when the count reaches zero; returns 1 iff the
 * object was released. */
__inline static int kref_sub(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } }
/* kref_put(): drop exactly one reference via kref_sub(). */
__inline static int kref_put(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub(kref, 1U, release); return (tmp); } }
/* kref_put_mutex(): lock-free fast path -- atomic_add_unless(-1, 1) drops a
 * reference unless this is the last one. For the final reference, take
 * `lock`, re-check with atomic_dec_and_test(), and if another ref appeared
 * meanwhile unlock and return 0; otherwise call release() with the mutex
 * still held (NOTE(review): release is presumably expected to unlock it --
 * confirm with callers) and return 1. */
__inline static int kref_put_mutex(struct kref *kref , void (*release)(struct kref * ) , struct mutex *lock ) { int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 138); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = atomic_add_unless(& kref->refcount, -1, 1); tmp___3 = ldv__builtin_expect(tmp___2 == 0, 0L); if (tmp___3 != 0L) { mutex_lock_nested(lock, 0U); tmp___0 = atomic_dec_and_test(& kref->refcount); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { mutex_unlock(lock); return (0); } else { } (*release)(kref); return (1); } else { } return (0); } }
extern void *__symbol_get(char const * ) ;
extern void __symbol_put(char const * ) ;
void invoke_work_1(void) ; void
call_and_disable_work_1(struct work_struct *work ) ; void disable_work_1(struct work_struct *work ) ; void disable_work_2(struct work_struct *work ) ; void call_and_disable_work_2(struct work_struct *work ) ; void activate_work_1(struct work_struct *work , int state ) ; void activate_work_2(struct work_struct *work , int state ) ; void call_and_disable_all_2(int state ) ; void invoke_work_2(void) ; void call_and_disable_all_1(int state ) ; extern struct cpufreq_policy *cpufreq_cpu_get(unsigned int ) ; extern void cpufreq_cpu_put(struct cpufreq_policy * ) ; extern int drm_format_plane_cpp(uint32_t , int ) ; extern void __const_udelay(unsigned long ) ; extern void drm_wait_one_vblank(struct drm_device * , int ) ; __inline static bool drm_can_sleep___0(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_40327; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40327; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40327; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40327; default: __bad_percpu_size(); } ldv_40327: pscr_ret__ = pfo_ret__; goto ldv_40333; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40337; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40337; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40337; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40337; default: __bad_percpu_size(); } ldv_40337: pscr_ret__ = 
pfo_ret_____0; goto ldv_40333; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40346; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40346; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40346; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40346; default: __bad_percpu_size(); } ldv_40346: pscr_ret__ = pfo_ret_____1; goto ldv_40333; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40355; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40355; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40355; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40355; default: __bad_percpu_size(); } ldv_40355: pscr_ret__ = pfo_ret_____2; goto ldv_40333; default: __bad_size_call_parameter(); goto ldv_40333; } ldv_40333: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___0(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } bool intel_ring_initialized(struct intel_engine_cs *ring ) ; extern void drm_gem_object_free(struct kref * ) ; __inline static void drm_gem_object_unreference(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put(& obj->refcount, & drm_gem_object_free); } else { } return; } } spinlock_t mchdev_lock ; __inline static uint16_t skl_ddb_entry_size(struct skl_ddb_entry const *entry ) { { return ((int )((uint16_t )entry->end) - (int )((uint16_t )entry->start)); } } void i915_gem_request_free(struct kref *req_ref ) ; __inline static struct drm_i915_gem_request *i915_gem_request_reference(struct 
drm_i915_gem_request *req ) { {
/* Tail of i915_gem_request_reference() (signature begins on the previous
 * source line): take a kref on the request when non-NULL and return the
 * same pointer for call chaining. */
if ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0)) { kref_get(& req->ref); } else { } return (req); } }
/* Drop a request reference without holding struct_mutex: kref_put_mutex()
 * acquires dev->struct_mutex only for the final put (when it calls
 * i915_gem_request_free and returns nonzero), and the mutex is released
 * here afterwards. No-op for a NULL request. */
__inline static void i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0)) { return; } else { } dev = (req->ring)->dev; tmp = kref_put_mutex(& req->ref, & i915_gem_request_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { } return; } }
unsigned long i915_read_mch_val(void) ;
bool i915_gpu_raise(void) ;
bool i915_gpu_lower(void) ;
bool i915_gpu_busy(void) ;
bool i915_gpu_turbo_disable(void) ;
/* Look up the CRTC currently mapped to a hardware plane index via the
 * device-private plane_to_crtc_mapping table. */
__inline static struct drm_crtc *intel_get_crtc_for_plane(struct drm_device *dev , int plane ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; return (dev_priv->plane_to_crtc_mapping[plane]); } }
/* Number of planes on this CRTC's pipe: the per-pipe sprite count plus one
 * (presumably the primary plane -- see info.num_sprites usage). */
__inline static unsigned int intel_num_planes(struct intel_crtc *crtc ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)crtc->base.dev); return ((unsigned int )((int )__p->info.num_sprites[(int )crtc->pipe] + 1)); } }
void gen6_reset_rps_interrupts(struct drm_device *dev ) ;
void gen6_enable_rps_interrupts(struct drm_device *dev ) ;
void gen6_disable_rps_interrupts(struct drm_device *dev ) ;
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv , u32 mask ) ;
/* Thin wrapper over drm_wait_one_vblank() for the given pipe. */
__inline static void intel_wait_for_vblank(struct drm_device *dev , int pipe ) { { drm_wait_one_vblank(dev, pipe); return; } }
/* Tests rotation against mask 10UL (bits 1 and 3 set). NOTE(review):
 * presumably BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270) -- confirm against
 * the drm rotation flag definitions. */
__inline static bool intel_rotation_90_or_270(unsigned int rotation ) { { return (((unsigned long )rotation & 10UL) != 0UL); } }
bool intel_crtc_active(struct drm_crtc *crtc ) ;
bool intel_fbc_enabled(struct drm_device *dev ) ;
void intel_fbc_init(struct drm_i915_private *dev_priv ) ;
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv ) ;
int ilk_wm_max_level(struct
drm_device const *dev ) ; void intel_update_watermarks(struct drm_crtc *crtc ) ; void intel_update_sprite_watermarks(struct drm_plane *plane , struct drm_crtc *crtc , uint32_t sprite_width , uint32_t sprite_height , int pixel_size , bool enabled , bool scaled ) ; void intel_init_pm(struct drm_device *dev ) ; void intel_pm_setup(struct drm_device *dev ) ; void intel_gpu_ips_init(struct drm_i915_private *dev_priv ) ; void intel_gpu_ips_teardown(void) ; void intel_init_gt_powersave(struct drm_device *dev ) ; void intel_cleanup_gt_powersave(struct drm_device *dev ) ; void intel_disable_gt_powersave(struct drm_device *dev ) ; void gen6_rps_busy(struct drm_i915_private *dev_priv ) ; void gen6_rps_reset_ei(struct drm_i915_private *dev_priv ) ; void gen6_rps_idle(struct drm_i915_private *dev_priv ) ; void gen6_rps_boost(struct drm_i915_private *dev_priv , struct intel_rps_client *rps , unsigned long submitted ) ; void intel_queue_rps_boost_for_request(struct drm_device *dev , struct drm_i915_gem_request *req ) ; void ilk_wm_get_hw_state(struct drm_device *dev ) ; void skl_wm_get_hw_state(struct drm_device *dev ) ; void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv , struct skl_ddb_allocation *ddb ) ; struct tracepoint __tracepoint_intel_gpu_freq_change ; __inline static void trace_intel_gpu_freq_change(u32 freq ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_416 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_418 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false(& __tracepoint_intel_gpu_freq_change.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_intel_gpu_freq_change.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; 
________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 671, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_47105: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , u32 ))it_func))(__data, freq); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_47105; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_intel_gpu_freq_change.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 671, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } unsigned long i915_chipset_val(struct drm_i915_private *dev_priv ) ; unsigned long i915_mch_val(struct drm_i915_private *dev_priv ) ; unsigned long i915_gfx_val(struct drm_i915_private *dev_priv ) ; void i915_update_gfx_val(struct drm_i915_private *dev_priv ) ; void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) ; void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) ; __inline static bool intel_vgpu_active(struct drm_device *dev ) { struct drm_i915_private *tmp ; { tmp = to_i915((struct drm_device const *)dev); return (tmp->vgpu.active); } } __inline static bool i915_seqno_passed(uint32_t seq1 , uint32_t seq2 ) { { return ((int )(seq1 - seq2) >= 0); } } __inline static bool i915_gem_request_completed(struct drm_i915_gem_request *req , bool lazy_coherency ) { u32 seqno ; long tmp ; bool tmp___0 ; { tmp = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2806), "i" (12UL)); ldv_47734: ; goto ldv_47734; } else { } seqno = (*((req->ring)->get_seqno))(req->ring, (int )lazy_coherency); tmp___0 = i915_seqno_passed(seqno, req->seqno); return (tmp___0); } } struct drm_i915_gem_object *i915_gem_object_create_stolen(struct drm_device *dev , u32 size ) ; struct drm_i915_gem_object 
*i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev , u32 stolen_offset , u32 gtt_offset , u32 size ) ; bool ironlake_set_drps(struct drm_device *dev , u8 val ) ; void intel_set_memory_cxsr(struct drm_i915_private *dev_priv , bool enable ) ; int sandybridge_pcode_read(struct drm_i915_private *dev_priv , u32 mbox , u32 *val ) ; int sandybridge_pcode_write(struct drm_i915_private *dev_priv , u32 mbox , u32 val ) ; void vlv_punit_write(struct drm_i915_private *dev_priv , u32 addr , u32 val ) ; u32 vlv_nc_read(struct drm_i915_private *dev_priv , u8 addr ) ; u32 vlv_cck_read(struct drm_i915_private *dev_priv , u32 reg ) ; static void gen9_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 45340L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45340L, tmp | 4U, 1); return; } } static void skl_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; uint32_t tmp___1 ; int _a ; struct drm_i915_private *__p___0 ; uint32_t tmp___2 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; gen9_init_clock_gating(dev); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((__p->dev)->pdev)->revision <= 1U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37936L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37936L, tmp | 16793600U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37892L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37892L, tmp___0 | 2147483648U, 1); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((__p___0->dev)->pdev)->revision <= 3U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16528L, tmp___1 | 
33554432U, 1); _a = 256; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8420L, (uint32_t )((_a << 16) | _a), 1); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((__p___1->dev)->pdev)->revision <= 4U) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 45336L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45336L, tmp___2 | 134217728U, 1); } else { } return; } } static void bxt_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; gen9_init_clock_gating(dev); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37936L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37936L, tmp | 268451840U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1052672L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1052672L, tmp___0 | 2U, 1); return; } } static void i915_pineview_get_mem_freq(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 68608L, 1); switch (tmp & 7U) { case 1U: dev_priv->fsb_freq = 533U; goto ldv_48392; case 2U: dev_priv->fsb_freq = 800U; goto ldv_48392; case 3U: dev_priv->fsb_freq = 667U; goto ldv_48392; case 5U: dev_priv->fsb_freq = 400U; goto ldv_48392; } ldv_48392: ; switch (tmp & 112U) { case 16U: dev_priv->mem_freq = 533U; goto ldv_48397; case 32U: dev_priv->mem_freq = 667U; goto ldv_48397; case 48U: dev_priv->mem_freq = 800U; goto ldv_48397; } ldv_48397: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 65960L, 1); dev_priv->is_ddr3 = (tmp & 4U) != 0U; return; } } static void i915_ironlake_get_mem_freq(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u16 ddrpll ; u16 csipll ; long tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ddrpll = 
(*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 76832L, 1); csipll = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 76816L, 1); switch ((int )ddrpll & 255) { case 12: dev_priv->mem_freq = 800U; goto ldv_48407; case 16: dev_priv->mem_freq = 1066U; goto ldv_48407; case 20: dev_priv->mem_freq = 1333U; goto ldv_48407; case 24: dev_priv->mem_freq = 1600U; goto ldv_48407; default: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_ironlake_get_mem_freq", "unknown memory frequency 0x%02x\n", (int )ddrpll & 255); } else { } dev_priv->mem_freq = 0U; goto ldv_48407; } ldv_48407: dev_priv->ips.r_t = (int )dev_priv->mem_freq; switch ((int )csipll & 1023) { case 12: dev_priv->fsb_freq = 3200U; goto ldv_48414; case 14: dev_priv->fsb_freq = 3733U; goto ldv_48414; case 16: dev_priv->fsb_freq = 4266U; goto ldv_48414; case 18: dev_priv->fsb_freq = 4800U; goto ldv_48414; case 20: dev_priv->fsb_freq = 5333U; goto ldv_48414; case 22: dev_priv->fsb_freq = 5866U; goto ldv_48414; case 24: dev_priv->fsb_freq = 6400U; goto ldv_48414; default: tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_ironlake_get_mem_freq", "unknown fsb frequency 0x%04x\n", (int )csipll & 1023); } else { } dev_priv->fsb_freq = 0U; goto ldv_48414; } ldv_48414: ; if (dev_priv->fsb_freq == 3200U) { dev_priv->ips.c_m = 0; } else if (dev_priv->fsb_freq > 3200U && dev_priv->fsb_freq <= 4800U) { dev_priv->ips.c_m = 1; } else { dev_priv->ips.c_m = 2; } return; } } static struct cxsr_latency const cxsr_latency_table[30U] = { {1, 0, 800UL, 400UL, 3382UL, 33382UL, 3983UL, 33983UL}, {1, 0, 800UL, 667UL, 3354UL, 33354UL, 3807UL, 33807UL}, {1, 0, 800UL, 800UL, 3347UL, 33347UL, 3763UL, 33763UL}, {1, 1, 800UL, 667UL, 6420UL, 36420UL, 6873UL, 36873UL}, {1, 1, 800UL, 800UL, 5902UL, 35902UL, 6318UL, 36318UL}, {1, 0, 667UL, 400UL, 3400UL, 33400UL, 4021UL, 34021UL}, {1, 0, 667UL, 667UL, 3372UL, 33372UL, 3845UL, 33845UL}, {1, 0, 
667UL, 800UL, 3386UL, 33386UL, 3822UL, 33822UL}, {1, 1, 667UL, 667UL, 6438UL, 36438UL, 6911UL, 36911UL}, {1, 1, 667UL, 800UL, 5941UL, 35941UL, 6377UL, 36377UL}, {1, 0, 400UL, 400UL, 3472UL, 33472UL, 4173UL, 34173UL}, {1, 0, 400UL, 667UL, 3443UL, 33443UL, 3996UL, 33996UL}, {1, 0, 400UL, 800UL, 3430UL, 33430UL, 3946UL, 33946UL}, {1, 1, 400UL, 667UL, 6509UL, 36509UL, 7062UL, 37062UL}, {1, 1, 400UL, 800UL, 5985UL, 35985UL, 6501UL, 36501UL}, {0, 0, 800UL, 400UL, 3438UL, 33438UL, 4065UL, 34065UL}, {0, 0, 800UL, 667UL, 3410UL, 33410UL, 3889UL, 33889UL}, {0, 0, 800UL, 800UL, 3403UL, 33403UL, 3845UL, 33845UL}, {0, 1, 800UL, 667UL, 6476UL, 36476UL, 6955UL, 36955UL}, {0, 1, 800UL, 800UL, 5958UL, 35958UL, 6400UL, 36400UL}, {0, 0, 667UL, 400UL, 3456UL, 33456UL, 4103UL, 34106UL}, {0, 0, 667UL, 667UL, 3428UL, 33428UL, 3927UL, 33927UL}, {0, 0, 667UL, 800UL, 3443UL, 33443UL, 3905UL, 33905UL}, {0, 1, 667UL, 667UL, 6494UL, 36494UL, 6993UL, 36993UL}, {0, 1, 667UL, 800UL, 5998UL, 35998UL, 6460UL, 36460UL}, {0, 0, 400UL, 400UL, 3528UL, 33528UL, 4255UL, 34255UL}, {0, 0, 400UL, 667UL, 3500UL, 33500UL, 4079UL, 34079UL}, {0, 0, 400UL, 800UL, 3487UL, 33487UL, 4029UL, 34029UL}, {0, 1, 400UL, 667UL, 6566UL, 36566UL, 7145UL, 37145UL}, {0, 1, 400UL, 800UL, 6042UL, 36042UL, 6584UL, 36584UL}}; static struct cxsr_latency const *intel_get_cxsr_latency(int is_desktop , int is_ddr3 , int fsb , int mem ) { struct cxsr_latency const *latency ; int i ; long tmp ; { if (fsb == 0 || mem == 0) { return ((struct cxsr_latency const *)0); } else { } i = 0; goto ldv_48434; ldv_48433: latency = (struct cxsr_latency const *)(& cxsr_latency_table) + (unsigned long )i; if ((((int )latency->is_desktop == is_desktop && (int )latency->is_ddr3 == is_ddr3) && (unsigned long )fsb == (unsigned long )latency->fsb_freq) && (unsigned long )mem == (unsigned long )latency->mem_freq) { return (latency); } else { } i = i + 1; ldv_48434: ; if ((unsigned int )i <= 29U) { goto ldv_48433; } else { } tmp = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_get_cxsr_latency", "Unknown FSB/MEM found, disable CxSR\n"); } else { } return ((struct cxsr_latency const *)0); } } static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv , bool enable ) { u32 val ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; u32 tmp___0 ; bool tmp___1 ; u32 tmp___2 ; { mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); val = vlv_punit_read(dev_priv, 313U); if ((int )enable) { val = val & 4294967294U; } else { val = val | 1U; } val = val & 4294967293U; val = val | 256U; vlv_punit_write(dev_priv, 313U, val); tmp = msecs_to_jiffies(3U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48452; ldv_48451: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = vlv_punit_read(dev_priv, 313U); if ((tmp___0 & 256U) != 0U) { ret__ = -110; } else { } goto ldv_48450; } else { } tmp___1 = drm_can_sleep___0(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48452: tmp___2 = vlv_punit_read(dev_priv, 313U); if ((tmp___2 & 256U) != 0U) { goto ldv_48451; } else { } ldv_48450: ; if (ret__ != 0) { drm_err("timed out waiting for Punit DDR DVFS request\n"); } else { } mutex_unlock(& dev_priv->rps.hw_lock); return; } } static void chv_set_memory_pm5(struct drm_i915_private *dev_priv , bool enable ) { u32 val ; { mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); val = vlv_punit_read(dev_priv, 54U); if ((int )enable) { val = val | 64U; } else { val = val & 4294967231U; } vlv_punit_write(dev_priv, 54U, val); mutex_unlock(& dev_priv->rps.hw_lock); return; } } extern void __compiletime_assert_347(void) ; extern void __compiletime_assert_351(void) ; void intel_set_memory_cxsr(struct drm_i915_private *dev_priv , bool enable ) { struct drm_device *dev ; u32 val ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp ; int _a ; bool __cond___4 ; bool __cond___5 ; bool 
__cond___6 ; long tmp___0 ; int _a___0 ; bool __cond___10 ; bool __cond___11 ; bool __cond___12 ; int tmp___1 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; long tmp___2 ; { dev = dev_priv->dev; __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1598720L, (int )enable ? 32768U : 0U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { chv_set_memory_pm5(dev_priv, (int )enable); } else { } } else { } } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 44UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8416L, (int )enable ? 32768U : 0U, 1); } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8416L, (int )enable ? 32768U : 0U, 1); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 44UL) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), 1); val = tmp & 3221225471U; val = ((int )enable ? 
1073741824U : 0U) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), val, 1); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 10098U) { goto _L; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { _L: /* CIL Label */ if ((int )enable) { _a = 32768; tmp___0 = (_a << 16) | _a; } else { __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_347(); } else { } __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_347(); } else { } __cond___6 = 0; if ((int )__cond___6) { __compiletime_assert_347(); } else { } tmp___0 = (-0x7FFFFFFF-1); } val = (u32 )tmp___0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8416L, val, 1); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 9618U) { if ((int )enable) { _a___0 = 4096; tmp___1 = (_a___0 << 16) | _a___0; } else { __cond___10 = 0; if ((int )__cond___10) { __compiletime_assert_351(); } else { } __cond___11 = 0; if ((int )__cond___11) { __compiletime_assert_351(); } else { } __cond___12 = 0; if ((int )__cond___12) { __compiletime_assert_351(); } else { } tmp___1 = 268435456; } val = (u32 )tmp___1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8384L, val, 1); } else { return; } } } } } } } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_set_memory_cxsr", "memory self-refresh is %s\n", (int )enable ? 
(char *)"enabled" : (char *)"disabled"); } else { } return; } } static int const pessimal_latency_ns = 5000; static int vlv_get_fifo_size(struct drm_device *dev , enum pipe pipe , int plane ) { struct drm_i915_private *dev_priv ; int sprite0_start ; int sprite1_start ; int size ; uint32_t dsparb ; uint32_t dsparb2 ; uint32_t dsparb3 ; struct drm_i915_private *__p ; int tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; switch ((int )pipe) { case 0: dsparb = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458800U), 1); dsparb2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031712L, 1); sprite0_start = (int )((dsparb & 255U) | ((dsparb2 & 1U) << 8)); sprite1_start = (int )(((dsparb >> 8) & 255U) | (((dsparb2 >> 4) & 1U) << 8)); goto ldv_48590; case 1: dsparb = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458800U), 1); dsparb2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031712L, 1); sprite0_start = (int )(((dsparb >> 16) & 255U) | (dsparb2 & 256U)); sprite1_start = (int )((dsparb >> 24) | (((dsparb2 >> 12) & 1U) << 8)); goto ldv_48590; case 2: dsparb2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031712L, 1); dsparb3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031724L, 1); sprite0_start = (int )((dsparb3 & 255U) | (((dsparb2 >> 16) & 1U) << 8)); sprite1_start = (int )(((dsparb3 >> 8) & 255U) | (((dsparb2 >> 20) & 1U) << 8)); goto ldv_48590; default: ; return (0); } ldv_48590: ; switch (plane) { case 0: size = sprite0_start; goto ldv_48595; case 1: size = sprite1_start - sprite0_start; goto ldv_48595; case 2: size = 511 - sprite1_start; goto ldv_48595; default: ; return (0); } ldv_48595: tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { if (plane == 0) { tmp = (int )pipe + 65; } else { __p = to_i915((struct drm_device const *)dev); tmp = ((int 
)__p->info.num_sprites[(int )pipe] * (int )pipe + (plane + -1)) + 65; } drm_ut_debug_printk("vlv_get_fifo_size", "Pipe %c %s %c FIFO size: %d\n", (int )pipe + 65, plane == 0 ? (char *)"primary" : (char *)"sprite", tmp, size); } else { } return (size); } } static int i9xx_get_fifo_size(struct drm_device *dev , int plane ) { struct drm_i915_private *dev_priv ; uint32_t dsparb ; uint32_t tmp ; int size ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458800U), 1); dsparb = tmp; size = (int )dsparb & 127; if (plane != 0) { size = (int )(((dsparb >> 7) & 127U) - (uint32_t )size); } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i9xx_get_fifo_size", "FIFO size - (0x%08x) %s: %d\n", dsparb, plane != 0 ? (char *)"B" : (char *)"A", size); } else { } return (size); } } static int i830_get_fifo_size(struct drm_device *dev , int plane ) { struct drm_i915_private *dev_priv ; uint32_t dsparb ; uint32_t tmp ; int size ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458800U), 1); dsparb = tmp; size = (int )dsparb & 511; if (plane != 0) { size = (int )(((dsparb >> 9) & 511U) - (uint32_t )size); } else { } size = size >> 1; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i830_get_fifo_size", "FIFO size - (0x%08x) %s: %d\n", dsparb, plane != 0 ? 
(char *)"B" : (char *)"A", size); } else { } return (size); } } static int i845_get_fifo_size(struct drm_device *dev , int plane ) { struct drm_i915_private *dev_priv ; uint32_t dsparb ; uint32_t tmp ; int size ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458800U), 1); dsparb = tmp; size = (int )dsparb & 127; size = size >> 2; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i845_get_fifo_size", "FIFO size - (0x%08x) %s: %d\n", dsparb, plane != 0 ? (char *)"B" : (char *)"A", size); } else { } return (size); } } static struct intel_watermark_params const pineview_display_wm = {512UL, 511UL, 63UL, 10UL, 64UL}; static struct intel_watermark_params const pineview_display_hplloff_wm = {512UL, 511UL, 0UL, 10UL, 64UL}; static struct intel_watermark_params const pineview_cursor_wm = {64UL, 63UL, 0UL, 5UL, 64UL}; static struct intel_watermark_params const pineview_cursor_hplloff_wm = {64UL, 63UL, 0UL, 5UL, 64UL}; static struct intel_watermark_params const g4x_wm_info = {127UL, 63UL, 63UL, 2UL, 64UL}; static struct intel_watermark_params const g4x_cursor_wm_info = {64UL, 32UL, 8UL, 2UL, 64UL}; static struct intel_watermark_params const i965_cursor_wm_info = {64UL, 32UL, 8UL, 2UL, 64UL}; static struct intel_watermark_params const i945_wm_info = {127UL, 63UL, 1UL, 2UL, 64UL}; static struct intel_watermark_params const i915_wm_info = {95UL, 63UL, 1UL, 2UL, 64UL}; static struct intel_watermark_params const i830_a_wm_info = {127UL, 63UL, 1UL, 2UL, 32UL}; static struct intel_watermark_params const i830_bc_wm_info = {127UL, 31UL, 1UL, 2UL, 32UL}; static struct intel_watermark_params const i845_wm_info = {95UL, 63UL, 1UL, 2UL, 32UL}; static unsigned long intel_calculate_wm(unsigned long clock_in_khz , struct intel_watermark_params const *wm , int fifo_size , int pixel_size , unsigned long 
latency_ns ) { long entries_required ; long wm_size ; long tmp ; long tmp___0 ; { entries_required = (long )((((clock_in_khz / 1000UL) * (unsigned long )pixel_size) * latency_ns) / 1000UL); entries_required = (long )((((unsigned long )wm->cacheline_size + (unsigned long )entries_required) - 1UL) / (unsigned long )wm->cacheline_size); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_calculate_wm", "FIFO entries required for mode: %ld\n", entries_required); } else { } wm_size = (long )((unsigned long )fifo_size - ((unsigned long )wm->guard_size + (unsigned long )entries_required)); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_calculate_wm", "FIFO watermark level: %ld\n", wm_size); } else { } if ((long )wm->max_wm < wm_size) { wm_size = (long )wm->max_wm; } else { } if (wm_size <= 0L) { wm_size = (long )wm->default_wm; } else { } if (wm_size <= 8L) { wm_size = 8L; } else { } return ((unsigned long )wm_size); } } static struct drm_crtc *single_enabled_crtc(struct drm_device *dev ) { struct drm_crtc *crtc ; struct drm_crtc *enabled ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; { enabled = (struct drm_crtc *)0; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_48664; ldv_48663: tmp = intel_crtc_active(crtc); if ((int )tmp) { if ((unsigned long )enabled != (unsigned long )((struct drm_crtc *)0)) { return ((struct drm_crtc *)0); } else { } enabled = crtc; } else { } __mptr___0 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_48664: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_48663; } else { } return (enabled); } } static void pineview_update_wm(struct drm_crtc *unused_crtc ) { struct drm_device *dev ; struct drm_i915_private 
*dev_priv ; struct drm_crtc *crtc ; struct cxsr_latency const *latency ; u32 reg ; unsigned long wm ; struct drm_i915_private *__p ; long tmp ; struct drm_display_mode const *adjusted_mode ; int pixel_size ; int clock ; struct drm_crtc const *__mptr ; long tmp___0 ; long tmp___1 ; { dev = unused_crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); latency = intel_get_cxsr_latency((unsigned int )((unsigned short )__p->info.device_id) == 40961U, (int )dev_priv->is_ddr3, (int )dev_priv->fsb_freq, (int )dev_priv->mem_freq); if ((unsigned long )latency == (unsigned long )((struct cxsr_latency const *)0)) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("pineview_update_wm", "Unknown FSB/MEM found, disable CxSR\n"); } else { } intel_set_memory_cxsr(dev_priv, 0); return; } else { } crtc = single_enabled_crtc(dev); if ((unsigned long )crtc != (unsigned long )((struct drm_crtc *)0)) { pixel_size = (((crtc->primary)->state)->fb)->bits_per_pixel / 8; __mptr = (struct drm_crtc const *)crtc; adjusted_mode = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode); clock = adjusted_mode->crtc_clock; wm = intel_calculate_wm((unsigned long )clock, & pineview_display_wm, (int )pineview_display_wm.fifo_size, pixel_size, latency->display_sr); reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458804U), 1); reg = reg & 8388607U; reg = ((u32 )wm << 23U) | reg; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458804U), reg, 1); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("pineview_update_wm", "DSPFW1 register is %x\n", reg); } else { } wm = intel_calculate_wm((unsigned long )clock, & pineview_cursor_wm, (int )pineview_display_wm.fifo_size, pixel_size, 
latency->cursor_sr); reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), 1); reg = reg & 3238002687U; reg = (((u32 )wm << 24U) & 1056964608U) | reg; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), reg, 1); wm = intel_calculate_wm((unsigned long )clock, & pineview_display_hplloff_wm, (int )pineview_display_hplloff_wm.fifo_size, pixel_size, latency->display_hpll_disable); reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), 1); reg = reg & 4294966784U; reg = ((u32 )wm & 511U) | reg; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), reg, 1); wm = intel_calculate_wm((unsigned long )clock, & pineview_cursor_hplloff_wm, (int )pineview_display_hplloff_wm.fifo_size, pixel_size, latency->cursor_hpll_disable); reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), 1); reg = reg & 4290838527U; reg = (((u32 )wm << 16U) & 4128768U) | reg; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), reg, 1); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("pineview_update_wm", "DSPFW3 register is %x\n", reg); } else { } intel_set_memory_cxsr(dev_priv, 1); } else { intel_set_memory_cxsr(dev_priv, 0); } return; } } static bool g4x_compute_wm0(struct drm_device *dev , int plane , struct intel_watermark_params const *display , int display_latency_ns , struct intel_watermark_params const *cursor , int cursor_latency_ns , int *plane_wm , int *cursor_wm ) { struct drm_crtc *crtc ; struct drm_display_mode const *adjusted_mode ; int htotal ; int hdisplay ; int clock ; int pixel_size ; int line_time_us ; int line_count ; 
int entries ; int tlb_miss ; bool tmp ; int tmp___0 ; struct drm_crtc const *__mptr ; struct drm_crtc const *__mptr___0 ; int _max1 ; int _max2 ; { crtc = intel_get_crtc_for_plane(dev, plane); tmp = intel_crtc_active(crtc); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { *cursor_wm = (int )cursor->guard_size; *plane_wm = (int )display->guard_size; return (0); } else { } __mptr = (struct drm_crtc const *)crtc; adjusted_mode = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode); clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; __mptr___0 = (struct drm_crtc const *)crtc; hdisplay = (((struct intel_crtc *)__mptr___0)->config)->pipe_src_w; pixel_size = (((crtc->primary)->state)->fb)->bits_per_pixel / 8; entries = (((clock * pixel_size) / 1000) * display_latency_ns) / 1000; tlb_miss = (int )((unsigned int )display->fifo_size * (unsigned int )display->cacheline_size + (unsigned int )(hdisplay * -8)); if (tlb_miss > 0) { entries = entries + tlb_miss; } else { } entries = (int )((((unsigned long )entries + (unsigned long )display->cacheline_size) - 1UL) / (unsigned long )display->cacheline_size); *plane_wm = (int )((unsigned int )display->guard_size + (unsigned int )entries); if (*plane_wm > (int )display->max_wm) { *plane_wm = (int )display->max_wm; } else { } _max1 = (htotal * 1000) / clock; _max2 = 1; line_time_us = _max1 > _max2 ? 
_max1 : _max2; line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; entries = (int )((((crtc->cursor)->state)->crtc_w * (uint32_t )line_count) * (uint32_t )pixel_size); tlb_miss = (int )((unsigned int )cursor->fifo_size * (unsigned int )cursor->cacheline_size + (unsigned int )(hdisplay * -8)); if (tlb_miss > 0) { entries = entries + tlb_miss; } else { } entries = (int )((((unsigned long )entries + (unsigned long )cursor->cacheline_size) - 1UL) / (unsigned long )cursor->cacheline_size); *cursor_wm = (int )((unsigned int )cursor->guard_size + (unsigned int )entries); if (*cursor_wm > (int )cursor->max_wm) { *cursor_wm = (int )cursor->max_wm; } else { } return (1); } } static bool g4x_check_srwm(struct drm_device *dev , int display_wm , int cursor_wm , struct intel_watermark_params const *display , struct intel_watermark_params const *cursor ) { long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("g4x_check_srwm", "SR watermark: display plane %d, cursor %d\n", display_wm, cursor_wm); } else { } if ((unsigned long )display_wm > (unsigned long )display->max_wm) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("g4x_check_srwm", "display watermark is too large(%d/%ld), disabling\n", display_wm, display->max_wm); } else { } return (0); } else { } if ((unsigned long )cursor_wm > (unsigned long )cursor->max_wm) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("g4x_check_srwm", "cursor watermark is too large(%d/%ld), disabling\n", cursor_wm, cursor->max_wm); } else { } return (0); } else { } if (display_wm == 0 && cursor_wm == 0) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("g4x_check_srwm", "SR latency is 0, disabling\n"); } else { } return (0); } else { } return (1); } } static bool g4x_compute_srwm(struct 
drm_device *dev , int plane , int latency_ns , struct intel_watermark_params const *display , struct intel_watermark_params const *cursor , int *display_wm , int *cursor_wm ) { struct drm_crtc *crtc ; struct drm_display_mode const *adjusted_mode ; int hdisplay ; int htotal ; int pixel_size ; int clock ; unsigned long line_time_us ; int line_count ; int line_size ; int small ; int large ; int entries ; int tmp ; struct drm_crtc const *__mptr ; struct drm_crtc const *__mptr___0 ; int _max1 ; int _max2 ; int _min1 ; int _min2 ; bool tmp___0 ; { if (latency_ns == 0) { tmp = 0; *cursor_wm = tmp; *display_wm = tmp; return (0); } else { } crtc = intel_get_crtc_for_plane(dev, plane); __mptr = (struct drm_crtc const *)crtc; adjusted_mode = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode); clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; __mptr___0 = (struct drm_crtc const *)crtc; hdisplay = (((struct intel_crtc *)__mptr___0)->config)->pipe_src_w; pixel_size = (((crtc->primary)->state)->fb)->bits_per_pixel / 8; _max1 = (htotal * 1000) / clock; _max2 = 1; line_time_us = (unsigned long )(_max1 > _max2 ? _max1 : _max2); line_count = (int )(((unsigned long )latency_ns / line_time_us + 1000UL) / 1000UL); line_size = hdisplay * pixel_size; small = (((clock * pixel_size) / 1000) * latency_ns) / 1000; large = line_count * line_size; _min1 = small; _min2 = large; entries = (int )((((unsigned long )(_min1 < _min2 ? 
_min1 : _min2) + (unsigned long )display->cacheline_size) - 1UL) / (unsigned long )display->cacheline_size); *display_wm = (int )((unsigned int )display->guard_size + (unsigned int )entries); entries = (int )((uint32_t )(line_count * pixel_size) * ((crtc->cursor)->state)->crtc_w); entries = (int )((((unsigned long )entries + (unsigned long )cursor->cacheline_size) - 1UL) / (unsigned long )cursor->cacheline_size); *cursor_wm = (int )((unsigned int )cursor->guard_size + (unsigned int )entries); tmp___0 = g4x_check_srwm(dev, *display_wm, *cursor_wm, display, cursor); return (tmp___0); } } static void vlv_write_wm_values(struct intel_crtc *crtc , struct vlv_wm_values const *wm ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; enum pipe pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { tmp = to_i915((struct drm_device const *)crtc->base.dev); dev_priv = tmp; pipe = crtc->pipe; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pipe + 507924) * 4), (uint32_t )(((((int )wm->ddl[(int )pipe].cursor << 24) | ((int )wm->ddl[(int )pipe].sprite[1] << 16)) | ((int )wm->ddl[(int )pipe].sprite[0] << 8)) | (int )wm->ddl[(int )pipe].primary), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458804U), (uint32_t )(((((int )wm->sr.plane << 23) | (((int )wm->pipe[1].cursor << 16) & 4128768)) | (((int )wm->pipe[1].primary << 8) & 65535)) | ((int )wm->pipe[0].primary & 255)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458808U), (uint32_t )(((((int )wm->pipe[0].sprite[1] << 16) & 16711680) | (((int )wm->pipe[0].cursor << 8) & 16128)) | ((int )wm->pipe[0].sprite[0] & 255)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), (uint32_t )((int )wm->sr.cursor << 24) & 1056964608U, 1); __p = dev_priv; if ((unsigned int 
)*((unsigned char *)__p + 45UL) != 0U) { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031796L, (uint32_t )((((int )wm->pipe[1].sprite[1] << 16) & 16711680) | ((int )wm->pipe[1].sprite[0] & 255)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031800L, (uint32_t )((((int )wm->pipe[2].sprite[1] << 16) & 16711680) | ((int )wm->pipe[2].sprite[0] & 255)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031740L, (uint32_t )((((int )wm->pipe[2].primary << 16) & 16711680) | ((int )wm->pipe[2].cursor & 63)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031716L, (uint32_t )(((((((((((((int )((unsigned short )wm->sr.plane) >> 9) << 24) & 50331648) | ((((int )wm->pipe[2].sprite[1] >> 8) << 23) & 8388608)) | ((((int )wm->pipe[2].sprite[0] >> 8) << 22) & 4194304)) | ((((int )((unsigned short )wm->pipe[2].primary) >> 8) << 21) & 2097152)) | ((((int )wm->pipe[1].sprite[1] >> 8) << 20) & 1048576)) | ((((int )wm->pipe[1].sprite[0] >> 8) << 16) & 65536)) | ((((int )((unsigned short )wm->pipe[1].primary) >> 8) << 12) & 4096)) | ((((int )wm->pipe[0].sprite[1] >> 8) << 8) & 256)) | ((((int )wm->pipe[0].sprite[0] >> 8) << 4) & 16)) | (((int )((unsigned short )wm->pipe[0].primary) >> 8) & 1)), 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031740L, (uint32_t )((((int )wm->pipe[1].sprite[1] << 16) & 16711680) | ((int )wm->pipe[1].sprite[0] & 255)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031716L, (uint32_t )((((((((((int )((unsigned short )wm->sr.plane) >> 9) << 24) & 50331648) | ((((int )wm->pipe[1].sprite[1] >> 8) << 20) & 1048576)) | ((((int )wm->pipe[1].sprite[0] >> 8) << 16) & 65536)) | ((((int )((unsigned short )wm->pipe[1].primary) >> 8) << 12) & 4096)) | ((((int )wm->pipe[0].sprite[1] >> 8) << 8) & 256)) | ((((int )wm->pipe[0].sprite[0] >> 8) << 4) & 16)) | (((int )((unsigned short )wm->pipe[0].primary) >> 8) & 1)), 1); } } else { 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031740L, (uint32_t )((((int )wm->pipe[1].sprite[1] << 16) & 16711680) | ((int )wm->pipe[1].sprite[0] & 255)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031716L, (uint32_t )((((((((((int )((unsigned short )wm->sr.plane) >> 9) << 24) & 50331648) | ((((int )wm->pipe[1].sprite[1] >> 8) << 20) & 1048576)) | ((((int )wm->pipe[1].sprite[0] >> 8) << 16) & 65536)) | ((((int )((unsigned short )wm->pipe[1].primary) >> 8) << 12) & 4096)) | ((((int )wm->pipe[0].sprite[1] >> 8) << 8) & 256)) | ((((int )wm->pipe[0].sprite[0] >> 8) << 4) & 16)) | (((int )((unsigned short )wm->pipe[0].primary) >> 8) & 1)), 1); } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458804U), 0); dev_priv->wm.__annonCompField83.vlv = *wm; return; } } static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc , struct drm_plane *plane ) { struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int entries ; int prec_mult ; int drain_latency ; int pixel_size ; int clock ; int high_precision ; int tmp ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; clock = (intel_crtc->config)->base.adjusted_mode.crtc_clock; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { tmp = 16; } else { tmp = 64; } } else { tmp = 64; } high_precision = tmp; if (! 
intel_crtc->active || (unsigned long )(plane->state)->fb == (unsigned long )((struct drm_framebuffer *)0)) { return (0U); } else { } __ret_warn_on = clock == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 949, "Pixel clock is zero!\n"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return (0U); } else { } pixel_size = drm_format_plane_cpp(((plane->state)->fb)->pixel_format, 0); __ret_warn_on___0 = pixel_size == 0; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 954, "Pixel size is zero!\n"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { return (0U); } else { } entries = ((clock + 999) / 1000) * pixel_size; prec_mult = high_precision; drain_latency = (prec_mult * 256) / entries; if (drain_latency > 127) { prec_mult = prec_mult / 2; drain_latency = (prec_mult * 256) / entries; } else { } if (drain_latency > 127) { drain_latency = 127; } else { } return ((uint8_t )((prec_mult == high_precision ? -128 : 0) | (int )((signed char )drain_latency))); } } static int vlv_compute_wm(struct intel_crtc *crtc , struct intel_plane *plane , int fifo_size ) { int clock ; int entries ; int pixel_size ; int _min1 ; int _max1 ; int _max2 ; int _min2 ; { if (! 
crtc->active || (unsigned long )(plane->base.state)->fb == (unsigned long )((struct drm_framebuffer *)0)) { return (0); } else { } pixel_size = drm_format_plane_cpp(((plane->base.state)->fb)->pixel_format, 0); clock = (crtc->config)->base.adjusted_mode.crtc_clock; entries = ((clock + 999) / 1000) * pixel_size; _max1 = (entries * 256 + 63) / 64; _max2 = 0; _min1 = _max1 > _max2 ? _max1 : _max2; _min2 = fifo_size + -8; return (fifo_size - (_min1 < _min2 ? _min1 : _min2)); } } static bool vlv_compute_sr_wm(struct drm_device *dev , struct vlv_wm_values *wm ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct drm_crtc *crtc ; enum pipe pipe ; int num_planes ; int fifo_size ; struct intel_plane *plane ; struct drm_crtc const *__mptr ; struct drm_i915_private *__p ; struct drm_crtc const *__mptr___0 ; struct drm_plane const *__mptr___1 ; struct drm_crtc const *__mptr___2 ; int tmp___0 ; struct list_head const *__mptr___3 ; struct drm_crtc const *__mptr___4 ; int tmp___1 ; struct list_head const *__mptr___5 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; pipe = -1; num_planes = 0; fifo_size = 0; wm->sr.plane = 0U; wm->sr.cursor = 0U; crtc = single_enabled_crtc(dev); if ((unsigned long )crtc != (unsigned long )((struct drm_crtc *)0)) { __mptr___0 = (struct drm_crtc const *)crtc; if ((int )((struct intel_crtc *)__mptr___0)->pipe != 2) { __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; num_planes = (((unsigned int )wm->pipe[(int )pipe].primary != 0U) + ((unsigned int )wm->pipe[(int )pipe].sprite[0] != 0U)) + ((unsigned int )wm->pipe[(int )pipe].sprite[1] != 0U); __p = dev_priv; fifo_size = (int )__p->info.num_pipes * 512 + -1; } else { } } else { } if (fifo_size == 0 || num_planes > 1) { return (0); } else { } __mptr___1 = (struct drm_plane const *)crtc->cursor; __mptr___2 = (struct drm_crtc const *)crtc; tmp___0 = vlv_compute_wm((struct intel_crtc *)__mptr___2, (struct intel_plane *)__mptr___1, 
63); wm->sr.cursor = (uint8_t )tmp___0; __mptr___3 = (struct list_head const *)dev->mode_config.plane_list.next; plane = (struct intel_plane *)__mptr___3 + 0xfffffffffffffff8UL; goto ldv_48851; ldv_48850: ; if ((unsigned int )plane->base.type == 2U) { goto ldv_48846; } else { } if ((int )plane->pipe != (int )pipe) { goto ldv_48846; } else { } __mptr___4 = (struct drm_crtc const *)crtc; tmp___1 = vlv_compute_wm((struct intel_crtc *)__mptr___4, plane, fifo_size); wm->sr.plane = (uint16_t )tmp___1; if ((unsigned int )wm->sr.plane != 0U) { goto ldv_48849; } else { } ldv_48846: __mptr___5 = (struct list_head const *)plane->base.head.next; plane = (struct intel_plane *)__mptr___5 + 0xfffffffffffffff8UL; ldv_48851: ; if ((unsigned long )(& plane->base.head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_48850; } else { } ldv_48849: ; return (1); } } static void valleyview_update_wm(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; bool cxsr_enabled ; struct vlv_wm_values wm ; int tmp ; struct drm_plane const *__mptr___0 ; int tmp___0 ; struct drm_plane const *__mptr___1 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; wm = dev_priv->wm.__annonCompField83.vlv; wm.ddl[(int )pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary); tmp = vlv_get_fifo_size(dev, pipe, 0); __mptr___0 = (struct drm_plane const *)crtc->primary; tmp___0 = vlv_compute_wm(intel_crtc, (struct intel_plane *)__mptr___0, tmp); wm.pipe[(int )pipe].primary = (uint16_t )tmp___0; wm.ddl[(int )pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor); __mptr___1 = (struct drm_plane const *)crtc->cursor; tmp___1 = 
vlv_compute_wm(intel_crtc, (struct intel_plane *)__mptr___1, 63); wm.pipe[(int )pipe].cursor = (uint8_t )tmp___1; cxsr_enabled = vlv_compute_sr_wm(dev, & wm); tmp___2 = memcmp((void const *)(& wm), (void const *)(& dev_priv->wm.__annonCompField83.vlv), 40UL); if (tmp___2 == 0) { return; } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("valleyview_update_wm", "Setting FIFO watermarks - %c: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", (int )pipe + 65, (int )wm.pipe[(int )pipe].primary, (int )wm.pipe[(int )pipe].cursor, (int )wm.sr.plane, (int )wm.sr.cursor); } else { } __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { chv_set_memory_dvfs(dev_priv, 0); } else { } } else { } if (! cxsr_enabled) { intel_set_memory_cxsr(dev_priv, 0); } else { } vlv_write_wm_values(intel_crtc, (struct vlv_wm_values const *)(& wm)); if ((int )cxsr_enabled) { intel_set_memory_cxsr(dev_priv, 1); } else { } return; } } static void valleyview_update_sprite_wm(struct drm_plane *plane , struct drm_crtc *crtc , uint32_t sprite_width , uint32_t sprite_height , int pixel_size , bool enabled , bool scaled ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; int sprite ; struct drm_plane const *__mptr___0 ; bool cxsr_enabled ; struct vlv_wm_values wm ; int tmp ; struct drm_plane const *__mptr___1 ; int tmp___0 ; int tmp___1 ; struct drm_i915_private *__p ; long tmp___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; __mptr___0 = (struct drm_plane const *)plane; sprite = ((struct intel_plane *)__mptr___0)->plane; wm = dev_priv->wm.__annonCompField83.vlv; if ((int )enabled) { wm.ddl[(int 
)pipe].sprite[sprite] = vlv_compute_drain_latency(crtc, plane); tmp = vlv_get_fifo_size(dev, pipe, sprite + 1); __mptr___1 = (struct drm_plane const *)plane; tmp___0 = vlv_compute_wm(intel_crtc, (struct intel_plane *)__mptr___1, tmp); wm.pipe[(int )pipe].sprite[sprite] = (uint16_t )tmp___0; } else { wm.ddl[(int )pipe].sprite[sprite] = 0U; wm.pipe[(int )pipe].sprite[sprite] = 0U; } cxsr_enabled = vlv_compute_sr_wm(dev, & wm); tmp___1 = memcmp((void const *)(& wm), (void const *)(& dev_priv->wm.__annonCompField83.vlv), 40UL); if (tmp___1 == 0) { return; } else { } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { __p = to_i915((struct drm_device const *)dev); drm_ut_debug_printk("valleyview_update_sprite_wm", "Setting FIFO watermarks - %c: sprite %c=%d, SR: plane=%d, cursor=%d\n", (int )pipe + 65, ((int )__p->info.num_sprites[(int )pipe] * (int )pipe + sprite) + 65, (int )wm.pipe[(int )pipe].sprite[sprite], (int )wm.sr.plane, (int )wm.sr.cursor); } else { } if (! 
cxsr_enabled) { intel_set_memory_cxsr(dev_priv, 0); } else { } vlv_write_wm_values(intel_crtc, (struct vlv_wm_values const *)(& wm)); if ((int )cxsr_enabled) { intel_set_memory_cxsr(dev_priv, 1); } else { } return; } } static void g4x_update_wm(struct drm_crtc *crtc ) { struct drm_device *dev ; int sr_latency_ns ; struct drm_i915_private *dev_priv ; int planea_wm ; int planeb_wm ; int cursora_wm ; int cursorb_wm ; int plane_sr ; int cursor_sr ; unsigned int enabled ; bool cxsr_enabled ; bool tmp ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; long tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; { dev = crtc->dev; sr_latency_ns = 12000; dev_priv = (struct drm_i915_private *)dev->dev_private; enabled = 0U; tmp = g4x_compute_wm0(dev, 0, & g4x_wm_info, pessimal_latency_ns, & g4x_cursor_wm_info, pessimal_latency_ns, & planea_wm, & cursora_wm); if ((int )tmp) { enabled = enabled | 1U; } else { } tmp___0 = g4x_compute_wm0(dev, 1, & g4x_wm_info, pessimal_latency_ns, & g4x_cursor_wm_info, pessimal_latency_ns, & planeb_wm, & cursorb_wm); if ((int )tmp___0) { enabled = enabled | 2U; } else { } tmp___1 = is_power_of_2((unsigned long )enabled); if ((int )tmp___1) { tmp___2 = ffs((int )enabled); tmp___3 = g4x_compute_srwm(dev, tmp___2 + -1, sr_latency_ns, & g4x_wm_info, & g4x_cursor_wm_info, & plane_sr, & cursor_sr); if ((int )tmp___3) { cxsr_enabled = 1; } else { cxsr_enabled = 0; intel_set_memory_cxsr(dev_priv, 0); cursor_sr = 0; plane_sr = cursor_sr; } } else { cxsr_enabled = 0; intel_set_memory_cxsr(dev_priv, 0); cursor_sr = 0; plane_sr = cursor_sr; } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("g4x_update_wm", "Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", planea_wm, cursora_wm, planeb_wm, cursorb_wm, plane_sr, cursor_sr); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 
458804U), (uint32_t )((((plane_sr << 23) | ((cursorb_wm << 16) & 4128768)) | ((planeb_wm << 8) & 32512)) | (planea_wm & 127)), 1); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458808U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458808U), (tmp___5 & 4294951167U) | ((uint32_t )(cursora_wm << 8) & 16128U), 1); tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), (tmp___6 & 1090519039U) | ((uint32_t )(cursor_sr << 24) & 1056964608U), 1); if ((int )cxsr_enabled) { intel_set_memory_cxsr(dev_priv, 1); } else { } return; } } static void i965_update_wm(struct drm_crtc *unused_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; int srwm ; int cursor_sr ; bool cxsr_enabled ; int sr_latency_ns ; struct drm_display_mode const *adjusted_mode ; struct drm_crtc const *__mptr ; int clock ; int htotal ; int hdisplay ; struct drm_crtc const *__mptr___0 ; int pixel_size ; unsigned long line_time_us ; int entries ; int _max1 ; int _max2 ; long tmp ; long tmp___0 ; long tmp___1 ; { dev = unused_crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; srwm = 1; cursor_sr = 16; crtc = single_enabled_crtc(dev); if ((unsigned long )crtc != (unsigned long )((struct drm_crtc *)0)) { sr_latency_ns = 12000; __mptr = (struct drm_crtc const *)crtc; adjusted_mode = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode); clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; __mptr___0 = (struct drm_crtc const *)crtc; hdisplay = (((struct intel_crtc *)__mptr___0)->config)->pipe_src_w; pixel_size = (((crtc->primary)->state)->fb)->bits_per_pixel / 
8; _max1 = (htotal * 1000) / clock; _max2 = 1; line_time_us = (unsigned long )(_max1 > _max2 ? _max1 : _max2); entries = (int )(((unsigned int )(((unsigned long )sr_latency_ns / line_time_us + 1000UL) / 1000UL) * (unsigned int )((unsigned long )pixel_size)) * (unsigned int )((unsigned long )hdisplay)); entries = (entries + 63) / 64; srwm = 512 - entries; if (srwm < 0) { srwm = 1; } else { } srwm = srwm & 511; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i965_update_wm", "self-refresh entries: %d, wm: %d\n", entries, srwm); } else { } entries = (int )(((unsigned int )(((unsigned long )sr_latency_ns / line_time_us + 1000UL) / 1000UL) * (unsigned int )((unsigned long )pixel_size)) * ((crtc->cursor)->state)->crtc_w); entries = (int )((((unsigned long )entries + (unsigned long )i965_cursor_wm_info.cacheline_size) - 1UL) / (unsigned long )i965_cursor_wm_info.cacheline_size); cursor_sr = (int )((unsigned int )i965_cursor_wm_info.fifo_size - ((unsigned int )i965_cursor_wm_info.guard_size + (unsigned int )entries)); if ((unsigned long )cursor_sr > (unsigned long )i965_cursor_wm_info.max_wm) { cursor_sr = (int )i965_cursor_wm_info.max_wm; } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i965_update_wm", "self-refresh watermark: display plane %d cursor %d\n", srwm, cursor_sr); } else { } cxsr_enabled = 1; } else { cxsr_enabled = 0; intel_set_memory_cxsr(dev_priv, 0); } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i965_update_wm", "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", srwm); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458804U), (uint32_t )((srwm << 23) | 526344), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458808U), 2056U, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), (uint32_t )(cursor_sr << 24) & 1056964608U, 1); if ((int )cxsr_enabled) { intel_set_memory_cxsr(dev_priv, 1); } else { } return; } } static void i9xx_update_wm(struct drm_crtc *unused_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_watermark_params const *wm_info ; uint32_t fwater_lo ; uint32_t fwater_hi ; int cwm ; int srwm ; int fifo_size ; int planea_wm ; int planeb_wm ; struct drm_crtc *crtc ; struct drm_crtc *enabled ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_display_mode const *adjusted_mode ; int cpp ; struct drm_i915_private *__p___1 ; struct drm_crtc const *__mptr ; unsigned long tmp ; bool tmp___0 ; struct drm_i915_private *__p___2 ; struct drm_display_mode const *adjusted_mode___0 ; int cpp___0 ; struct drm_i915_private *__p___3 ; struct drm_crtc const *__mptr___0 ; unsigned long tmp___1 ; bool tmp___2 ; long tmp___3 ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr___1 ; struct drm_i915_private *__p___4 ; int sr_latency_ns ; struct drm_display_mode const *adjusted_mode___1 ; struct drm_crtc const *__mptr___2 ; int clock ; int htotal ; int hdisplay ; struct drm_crtc const *__mptr___3 ; int pixel_size ; unsigned long line_time_us ; int entries ; int _max1 ; int _max2 ; long tmp___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; long tmp___5 ; { dev = unused_crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; srwm = 1; enabled = (struct drm_crtc *)0; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { wm_info = & i945_wm_info; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { wm_info = & i915_wm_info; } else { 
wm_info = & i830_a_wm_info; } } fifo_size = (*(dev_priv->display.get_fifo_size))(dev, 0); crtc = intel_get_crtc_for_plane(dev, 0); tmp___0 = intel_crtc_active(crtc); if ((int )tmp___0) { cpp = (((crtc->primary)->state)->fb)->bits_per_pixel / 8; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 2U) { cpp = 4; } else { } __mptr = (struct drm_crtc const *)crtc; adjusted_mode = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode); tmp = intel_calculate_wm((unsigned long )adjusted_mode->crtc_clock, wm_info, fifo_size, cpp, (unsigned long )pessimal_latency_ns); planea_wm = (int )tmp; enabled = crtc; } else { planea_wm = (int )((unsigned int )fifo_size - (unsigned int )wm_info->guard_size); if ((long )planea_wm > (long )wm_info->max_wm) { planea_wm = (int )wm_info->max_wm; } else { } } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 2U) { wm_info = & i830_bc_wm_info; } else { } fifo_size = (*(dev_priv->display.get_fifo_size))(dev, 1); crtc = intel_get_crtc_for_plane(dev, 1); tmp___2 = intel_crtc_active(crtc); if ((int )tmp___2) { cpp___0 = (((crtc->primary)->state)->fb)->bits_per_pixel / 8; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 2U) { cpp___0 = 4; } else { } __mptr___0 = (struct drm_crtc const *)crtc; adjusted_mode___0 = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr___0)->config)->base.adjusted_mode); tmp___1 = intel_calculate_wm((unsigned long )adjusted_mode___0->crtc_clock, wm_info, fifo_size, cpp___0, (unsigned long )pessimal_latency_ns); planeb_wm = (int )tmp___1; if ((unsigned long )enabled == (unsigned long )((struct drm_crtc *)0)) { enabled = crtc; } else { enabled = (struct drm_crtc *)0; } } else { planeb_wm = (int )((unsigned int )fifo_size - (unsigned int )wm_info->guard_size); if ((long )planeb_wm > (long 
)wm_info->max_wm) { planeb_wm = (int )wm_info->max_wm; } else { } } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("i9xx_update_wm", "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___4->info.device_id) == 9618U && (unsigned long )enabled != (unsigned long )((struct drm_crtc *)0)) { if ((unsigned long )((enabled->primary)->state)->fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___1 = (struct drm_framebuffer const *)((enabled->primary)->state)->fb; obj = ((struct intel_framebuffer *)__mptr___1)->obj; } else { obj = (struct drm_i915_gem_object *)0; } if ((unsigned int )*((unsigned char *)obj + 409UL) == 0U) { enabled = (struct drm_crtc *)0; } else { } } else { } cwm = 2; intel_set_memory_cxsr(dev_priv, 0); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) > 2U && (unsigned long )enabled != (unsigned long )((struct drm_crtc *)0)) { sr_latency_ns = 6000; __mptr___2 = (struct drm_crtc const *)enabled; adjusted_mode___1 = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr___2)->config)->base.adjusted_mode); clock = adjusted_mode___1->crtc_clock; htotal = adjusted_mode___1->crtc_htotal; __mptr___3 = (struct drm_crtc const *)enabled; hdisplay = (((struct intel_crtc *)__mptr___3)->config)->pipe_src_w; pixel_size = (((enabled->primary)->state)->fb)->bits_per_pixel / 8; _max1 = (htotal * 1000) / clock; _max2 = 1; line_time_us = (unsigned long )(_max1 > _max2 ? 
_max1 : _max2); entries = (int )(((unsigned int )(((unsigned long )sr_latency_ns / line_time_us + 1000UL) / 1000UL) * (unsigned int )((unsigned long )pixel_size)) * (unsigned int )((unsigned long )hdisplay)); entries = (int )((((unsigned long )entries + (unsigned long )wm_info->cacheline_size) - 1UL) / (unsigned long )wm_info->cacheline_size); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("i9xx_update_wm", "self-refresh entries: %d\n", entries); } else { } srwm = (int )((unsigned int )wm_info->fifo_size - (unsigned int )entries); if (srwm < 0) { srwm = 1; } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___6->info.device_id) == 10098U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8416L, (uint32_t )((srwm & 255) | 65536), 1); } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 44UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8416L, (uint32_t )((srwm & 255) | 65536), 1); } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___5->info.device_id) == 9618U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8416L, (uint32_t )srwm & 63U, 1); } else { } } } } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("i9xx_update_wm", "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", planea_wm, planeb_wm, cwm, srwm); } else { } fwater_lo = (uint32_t )(((planeb_wm & 63) << 16) | (planea_wm & 63)); fwater_hi = (uint32_t )cwm & 31U; fwater_lo = fwater_lo | 16777472U; fwater_hi = fwater_hi | 256U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8408L, fwater_lo, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8412L, fwater_hi, 1); if ((unsigned long )enabled != (unsigned long )((struct drm_crtc *)0)) { intel_set_memory_cxsr(dev_priv, 1); } else { } return; } } static void 
i845_update_wm(struct drm_crtc *unused_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct drm_display_mode const *adjusted_mode ; uint32_t fwater_lo ; int planea_wm ; struct drm_crtc const *__mptr ; int tmp ; unsigned long tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; { dev = unused_crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = single_enabled_crtc(dev); if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { return; } else { } __mptr = (struct drm_crtc const *)crtc; adjusted_mode = (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode); tmp = (*(dev_priv->display.get_fifo_size))(dev, 0); tmp___0 = intel_calculate_wm((unsigned long )adjusted_mode->crtc_clock, & i845_wm_info, tmp, 4, (unsigned long )pessimal_latency_ns); planea_wm = (int )tmp___0; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8408L, 1); fwater_lo = tmp___1 & 4294963200U; fwater_lo = ((uint32_t )planea_wm | fwater_lo) | 768U; tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("i845_update_wm", "Setting FIFO watermarks - A: %d\n", planea_wm); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8408L, fwater_lo, 1); return; } } static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev , struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; uint32_t pixel_rate ; uint64_t pipe_w ; uint64_t pipe_h ; uint64_t pfit_w ; uint64_t pfit_h ; uint32_t pfit_size ; u64 tmp ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pixel_rate = (uint32_t )(intel_crtc->config)->base.adjusted_mode.crtc_clock; if ((int )(intel_crtc->config)->pch_pfit.enabled) { pfit_size = (intel_crtc->config)->pch_pfit.size; pipe_w = (uint64_t )(intel_crtc->config)->pipe_src_w; pipe_h = (uint64_t )(intel_crtc->config)->pipe_src_h; pfit_w = (uint64_t )(pfit_size >> 16); 
/* NOTE(review): CIL-generated verifier input (flattened from i915 intel_pm.c).
 * Code tokens kept byte-identical; comments only.
 * Tail of ilk_pipe_pixel_rate(): pfit_size packs width in the high 16 bits and
 * height in the low 16 bits.  The pipe source size is clamped up to the
 * panel-fitter window, then the pixel rate is scaled by
 * (pipe area / fitter area) via a 64-bit division. */
pfit_h = (uint64_t )pfit_size & 65535ULL; if (pipe_w < pfit_w) { pipe_w = pfit_w; } else { } if (pipe_h < pfit_h) { pipe_h = pfit_h; } else { } tmp = div_u64(((unsigned long long )pixel_rate * pipe_w) * pipe_h, (u32 )pfit_w * (u32 )pfit_h); pixel_rate = (uint32_t )tmp; } else { } return (pixel_rate); } }
/* ilk_wm_method1() - ILK-style "method 1" watermark value.
 * Computes pixel_rate * bytes_per_pixel * latency, divides by 640000 rounding
 * up, and adds 2.  latency is presumably in 0.1 us units -- TODO confirm
 * against the unflattened driver.  A zero latency triggers a WARN
 * ("Latency value missing") and returns UINT32_MAX (4294967295U) as an
 * "invalid/maximal" watermark. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate , uint8_t bytes_per_pixel , uint32_t latency ) { uint64_t ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; unsigned long long _tmp ; uint32_t __base ; uint32_t __rem ; { __ret_warn_on = latency == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 1474, "Latency value missing\n"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (4294967295U); } else { }
/* ret = DIV_ROUND_UP(rate * bpp * latency, 640000) + 2, expanded by CIL. */
ret = ((unsigned long long )pixel_rate * (unsigned long long )bytes_per_pixel) * (unsigned long long )latency; _tmp = ret + 639999ULL; __base = 640000U; __rem = (uint32_t )(_tmp % (unsigned long long )__base); _tmp = _tmp / (unsigned long long )__base; ret = _tmp + 2ULL; return ((uint32_t )ret); } }
/* ilk_wm_method2() - ILK-style "method 2" (line-time based) watermark.
 * Same zero-latency WARN + UINT32_MAX guard as method 1; the arithmetic body
 * continues on the next source line. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate , uint32_t pipe_htotal , uint32_t horiz_pixels , uint8_t bytes_per_pixel , uint32_t latency ) { uint32_t ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; { __ret_warn_on = latency == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 1490, "Latency value missing\n"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (4294967295U);
/* NOTE(review): CIL-generated code; tokens byte-identical, comments only.
 * Tail of ilk_wm_method2(): lines-worth of latency = latency * rate /
 * (htotal * 10000); watermark = ceil((lines + 1) * width * bpp / 64) + 2. */
} else { } ret = (latency * pixel_rate) / (pipe_htotal * 10000U); ret = ((ret + 1U) * horiz_pixels) * (uint32_t )bytes_per_pixel; ret = (ret + 63U) / 64U + 2U; return (ret); } }
/* ilk_wm_fbc() - FBC watermark: ceiling of (pri_val * 64) / (bpp * width),
 * plus 2.  Callers guarantee horiz_pixels and bytes_per_pixel are non-zero
 * (active/enabled checked first) -- otherwise this would divide by zero. */
static uint32_t ilk_wm_fbc(uint32_t pri_val , uint32_t horiz_pixels , uint8_t bytes_per_pixel ) { { return (((pri_val * 64U + (uint32_t )bytes_per_pixel * horiz_pixels) - 1U) / ((uint32_t )bytes_per_pixel * horiz_pixels) + 2U); } }
/* ilk_compute_pri_wm() - primary-plane watermark.  Returns 0 when the pipe or
 * plane is inactive/disabled.  For level 0 (is_lp == false) only method 1 is
 * used; for LP levels the min of method 1 and method 2 is returned. */
static uint32_t ilk_compute_pri_wm(struct ilk_pipe_wm_parameters const *params , uint32_t mem_value , bool is_lp ) { uint32_t method1 ; uint32_t method2 ; uint32_t _min1 ; uint32_t _min2 ; { if (! ((_Bool )params->active) || ! ((_Bool )params->pri.enabled)) { return (0U); } else { } method1 = ilk_wm_method1(params->pixel_rate, (int )params->pri.bytes_per_pixel, mem_value); if (! is_lp) { return (method1); } else { } method2 = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, params->pri.horiz_pixels, (int )params->pri.bytes_per_pixel, mem_value); _min1 = method1; _min2 = method2; return (_min1 < _min2 ? _min1 : _min2); } }
/* ilk_compute_spr_wm() - sprite-plane watermark: always min(method1, method2);
 * 0 when pipe or sprite is inactive/disabled. */
static uint32_t ilk_compute_spr_wm(struct ilk_pipe_wm_parameters const *params , uint32_t mem_value ) { uint32_t method1 ; uint32_t method2 ; uint32_t _min1 ; uint32_t _min2 ; { if (! ((_Bool )params->active) || ! ((_Bool )params->spr.enabled)) { return (0U); } else { } method1 = ilk_wm_method1(params->pixel_rate, (int )params->spr.bytes_per_pixel, mem_value); method2 = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, params->spr.horiz_pixels, (int )params->spr.bytes_per_pixel, mem_value); _min1 = method1; _min2 = method2; return (_min1 < _min2 ? _min1 : _min2); } }
/* ilk_compute_cur_wm() - cursor watermark (method 2 only); guard continues on
 * the next source line. */
static uint32_t ilk_compute_cur_wm(struct ilk_pipe_wm_parameters const *params , uint32_t mem_value ) { uint32_t tmp ; { if (! ((_Bool )params->active) || !
/* NOTE(review): CIL-generated code; tokens byte-identical, comments only.
 * Tail of ilk_compute_cur_wm(): 0 when cursor disabled, else method 2. */
((_Bool )params->cur.enabled)) { return (0U); } else { } tmp = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, params->cur.horiz_pixels, (int )params->cur.bytes_per_pixel, mem_value); return (tmp); } }
/* ilk_compute_fbc_wm() - FBC watermark derived from the already-computed
 * primary watermark pri_val; 0 when pipe/primary is inactive/disabled. */
static uint32_t ilk_compute_fbc_wm(struct ilk_pipe_wm_parameters const *params , uint32_t pri_val ) { uint32_t tmp ; { if (! ((_Bool )params->active) || ! ((_Bool )params->pri.enabled)) { return (0U); } else { } tmp = ilk_wm_fbc(pri_val, params->pri.horiz_pixels, (int )params->pri.bytes_per_pixel); return (tmp); } }
/* ilk_display_fifo_size() - total display FIFO entries by hardware generation:
 * gen > 7 -> 3072, gen 7 -> 768, older -> 512. */
static unsigned int ilk_display_fifo_size(struct drm_device const *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p___0 = to_i915(dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { return (3072U); } else { __p = to_i915(dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { return (768U); } else { return (512U); } } } }
/* ilk_plane_wm_reg_max() - register field limit for a plane watermark, per
 * generation and per level (level 0 fields are narrower than LP fields);
 * pre-gen7 sprites get smaller fields than the primary plane. */
static unsigned int ilk_plane_wm_reg_max(struct drm_device const *dev , int level , bool is_sprite ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p___0 = to_i915(dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { return (level == 0 ? 255U : 2047U); } else { __p = to_i915(dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { return (level == 0 ? 127U : 1023U); } else if (! is_sprite) { return (level == 0 ? 127U : 511U); } else { return (level == 0 ? 63U : 255U); } } } }
/* ilk_cursor_wm_reg_max() - register field limit for a cursor watermark;
 * value for the pre-gen7 branch continues on the next source line. */
static unsigned int ilk_cursor_wm_reg_max(struct drm_device const *dev , int level ) { struct drm_i915_private *__p ; { __p = to_i915(dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { return (level == 0 ? 63U : 255U); } else { return (level == 0 ?
/* NOTE(review): CIL-generated code; tokens byte-identical, comments only.
 * Tail of ilk_cursor_wm_reg_max(): pre-gen7 cursor limits are 31 (level 0)
 * or 63 (LP levels). */
31U : 63U); } } }
/* ilk_fbc_wm_reg_max() - FBC watermark register limit: 31 on gen > 7,
 * otherwise 15. */
static unsigned int ilk_fbc_wm_reg_max(struct drm_device const *dev ) { struct drm_i915_private *__p ; { __p = to_i915(dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { return (31U); } else { return (15U); } } }
/* ilk_plane_wm_max() - maximum usable watermark for a plane at @level.
 * Starts from the total FIFO size, then:
 *  - returns 0 for a sprite when no sprites are enabled;
 *  - for level 0 or multiple active pipes, splits the FIFO per pipe
 *    (and halves it again on gen <= 6);
 *  - with sprites enabled, a "5/6" DDB partitioning (ddb_partitioning == 1U
 *    at LP levels) gives sprites 5/6 and the primary 1/6 of the FIFO,
 *    otherwise each side gets half;
 *  - finally clamps to the register field limit. */
static unsigned int ilk_plane_wm_max(struct drm_device const *dev , int level , struct intel_wm_config const *config , enum intel_ddb_partitioning ddb_partitioning , bool is_sprite ) { unsigned int fifo_size ; unsigned int tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; unsigned int _min1 ; unsigned int _min2 ; unsigned int tmp___0 ; { tmp = ilk_display_fifo_size(dev); fifo_size = tmp; if ((int )is_sprite && ! ((_Bool )config->sprites_enabled)) { return (0U); } else { } if (level == 0 || (unsigned int )config->num_pipes_active > 1U) { __p = to_i915(dev); fifo_size = fifo_size / (unsigned int )__p->info.num_pipes; __p___0 = to_i915(dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 6U) { fifo_size = fifo_size / 2U; } else { } } else { } if ((int )config->sprites_enabled) { if (level > 0 && (unsigned int )ddb_partitioning == 1U) { if ((int )is_sprite) { fifo_size = fifo_size * 5U; } else { } fifo_size = fifo_size / 6U; } else { fifo_size = fifo_size / 2U; } } else { }
/* min(fifo_size, register limit); the select completes on the next line. */
_min1 = fifo_size; tmp___0 = ilk_plane_wm_reg_max(dev, level, (int )is_sprite); _min2 = tmp___0; return (_min1 < _min2 ?
/* NOTE(review): CIL-generated code; tokens byte-identical, comments only.
 * Tail of ilk_plane_wm_max(): return the smaller of FIFO share and register
 * field limit. */
_min1 : _min2); } }
/* ilk_cursor_wm_max() - maximum cursor watermark: a flat 64 for LP levels
 * with more than one active pipe, otherwise the register field limit. */
static unsigned int ilk_cursor_wm_max(struct drm_device const *dev , int level , struct intel_wm_config const *config ) { unsigned int tmp ; { if (level > 0 && (unsigned int )config->num_pipes_active > 1U) { return (64U); } else { } tmp = ilk_cursor_wm_reg_max(dev, level); return (tmp); } }
/* ilk_compute_wm_maximums() - fill @max with the usable limits (pri/spr/cur/
 * fbc) for @level under the given pipe/sprite configuration and DDB split. */
static void ilk_compute_wm_maximums(struct drm_device const *dev , int level , struct intel_wm_config const *config , enum intel_ddb_partitioning ddb_partitioning , struct ilk_wm_maximums *max ) { unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; { tmp = ilk_plane_wm_max(dev, level, config, ddb_partitioning, 0); max->pri = (uint16_t )tmp; tmp___0 = ilk_plane_wm_max(dev, level, config, ddb_partitioning, 1); max->spr = (uint16_t )tmp___0; tmp___1 = ilk_cursor_wm_max(dev, level, config); max->cur = (uint16_t )tmp___1; tmp___2 = ilk_fbc_wm_reg_max(dev); max->fbc = (uint16_t )tmp___2; return; } }
/* ilk_compute_wm_reg_maximums() - like the above but using only the raw
 * register field limits, ignoring FIFO sharing and configuration. */
static void ilk_compute_wm_reg_maximums(struct drm_device *dev , int level , struct ilk_wm_maximums *max ) { unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; { tmp = ilk_plane_wm_reg_max((struct drm_device const *)dev, level, 0); max->pri = (uint16_t )tmp; tmp___0 = ilk_plane_wm_reg_max((struct drm_device const *)dev, level, 1); max->spr = (uint16_t )tmp___0; tmp___1 = ilk_cursor_wm_reg_max((struct drm_device const *)dev, level); max->cur = (uint16_t )tmp___1; tmp___2 = ilk_fbc_wm_reg_max((struct drm_device const *)dev); max->fbc = (uint16_t )tmp___2; return; } }
/* ilk_validate_wm_level() - clamp a computed level against @max; body
 * continues on the next source line. */
static bool ilk_validate_wm_level(int level , struct ilk_wm_maximums const *max , struct intel_wm_level *result ) { bool ret ; long tmp ; long tmp___0 ; long tmp___1 ; uint32_t __min1 ; uint32_t __min2 ; uint32_t __min1___0 ; uint32_t __min2___0 ; uint32_t __min1___1 ; uint32_t __min2___1 ; { if (!
result->enable) { return (0); } else { } result->enable = (bool )((result->pri_val <= (uint32_t )max->pri && result->spr_val <= (uint32_t )max->spr) && result->cur_val <= (uint32_t )max->cur); ret = result->enable; if (level == 0 && ! result->enable) { if (result->pri_val > (uint32_t )max->pri) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ilk_validate_wm_level", "Primary WM%d too large %u (max %u)\n", level, result->pri_val, (int )max->pri); } else { } } else { } if (result->spr_val > (uint32_t )max->spr) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ilk_validate_wm_level", "Sprite WM%d too large %u (max %u)\n", level, result->spr_val, (int )max->spr); } else { } } else { } if (result->cur_val > (uint32_t )max->cur) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ilk_validate_wm_level", "Cursor WM%d too large %u (max %u)\n", level, result->cur_val, (int )max->cur); } else { } } else { } __min1 = result->pri_val; __min2 = (uint32_t )max->pri; result->pri_val = __min1 < __min2 ? __min1 : __min2; __min1___0 = result->spr_val; __min2___0 = (uint32_t )max->spr; result->spr_val = __min1___0 < __min2___0 ? __min1___0 : __min2___0; __min1___1 = result->cur_val; __min2___1 = (uint32_t )max->cur; result->cur_val = __min1___1 < __min2___1 ? 
__min1___1 : __min2___1; result->enable = 1; } else { } return (ret); } } static void ilk_compute_wm_level(struct drm_i915_private const *dev_priv , int level , struct ilk_pipe_wm_parameters const *p , struct intel_wm_level *result ) { uint16_t pri_latency ; uint16_t spr_latency ; uint16_t cur_latency ; { pri_latency = dev_priv->wm.pri_latency[level]; spr_latency = dev_priv->wm.spr_latency[level]; cur_latency = dev_priv->wm.cur_latency[level]; if (level > 0) { pri_latency = (unsigned int )pri_latency * 5U; spr_latency = (unsigned int )spr_latency * 5U; cur_latency = (unsigned int )cur_latency * 5U; } else { } result->pri_val = ilk_compute_pri_wm(p, (uint32_t )pri_latency, level != 0); result->spr_val = ilk_compute_spr_wm(p, (uint32_t )spr_latency); result->cur_val = ilk_compute_cur_wm(p, (uint32_t )cur_latency); result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val); result->enable = 1; return; } } static uint32_t hsw_compute_linetime_wm(struct drm_device *dev , struct drm_crtc *crtc ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *mode ; u32 linetime ; u32 ips_linetime ; int __x ; int __d ; int __x___0 ; int __d___0 ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; mode = & (intel_crtc->config)->base.adjusted_mode; if (! intel_crtc->active) { return (0U); } else { } __x = mode->crtc_htotal * 8000; __d = mode->crtc_clock; linetime = (u32 )(__x > 0 ? (__d / 2 + __x) / __d : (__x - __d / 2) / __d); __x___0 = mode->crtc_htotal * 8000; tmp = (*(dev_priv->display.get_display_clock_speed))(dev_priv->dev); __d___0 = tmp; ips_linetime = (u32 )(__x___0 > 0 ? 
/* Continuation of hsw_compute_linetime_wm(): finish the rounded division and
 * return (ips_linetime << 16) | linetime. */
(__d___0 / 2 + __x___0) / __d___0 : (__x___0 - __d___0 / 2) / __d___0); return ((ips_linetime << 16) | linetime); } }
/* Populate wm[] with per-level memory latency values, by platform:
 * - gen9: two pcode mailbox reads yield 8 levels (4 bytes per word); nonzero
 *   levels get +2 added, and a zero latency disables that level and all above;
 * - HSW/BDW path (_L): 64-bit SSKPD register, 5 levels, WM0 with a low-nibble
 *   fallback when the primary field reads zero;
 * - gen6/7: 32-bit SSKPD, 4 x 6-bit levels; gen5: MLTR register, WM0 fixed 7. */
static void intel_read_wm_latency(struct drm_device *dev , uint16_t *wm ) { struct drm_i915_private *dev_priv ; uint32_t val ; int ret ; int i ; int level ; int max_level ; int tmp ; uint64_t sskpd ; uint64_t tmp___0 ; uint32_t sskpd___0 ; uint32_t tmp___1 ; uint32_t mltr ; uint32_t tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 9U) { tmp = ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; val = 0U; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = sandybridge_pcode_read(dev_priv, 6U, & val); mutex_unlock(& dev_priv->rps.hw_lock); if (ret != 0) { drm_err("SKL Mailbox read error = %d\n", ret); return; } else { } *wm = (unsigned int )((uint16_t )val) & 255U; *(wm + 1UL) = (unsigned int )((uint16_t )(val >> 8)) & 255U; *(wm + 2UL) = (unsigned int )((uint16_t )(val >> 16)) & 255U; *(wm + 3UL) = (uint16_t )(val >> 24); val = 1U; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = sandybridge_pcode_read(dev_priv, 6U, & val); mutex_unlock(& dev_priv->rps.hw_lock); if (ret != 0) { drm_err("SKL Mailbox read error = %d\n", ret); return; } else { } *(wm + 4UL) = (unsigned int )((uint16_t )val) & 255U; *(wm + 5UL) = (unsigned int )((uint16_t )(val >> 8)) & 255U; *(wm + 6UL) = (unsigned int )((uint16_t )(val >> 16)) & 255U; *(wm + 7UL) = (uint16_t )(val >> 24); *wm = (unsigned int )*wm + 2U; level = 1; goto ldv_49307; ldv_49306: ; if ((unsigned int )*(wm + (unsigned long )level) != 0U) { *(wm + (unsigned long )level) = (unsigned int )*(wm + (unsigned long )level) + 2U; } else { i = level + 1; goto ldv_49303; ldv_49302: *(wm + (unsigned long )i) = 0U; i = i + 1; ldv_49303: ; if (i <= max_level) { goto ldv_49302; } else { } goto ldv_49305; } level = level + 1; ldv_49307: ; if (level <= max_level) { goto ldv_49306; } else { } ldv_49305: ; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { _L: /* CIL Label */ tmp___0 = (*(dev_priv->uncore.funcs.mmio_readq))(dev_priv, 1334544L, 1); sskpd = tmp___0; *wm = (uint16_t )(sskpd >> 56); if ((unsigned int )*wm == 0U) { *wm = (unsigned int )((uint16_t )sskpd) & 15U; } else { } *(wm + 1UL) = (unsigned int )((uint16_t )(sskpd >> 4)) & 255U; *(wm + 2UL) = (unsigned int )((uint16_t )(sskpd >> 12)) & 255U; *(wm + 3UL) = (unsigned int )((uint16_t )(sskpd >> 20)) & 511U; *(wm + 4UL) = (unsigned int )((uint16_t )(sskpd >> 32)) & 511U; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 5U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1334544L, 1); sskpd___0 = tmp___1; *wm = (unsigned int )((uint16_t )sskpd___0) & 63U; *(wm + 1UL) = (unsigned int )((uint16_t )(sskpd___0 >> 8)) & 63U; *(wm + 2UL) = (unsigned int )((uint16_t )(sskpd___0 >> 16)) & 63U; *(wm + 3UL) = (unsigned int )((uint16_t )(sskpd___0 >> 24)) & 63U; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70178L, 1); mltr = tmp___2; *wm = 7U; *(wm + 1UL) = (unsigned int )((uint16_t )mltr) & 63U; *(wm + 2UL) = (unsigned int )((uint16_t )(mltr >> 8)) & 63U; } else { } } } } } return; } }
/* Gen5 (ILK): the reported sprite WM latency is overridden with a fixed 13. */
static void intel_fixup_spr_wm_latency(struct drm_device *dev , uint16_t *wm ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { *wm = 13U; } else { } return; } }
/* Gen5: fixed cursor WM0 latency (13); on the platform selected by the info
 * byte at offset 45 (HSW presumably — verify against struct layout) the
 * highest cursor latency level is doubled. */
static void intel_fixup_cur_wm_latency(struct drm_device *dev , uint16_t *wm ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { *wm = 13U; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { *(wm + 3UL) = (unsigned int )*(wm + 3UL) * 2U; } else { } return; } }
/* Highest valid WM level index for the platform: 7 on gen9+, 4 on the two
 * platform-flag/gen8 branches, 3 on gen6/7, 2 on older parts. */
int ilk_wm_max_level(struct drm_device const *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { __p___3 = to_i915(dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 8U) { return (7); } else { __p___0 = to_i915(dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return (4); } else { __p___1 = to_i915(dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915(dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { return (4); } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915(dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { return (3); } else { return (2); } } } } } }
/* Debug-print each WM latency scaled to 0.1us units (gen9 x10, LP levels x5);
 * errors out per level when a latency is zero. Continues on the next chunk
 * line. */
static void intel_print_wm_latency(struct drm_device *dev , char const *name , uint16_t const *wm ) { int level ; int max_level ; int tmp ; unsigned int latency ; struct drm_i915_private *__p ; long tmp___0 ; { tmp = ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; level = 0; goto ldv_49417; ldv_49416: latency = (unsigned int )*(wm + (unsigned long )level); if (latency == 0U) { drm_err("%s WM%d latency not provided\n", name, level); goto ldv_49408; } else { } __p =
/* Continuation of intel_print_wm_latency(): scale and print each level. */
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 9U) { latency = latency * 10U; } else if (level > 0) { latency = latency * 5U; } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_print_wm_latency", "%s WM%d latency %u (%u.%u usec)\n", name, level, (int )*(wm + (unsigned long )level), latency / 10U, latency % 10U); } else { } ldv_49408: level = level + 1; ldv_49417: ; if (level <= max_level) { goto ldv_49416; } else { } return; } }
/* Raise WM0 latency to at least `min` and every LP level to at least
 * ceil(min / 5); returns true iff WM0 was below `min` (i.e. values changed). */
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv , uint16_t *wm , uint16_t min ) { int level ; int max_level ; int tmp ; uint16_t _max1 ; uint16_t _max2 ; uint16_t __max1 ; uint16_t __max2 ; { tmp = ilk_wm_max_level((struct drm_device const *)dev_priv->dev); max_level = tmp; if ((int )*wm >= (int )min) { return (0); } else { } _max1 = *wm; _max2 = min; *wm = (uint16_t )((int )_max1 > (int )_max2 ? _max1 : _max2); level = 1; goto ldv_49433; ldv_49432: __max1 = *(wm + (unsigned long )level); __max2 = (uint16_t )(((int )min + 4) / 5); *(wm + (unsigned long )level) = (uint16_t )((int )__max1 > (int )__max2 ? __max1 : __max2); level = level + 1; ldv_49433: ; if (level <= max_level) { goto ldv_49432; } else { } return (1); } }
/* SNB quirk: bump primary/sprite/cursor latencies to at least 12 and, when
 * anything changed, log it and reprint the latency tables. */
static void snb_wm_latency_quirk(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; bool changed ; bool tmp ; bool tmp___0 ; bool tmp___1 ; long tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ilk_increase_wm_latency(dev_priv, (uint16_t *)(& dev_priv->wm.pri_latency), 12); tmp___0 = ilk_increase_wm_latency(dev_priv, (uint16_t *)(& dev_priv->wm.spr_latency), 12); tmp___1 = ilk_increase_wm_latency(dev_priv, (uint16_t *)(& dev_priv->wm.cur_latency), 12); changed = (((int )tmp | (int )tmp___0) | (int )tmp___1) != 0; if (! changed) { return; } else { } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("snb_wm_latency_quirk", "WM latency values increased to avoid potential underruns\n"); } else { } intel_print_wm_latency(dev, "Primary", (uint16_t const *)(& dev_priv->wm.pri_latency)); intel_print_wm_latency(dev, "Sprite", (uint16_t const *)(& dev_priv->wm.spr_latency)); intel_print_wm_latency(dev, "Cursor", (uint16_t const *)(& dev_priv->wm.cur_latency)); return; } }
/* ILK-class WM latency setup: read primary latencies, copy them to the sprite
 * and cursor tables (10 bytes = 5 u16 levels), apply the per-gen fixups,
 * print all three tables, and apply the SNB quirk on gen6. */
static void ilk_setup_wm_latency(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_read_wm_latency(dev, (uint16_t *)(& dev_priv->wm.pri_latency)); memcpy((void *)(& dev_priv->wm.spr_latency), (void const *)(& dev_priv->wm.pri_latency), 10UL); memcpy((void *)(& dev_priv->wm.cur_latency), (void const *)(& dev_priv->wm.pri_latency), 10UL); intel_fixup_spr_wm_latency(dev, (uint16_t *)(& dev_priv->wm.spr_latency)); intel_fixup_cur_wm_latency(dev, (uint16_t *)(& dev_priv->wm.cur_latency)); intel_print_wm_latency(dev, "Primary", (uint16_t const *)(& dev_priv->wm.pri_latency)); intel_print_wm_latency(dev, "Sprite", (uint16_t const *)(& dev_priv->wm.spr_latency)); intel_print_wm_latency(dev, "Cursor", (uint16_t const *)(& dev_priv->wm.cur_latency)); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { snb_wm_latency_quirk(dev); } else { } return; } }
/* Gen9 (SKL) WM latency setup: read and print the skl_latency table. */
static void skl_setup_wm_latency(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_read_wm_latency(dev, (uint16_t *)(& dev_priv->wm.skl_latency)); intel_print_wm_latency(dev, "Gen9 Plane", (uint16_t const *)(& dev_priv->wm.skl_latency)); return; } }
/* Snapshot WM-relevant parameters for an active crtc: htotal, pixel rate,
 * primary/cursor bytes-per-pixel (4 fallback when no fb) and widths, and the
 * WM parameters of the sprite plane attached to this pipe (plane->type == 0
 * branch; the pointer arithmetic is CIL's expansion of list_for_each_entry). */
static void ilk_compute_wm_parameters(struct drm_crtc *crtc , struct ilk_pipe_wm_parameters *p ) { struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; struct drm_plane *plane ; struct list_head const *__mptr___0 ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr___1 ; struct list_head const *__mptr___2 ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; if (! intel_crtc->active) { return; } else { } p->active = 1; p->pipe_htotal = (uint32_t )(intel_crtc->config)->base.adjusted_mode.crtc_htotal; p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); if ((unsigned long )((crtc->primary)->state)->fb != (unsigned long )((struct drm_framebuffer *)0)) { p->pri.bytes_per_pixel = (uint8_t )((((crtc->primary)->state)->fb)->bits_per_pixel / 8); } else { p->pri.bytes_per_pixel = 4U; } p->cur.bytes_per_pixel = 4U; p->pri.enabled = 1; p->cur.enabled = 1; p->pri.horiz_pixels = (uint32_t )(intel_crtc->config)->pipe_src_w; p->cur.horiz_pixels = ((intel_crtc->base.cursor)->state)->crtc_w; __mptr___0 = (struct list_head const *)dev->mode_config.plane_list.next; plane = (struct drm_plane *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_49474; ldv_49473: ; if ((unsigned int )plane->type == 0U) { __mptr___1 = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr___1; if ((int )intel_plane->pipe == (int )pipe) { p->spr = intel_plane->wm; goto ldv_49472; } else { } } else { } __mptr___2 = (struct list_head const *)plane->head.next; plane = (struct drm_plane *)__mptr___2 + 0xfffffffffffffff8UL; ldv_49474: ; if ((unsigned long )(& plane->head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_49473; } else { } ldv_49472: ; return; } }
/* Aggregate the global WM configuration over all crtcs: count active pipes
 * and OR together the sprite enabled/scaled flags of enabled pipe WMs. */
static void ilk_compute_wm_config(struct drm_device *dev , struct intel_wm_config *config ) { struct intel_crtc *intel_crtc ; struct list_head const *__mptr ; struct intel_pipe_wm const *wm ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; intel_crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_49487; ldv_49486: wm = (struct intel_pipe_wm const *)(& intel_crtc->wm.active); if (! ((_Bool )wm->pipe_enabled)) { goto ldv_49485; } else { } config->sprites_enabled = ((int )config->sprites_enabled | (int )wm->sprites_enabled) != 0; config->sprites_scaled = ((int )config->sprites_scaled | (int )wm->sprites_scaled) != 0; config->num_pipes_active = config->num_pipes_active + 1U; ldv_49485: __mptr___0 = (struct list_head const *)intel_crtc->base.head.next; intel_crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_49487: ; if ((unsigned long )(& intel_crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49486; } else { } return; } }
/* Compute this pipe's WM levels into *pipe_wm. The usable max level shrinks
 * to 1 when sprites are enabled on gen <= 6 and to 0 when sprites are scaled.
 * Returns 0 (failure) if level 0 does not validate. Continues on the next
 * chunk line. */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc , struct ilk_pipe_wm_parameters const *params , struct intel_pipe_wm *pipe_wm ) { struct drm_device *dev ; struct drm_i915_private const *dev_priv ; int level ; int max_level ; int tmp ; struct intel_wm_config config ; struct ilk_wm_maximums max ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; bool tmp___0 ; int tmp___1 ; struct intel_wm_level wm ; bool tmp___2 ; int tmp___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private const *)dev->dev_private; tmp = ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; config.num_pipes_active = 1U; config.sprites_enabled = params->spr.enabled; config.sprites_scaled = params->spr.scaled; pipe_wm->pipe_enabled = params->active; pipe_wm->sprites_enabled = params->spr.enabled; pipe_wm->sprites_scaled = params->spr.scaled; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 6U && (int )params->spr.enabled) { max_level = 1; } else { } if ((int )params->spr.scaled) { max_level = 0; } else { } ilk_compute_wm_level(dev_priv, 0, params, (struct intel_wm_level *)(& pipe_wm->wm)); __p___0 = to_i915((struct drm_device const *)dev);
/* Continuation of intel_compute_pipe_wm(): compute linetime on the two
 * platform branches, validate level 0 (hard failure), then fill higher levels
 * until one fails validation. */
if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); } else { } } else { } } ilk_compute_wm_maximums((struct drm_device const *)dev, 0, (struct intel_wm_config const *)(& config), 0, & max); tmp___0 = ilk_validate_wm_level(0, (struct ilk_wm_maximums const *)(& max), (struct intel_wm_level *)(& pipe_wm->wm)); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } ilk_compute_wm_reg_maximums(dev, 1, & max); level = 1; goto ldv_49527; ldv_49526: wm.enable = (_Bool)0; wm.pri_val = 0U; wm.spr_val = 0U; wm.cur_val = 0U; wm.fbc_val = 0U; ilk_compute_wm_level(dev_priv, level, params, & wm); tmp___2 = ilk_validate_wm_level(level, (struct ilk_wm_maximums const *)(& max), & wm); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { goto ldv_49525; } else { } pipe_wm->wm[level] = wm; level = level + 1; ldv_49527: ; if (level <= max_level) { goto ldv_49526; } else { } ldv_49525: ; return (1); } }
/* Merge one WM level across all enabled pipes by taking the per-field maxima
 * (pri/spr/cur/fbc); the merged level is disabled if any enabled pipe has it
 * disabled. */
static void ilk_merge_wm_level(struct drm_device *dev , int level , struct intel_wm_level *ret_wm ) { struct intel_crtc const *intel_crtc ; struct list_head const *__mptr ; struct intel_pipe_wm const *active ; struct intel_wm_level const *wm ; uint32_t _max1 ; unsigned int _max2 ; uint32_t _max1___0 ; unsigned int _max2___0 ; uint32_t _max1___1 ; unsigned int _max2___1 ; uint32_t _max1___2 ; unsigned int _max2___2 ; struct list_head const *__mptr___0 ; { ret_wm->enable = 1; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; intel_crtc = (struct intel_crtc const *)__mptr + 0xfffffffffffffff0UL; goto ldv_49554; ldv_49553: active = & intel_crtc->wm.active; wm = (struct intel_wm_level const *)(& active->wm) + (unsigned long )level; if (! ((_Bool )active->pipe_enabled)) { goto ldv_49540; } else { } if (! ((_Bool )wm->enable)) { ret_wm->enable = 0; } else { } _max1 = ret_wm->pri_val; _max2 = wm->pri_val; ret_wm->pri_val = (unsigned int const )_max1 > (unsigned int const )_max2 ? (unsigned int const )_max1 : _max2; _max1___0 = ret_wm->spr_val; _max2___0 = wm->spr_val; ret_wm->spr_val = (unsigned int const )_max1___0 > (unsigned int const )_max2___0 ? (unsigned int const )_max1___0 : _max2___0; _max1___1 = ret_wm->cur_val; _max2___1 = wm->cur_val; ret_wm->cur_val = (unsigned int const )_max1___1 > (unsigned int const )_max2___1 ? (unsigned int const )_max1___1 : _max2___1; _max1___2 = ret_wm->fbc_val; _max2___2 = wm->fbc_val; ret_wm->fbc_val = (unsigned int const )_max1___2 > (unsigned int const )_max2___2 ? (unsigned int const )_max1___2 : _max2___2; ldv_49540: __mptr___0 = (struct list_head const *)intel_crtc->base.head.next; intel_crtc = (struct intel_crtc const *)__mptr___0 + 0xfffffffffffffff0UL; ldv_49554: ; if ((unsigned long )(& intel_crtc->base.head) != (unsigned long )((struct list_head const *)(& dev->mode_config.crtc_list))) { goto ldv_49553; } else { } return; } }
/* Merge all LP levels (1..max) across pipes. Bails out with multiple active
 * pipes on gen <= 6 and the flag-at-offset-45 platform (LP WMs single-pipe
 * only there). Levels above the last validating one are disabled; an FBC
 * value over the max disables the FBC WM (and is zeroed); the gen5 quirk
 * disables WM2+ when FBC is enabled but the FBC WM is not. */
static void ilk_wm_merge(struct drm_device *dev , struct intel_wm_config const *config , struct ilk_wm_maximums const *max , struct intel_pipe_wm *merged ) { int level ; int max_level ; int tmp ; int last_enabled_level ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct intel_wm_level *wm ; bool tmp___0 ; int tmp___1 ; struct intel_wm_level *wm___0 ; struct drm_i915_private *__p___2 ; bool tmp___2 ; { tmp = ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; last_enabled_level = max_level; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 6U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /* CIL Label */ if ((unsigned int )config->num_pipes_active > 1U) { return; } else { } } else { } } __p___1 = to_i915((struct drm_device const *)dev); merged->fbc_wm_enabled = (unsigned int )((unsigned char )__p___1->info.gen) > 5U; level = 1; goto ldv_49585; ldv_49584: wm = (struct intel_wm_level *)(& merged->wm) + (unsigned long )level; ilk_merge_wm_level(dev, level, wm); if (level > last_enabled_level) { wm->enable = 0; } else { tmp___0 = ilk_validate_wm_level(level, max, wm); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { last_enabled_level = level + -1; } else { } } if (wm->fbc_val > (uint32_t )max->fbc) { if ((int )wm->enable) { merged->fbc_wm_enabled = 0; } else { } wm->fbc_val = 0U; } else { } level = level + 1; ldv_49585: ; if (level <= max_level) { goto ldv_49584; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 5U && ! merged->fbc_wm_enabled) { tmp___2 = intel_fbc_enabled(dev); if ((int )tmp___2) { level = 2; goto ldv_49595; ldv_49594: wm___0 = (struct intel_wm_level *)(& merged->wm) + (unsigned long )level; wm___0->enable = 0; level = level + 1; ldv_49595: ; if (level <= max_level) { goto ldv_49594; } else { } } else { } } else { } return; } }
/* Map an LP register slot (1..3) to a WM level: identity for LP1, and +1 for
 * LP2/LP3 when a 5th level exists (wm[4] enabled), i.e. 1->1, 2->3, 3->4. */
static int ilk_wm_lp_to_level(int wm_lp , struct intel_pipe_wm const *pipe_wm ) { { return ((wm_lp > 1 && (int )pipe_wm->wm[4].enable) + wm_lp); } }
/* Latency field for the WM_LPx register: 2 * level on the flag/gen8 branches,
 * otherwise the raw primary latency for that level. */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev , int level ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { return ((unsigned int )(level * 2)); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { return ((unsigned int )(level * 2)); } else { return ((unsigned int )dev_priv->wm.pri_latency[level]); } } else { return ((unsigned int )dev_priv->wm.pri_latency[level]); } } } }
/* Pack the merged WM levels into register words: WM_LPx = latency<<24 |
 * pri<<8 | cur, enable bit 31, FBC field at bit 19 (gen8+) or 20; the sprite
 * value goes to WM_LPx_SPR (gen <= 6 sets its bit 31 and WARNs unless LP1);
 * then per-pipe WM and linetime words (WARN + skip if level 0 is disabled). */
static void ilk_compute_wm_results(struct drm_device *dev , struct intel_pipe_wm const *merged , enum intel_ddb_partitioning partitioning , struct ilk_wm_values *results ) { struct intel_crtc *intel_crtc ; int level ; int wm_lp ; struct intel_wm_level const *r ; unsigned int tmp ; struct drm_i915_private *__p ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p___0 ; struct list_head const *__mptr ; enum pipe pipe ; struct intel_wm_level const *r___0 ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; struct list_head const *__mptr___0 ; { results->enable_fbc_wm = merged->fbc_wm_enabled; results->partitioning = partitioning; wm_lp = 1; goto ldv_49649; ldv_49648: level = ilk_wm_lp_to_level(wm_lp, merged); r = (struct intel_wm_level const *)(& merged->wm) + (unsigned long )level; tmp = ilk_wm_lp_latency(dev, level); results->wm_lp[wm_lp + -1] = ((tmp << 24) | (unsigned int )(r->pri_val << 8)) | (unsigned int )r->cur_val; if ((int )r->enable) { results->wm_lp[wm_lp + -1] = results->wm_lp[wm_lp + -1] | 2147483648U; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { results->wm_lp[wm_lp + -1] = results->wm_lp[wm_lp + -1] | (uint32_t )(r->fbc_val << 19); } else { results->wm_lp[wm_lp + -1] = results->wm_lp[wm_lp + -1] | (uint32_t )(r->fbc_val << 20); } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 6U && (unsigned int )r->spr_val != 0U) { __ret_warn_on = wm_lp != 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 2327, "WARN_ON(wm_lp != 1)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); results->wm_lp_spr[wm_lp + -1] = (unsigned int )r->spr_val | 2147483648U; } else { results->wm_lp_spr[wm_lp + -1] = r->spr_val; } wm_lp = wm_lp + 1; ldv_49649: ; if (wm_lp <= 3) { goto ldv_49648; } else { } __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; intel_crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_49661; ldv_49660: pipe = intel_crtc->pipe; r___0 = (struct intel_wm_level const *)(& intel_crtc->wm.active.wm); __ret_warn_on___0 = ! ((_Bool )r___0->enable); tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 2339, "WARN_ON(!r->enable)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { goto ldv_49659; } else { } results->wm_linetime[(int )pipe] = intel_crtc->wm.active.linetime; results->wm_pipe[(int )pipe] = ((unsigned int )(r___0->pri_val << 16) | (unsigned int )(r___0->spr_val << 8)) | (unsigned int )r___0->cur_val; ldv_49659: __mptr___0 = (struct list_head const *)intel_crtc->base.head.next; intel_crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_49661: ; if ((unsigned long )(& intel_crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49660; } else { } return; } }
/* Pick the better of two WM sets: the one with the higher max enabled level
 * wins; on a tie, prefer r2 only if it has the FBC WM enabled and r1 does
 * not. Continues on the next chunk line. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev , struct intel_pipe_wm *r1 , struct intel_pipe_wm *r2 ) { int level ; int max_level ; int tmp ; int level1 ; int level2 ; { tmp =
ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; level1 = 0; level2 = 0; level = 1; goto ldv_49673; ldv_49672: ; if ((int )r1->wm[level].enable) { level1 = level; } else { } if ((int )r2->wm[level].enable) { level2 = level; } else { } level = level + 1; ldv_49673: ; if (level <= max_level) { goto ldv_49672; } else { } if (level1 == level2) { if ((int )r2->fbc_wm_enabled && ! r1->fbc_wm_enabled) { return (r2); } else { return (r1); } } else if (level1 > level2) { return (r1); } else { return (r2); } } } static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv , struct ilk_wm_values const *old , struct ilk_wm_values const *new ) { unsigned int dirty ; enum pipe pipe ; int wm_lp ; struct drm_i915_private *__p ; { dirty = 0U; pipe = 0; goto ldv_49690; ldv_49689: ; if (old->wm_linetime[(int )pipe] != new->wm_linetime[(int )pipe]) { dirty = (unsigned int )(1 << ((int )pipe + 8)) | dirty; dirty = dirty | 458752U; } else { } if (old->wm_pipe[(int )pipe] != new->wm_pipe[(int )pipe]) { dirty = (unsigned int )(1 << (int )pipe) | dirty; dirty = dirty | 458752U; } else { } pipe = (enum pipe )((int )pipe + 1); ldv_49690: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_49689; } else { } if ((int const )old->enable_fbc_wm != (int const )new->enable_fbc_wm) { dirty = dirty | 16777216U; dirty = dirty | 458752U; } else { } if ((unsigned int )old->partitioning != (unsigned int )new->partitioning) { dirty = dirty | 33554432U; dirty = dirty | 458752U; } else { } if ((dirty & 458752U) != 0U) { return (dirty); } else { } wm_lp = 1; goto ldv_49694; ldv_49693: ; if (old->wm_lp[wm_lp + -1] != new->wm_lp[wm_lp + -1] || old->wm_lp_spr[wm_lp + -1] != new->wm_lp_spr[wm_lp + -1]) { goto ldv_49692; } else { } wm_lp = wm_lp + 1; ldv_49694: ; if (wm_lp <= 3) { goto ldv_49693; } else { } ldv_49692: ; goto ldv_49696; ldv_49695: dirty = (unsigned int )(1 << (wm_lp + 15)) | dirty; wm_lp = wm_lp + 1; ldv_49696: ; if (wm_lp <= 3) { goto ldv_49695; 
} else { } return (dirty); } } static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv , unsigned int dirty ) { struct ilk_wm_values *previous ; bool changed ; { previous = & dev_priv->wm.__annonCompField83.hw; changed = 0; if ((dirty & 262144U) != 0U && (int )previous->wm_lp[2] < 0) { previous->wm_lp[2] = previous->wm_lp[2] & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282896L, previous->wm_lp[2], 1); changed = 1; } else { } if ((dirty & 131072U) != 0U && (int )previous->wm_lp[1] < 0) { previous->wm_lp[1] = previous->wm_lp[1] & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282892L, previous->wm_lp[1], 1); changed = 1; } else { } if ((dirty & 65536U) != 0U && (int )previous->wm_lp[0] < 0) { previous->wm_lp[0] = previous->wm_lp[0] & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282888L, previous->wm_lp[0], 1); changed = 1; } else { } return (changed); } } static void ilk_write_wm_values(struct drm_i915_private *dev_priv , struct ilk_wm_values *results ) { struct drm_device *dev ; struct ilk_wm_values *previous ; unsigned int dirty ; uint32_t val ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = dev_priv->dev; previous = & dev_priv->wm.__annonCompField83.hw; dirty = ilk_compute_wm_dirty(dev_priv, (struct ilk_wm_values const *)previous, (struct ilk_wm_values const *)results); if (dirty == 0U) { return; } else { } _ilk_disable_lp_wm(dev_priv, dirty); if ((int )dirty & 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282880L, results->wm_pipe[0], 1); } else { } if ((dirty & 2U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282884L, results->wm_pipe[1], 1); } else { } if ((dirty & 4U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283136L, results->wm_pipe[2], 1); } else { } if ((dirty & 256U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283248L, results->wm_linetime[0], 
1); } else { } if ((dirty & 512U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283252L, results->wm_linetime[1], 1); } else { } if ((dirty & 1024U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283256L, results->wm_linetime[2], 1); } else { } if ((dirty & 33554432U) != 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { _L: /* CIL Label */ val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283232L, 1); if ((unsigned int )results->partitioning == 0U) { val = val & 4294967294U; } else { val = val | 1U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283232L, val, 1); } else { goto _L___0; } } else { _L___0: /* CIL Label */ val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282628L, 1); if ((unsigned int )results->partitioning == 0U) { val = val & 4294967231U; } else { val = val | 64U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282628L, val, 1); } } } else { } if ((dirty & 16777216U) != 0U) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282624L, 1); if ((int )results->enable_fbc_wm) { val = val & 4294934527U; } else { val = val | 32768U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282624L, val, 1); } else { } if ((dirty & 65536U) != 0U && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282912L, results->wm_lp_spr[0], 1); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 6U) { if ((dirty & 131072U) != 0U && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282916L, results->wm_lp_spr[1], 1); } else { } if ((dirty & 262144U) != 0U && 
previous->wm_lp_spr[2] != results->wm_lp_spr[2]) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282920L, results->wm_lp_spr[2], 1); } else { } } else { } if ((dirty & 65536U) != 0U && previous->wm_lp[0] != results->wm_lp[0]) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282888L, results->wm_lp[0], 1); } else { } if ((dirty & 131072U) != 0U && previous->wm_lp[1] != results->wm_lp[1]) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282892L, results->wm_lp[1], 1); } else { } if ((dirty & 262144U) != 0U && previous->wm_lp[2] != results->wm_lp[2]) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282896L, results->wm_lp[2], 1); } else { } dev_priv->wm.__annonCompField83.hw = *results; return; } } static bool ilk_disable_lp_wm(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; bool tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = _ilk_disable_lp_wm(dev_priv, 458752U); return (tmp); } } static void skl_ddb_get_pipe_allocation_limits(struct drm_device *dev , struct drm_crtc *for_crtc , struct intel_wm_config const *config , struct skl_pipe_wm_parameters const *params , struct skl_ddb_entry *alloc ) { struct drm_crtc *crtc ; unsigned int pipe_size ; unsigned int ddb_size ; int nth_active_pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct list_head const *__mptr ; struct drm_crtc const *__mptr___0 ; int tmp ; struct list_head const *__mptr___1 ; { if (! 
/* (continuation of skl_ddb_get_pipe_allocation_limits)
 * An inactive pipe gets an empty [0,0) allocation. */
((_Bool )params->active)) { alloc->start = 0U; alloc->end = 0U; return; } else { }
/* Total DDB size: 512 blocks when the device-info flag at byte offset 45 of
 * dev_priv is clear and gen == 9, otherwise 896; 4 blocks are then reserved
 * off the total. NOTE(review): which flag byte 45 encodes is not visible
 * here — confirm against struct drm_i915_private's layout. */
__p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { ddb_size = 512U; } else { ddb_size = 896U; } } else { ddb_size = 896U; } ddb_size = ddb_size - 4U;
/* Count the active CRTCs that precede for_crtc in the mode_config list;
 * that index selects this pipe's equal slice of the DDB. */
nth_active_pipe = 0; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_49772; ldv_49771: __mptr___0 = (struct drm_crtc const *)crtc; if (((struct intel_crtc *)__mptr___0)->active) { tmp = 0; } else { tmp = 1; } if (tmp) { goto ldv_49769; } else { } if ((unsigned long )crtc == (unsigned long )for_crtc) { goto ldv_49770; } else { } nth_active_pipe = nth_active_pipe + 1; ldv_49769: __mptr___1 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___1 + 0xfffffffffffffff0UL; ldv_49772: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49771; } else { }
/* slice = [nth * ddb / num_active, start + ddb / num_active) */
ldv_49770: pipe_size = ddb_size / (unsigned int )config->num_pipes_active; alloc->start = (uint16_t )(((unsigned int )nth_active_pipe * ddb_size) / (unsigned int )config->num_pipes_active); alloc->end = (int )alloc->start + (int )((uint16_t )pipe_size); return; } }
/* Cursor DDB budget: 32 blocks when a single pipe is active, 8 otherwise. */
static unsigned int skl_cursor_allocation(struct intel_wm_config const *config ) { { if ((unsigned int )config->num_pipes_active == 1U) { return (32U); } else { } return (8U); } }
/* Decode a DDB allocation register: start in bits 9:0, end in bits 25:16.
 * A nonzero hardware 'end' is bumped by one — i.e. the register value looks
 * inclusive while the entry stores an exclusive end (inference from the +1;
 * confirm against the register spec). */
static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry , u32 reg ) { { entry->start = (unsigned int )((uint16_t )reg) & 1023U; entry->end = (unsigned int )((uint16_t )(reg >> 16)) & 1023U; if ((unsigned int )entry->end != 0U) { entry->end = (unsigned int )entry->end + 1U; } else { } return; } }
/* Read back every plane and cursor DDB allocation currently programmed in
 * hardware into *ddb. (Body continues on the next source line.) */
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv , struct skl_ddb_allocation *ddb ) { enum pipe pipe ; int plane ; u32 
val ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; {
/* (continuation of skl_ddb_get_hw_state) Outer loop walks pipes
 * (info.num_pipes); inner loop walks planes (num_sprites[pipe] + 1).
 * The off_t expressions are CIL-flattened per-pipe/per-plane register
 * addressing (presumably PLANE_BUF_CFG / CUR_BUF_CFG — confirm against the
 * i915 register map). */
pipe = 0; goto ldv_49803; ldv_49802: plane = 0; goto ldv_49800; ldv_49799: val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459644) + ((int )pipe * -4096 + -459388)) * plane) + 459388), 1);
/* NOTE(review): '(& ddb->plane) + (pipe + plane)' is CIL's rendering of 2-D
 * indexing into ddb->plane — verify the stride against struct
 * skl_ddb_allocation before editing. */
skl_ddb_entry_init_from_hw((struct skl_ddb_entry *)(& ddb->plane) + ((unsigned long )pipe + (unsigned long )plane), val); plane = plane + 1; ldv_49800: __p = dev_priv; if ((int )__p->info.num_sprites[(int )pipe] + 1 > plane) { goto ldv_49799; } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 459132), 1); skl_ddb_entry_init_from_hw((struct skl_ddb_entry *)(& ddb->cursor) + (unsigned long )pipe, val); pipe = (enum pipe )((int )pipe + 1); ldv_49803: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > (int )pipe) { goto ldv_49802; } else { } return; } }
/* Relative bandwidth demand of one plane: width * height * bpp.
 * For planar formats (y_bytes_per_pixel != 0): y != 0 returns the Y plane's
 * rate; y == 0 returns a half-width, half-height rate times the packed bpp
 * (the chroma plane of an NV12-style layout — presumably; confirm). */
static unsigned int skl_plane_relative_data_rate(struct intel_plane_wm_parameters const *p , int y ) { { if ((unsigned int )((unsigned char )p->y_bytes_per_pixel) != 0U) { if (y != 0) { return (((unsigned int )p->horiz_pixels * (unsigned int )p->vert_pixels) * (unsigned int )p->y_bytes_per_pixel); } else { return ((((unsigned int )p->horiz_pixels / 2U) * ((unsigned int )p->vert_pixels / 2U)) * (unsigned int )p->bytes_per_pixel); } } else { } return (((unsigned int )p->horiz_pixels * (unsigned int )p->vert_pixels) * (unsigned int )p->bytes_per_pixel); } }
/* Sum the data rates of every enabled plane on the crtc, counting the extra
 * Y-plane contribution for planar formats. (Loop body continues on the next
 * source line.) */
static unsigned int skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc , struct skl_pipe_wm_parameters const *params ) { unsigned int total_data_rate ; int plane ; struct intel_plane_wm_parameters const *p ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { total_data_rate = 0U; plane = 0; goto ldv_49818; ldv_49817: p = (struct intel_plane_wm_parameters const *)(& params->plane) + (unsigned long )plane; if (! 
/* (continuation of skl_get_total_relative_data_rate) Disabled planes are
 * skipped; planar formats also add their Y-plane rate. Loop bound is
 * intel_num_planes(). */
((_Bool )p->enabled)) { goto ldv_49816; } else { } tmp = skl_plane_relative_data_rate(p, 0); total_data_rate = tmp + total_data_rate; if ((unsigned int )((unsigned char )p->y_bytes_per_pixel) != 0U) { tmp___0 = skl_plane_relative_data_rate(p, 1); total_data_rate = tmp___0 + total_data_rate; } else { } ldv_49816: plane = plane + 1; ldv_49818: tmp___1 = intel_num_planes(intel_crtc); if ((unsigned int )plane < tmp___1) { goto ldv_49817; } else { } return (total_data_rate); } }
/* Split this pipe's DDB slice between the cursor and the planes:
 * the cursor gets a fixed budget carved off the top of the slice, each
 * enabled plane gets an 8-block minimum (plus 8 more for its Y plane when
 * planar), and the remainder is distributed proportionally to each plane's
 * relative data rate. */
static void skl_allocate_pipe_ddb(struct drm_crtc *crtc , struct intel_wm_config const *config , struct skl_pipe_wm_parameters const *params , struct skl_ddb_allocation *ddb ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; struct skl_ddb_entry *alloc ; uint16_t alloc_size ; uint16_t start ; uint16_t cursor_blocks ; uint16_t minimum[4U] ; uint16_t y_minimum[4U] ; unsigned int total_data_rate ; int plane ; unsigned int tmp ; struct intel_plane_wm_parameters const *p ; struct drm_i915_private *__p ; struct intel_plane_wm_parameters const *p___0 ; unsigned int data_rate ; unsigned int y_data_rate ; uint16_t plane_blocks ; uint16_t y_plane_blocks ; u64 tmp___0 ; u64 tmp___1 ; unsigned int tmp___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; alloc = (struct skl_ddb_entry *)(& ddb->pipe) + (unsigned long )pipe; skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc); alloc_size = skl_ddb_entry_size((struct skl_ddb_entry const *)alloc);
/* Empty slice: zero this pipe's plane (16 bytes) and cursor (4 bytes)
 * entries and bail out. */
if ((unsigned int )alloc_size == 0U) { memset((void *)(& ddb->plane) + (unsigned long )pipe, 0, 16UL); memset((void *)(& ddb->cursor) + (unsigned long )pipe, 0, 4UL); return; } else { }
/* Cursor blocks come off the top (end) of the allocation. */
tmp = skl_cursor_allocation(config); cursor_blocks = (uint16_t )tmp; ddb->cursor[(int )pipe].start = (int )alloc->end - (int )cursor_blocks; 
ddb->cursor[(int )pipe].end = alloc->end; alloc_size = (int )alloc_size - (int )cursor_blocks; alloc->end = (int )alloc->end - (int )cursor_blocks;
/* Reserve the fixed 8-block minimum per enabled plane (and per Y plane for
 * planar formats) before the proportional pass. */
plane = 0; goto ldv_49849; ldv_49848: p = (struct intel_plane_wm_parameters const *)(& params->plane) + (unsigned long )plane; if (! ((_Bool )p->enabled)) { goto ldv_49847; } else { } minimum[plane] = 8U; alloc_size = (int )alloc_size - (int )minimum[plane]; y_minimum[plane] = (unsigned int )((unsigned char )p->y_bytes_per_pixel) != 0U ? 8U : 0U; alloc_size = (int )alloc_size - (int )y_minimum[plane]; ldv_49847: plane = plane + 1; ldv_49849: __p = dev_priv; if ((int )__p->info.num_sprites[(int )pipe] + 1 > plane) { goto ldv_49848; } else { } total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params); start = alloc->start;
/* Proportional pass: each enabled plane's share is its minimum plus
 * alloc_size * data_rate / total_data_rate blocks; Y planes of planar
 * formats get an analogous share written into ddb->y_plane. */
plane = 0; goto ldv_49858; ldv_49857: y_plane_blocks = 0U; p___0 = (struct intel_plane_wm_parameters const *)(& params->plane) + (unsigned long )plane; if (! ((_Bool )p___0->enabled)) { goto ldv_49856; } else { } data_rate = skl_plane_relative_data_rate(p___0, 0); plane_blocks = minimum[plane]; tmp___0 = div_u64((unsigned long long )alloc_size * (unsigned long long )data_rate, total_data_rate); plane_blocks = (int )((uint16_t )tmp___0) + (int )plane_blocks; ddb->plane[(int )pipe][plane].start = start; ddb->plane[(int )pipe][plane].end = (int )start + (int )plane_blocks; start = (int )start + (int )plane_blocks; if ((unsigned int )((unsigned char )p___0->y_bytes_per_pixel) != 0U) { y_data_rate = skl_plane_relative_data_rate(p___0, 1); y_plane_blocks = y_minimum[plane]; tmp___1 = div_u64((unsigned long long )alloc_size * (unsigned long long )y_data_rate, total_data_rate); y_plane_blocks = (int )((uint16_t )tmp___1) + (int )y_plane_blocks; ddb->y_plane[(int )pipe][plane].start = start; ddb->y_plane[(int )pipe][plane].end = (int )start + (int )y_plane_blocks; start = (int )start + (int )y_plane_blocks; } else { } ldv_49856: plane = plane + 1; ldv_49858: tmp___2 = intel_num_planes(intel_crtc); 
if ((unsigned int )plane < tmp___2) { goto ldv_49857; } else { } return; } } static uint32_t skl_pipe_pixel_rate(struct intel_crtc_state const *config ) { { return ((uint32_t )config->base.adjusted_mode.crtc_clock); } } static uint32_t skl_wm_method1(uint32_t pixel_rate , uint8_t bytes_per_pixel , uint32_t latency ) { uint32_t wm_intermediate_val ; uint32_t ret ; { if (latency == 0U) { return (4294967295U); } else { } wm_intermediate_val = ((latency * pixel_rate) * (uint32_t )bytes_per_pixel) / 512U; ret = (wm_intermediate_val + 999U) / 1000U; return (ret); } } static uint32_t skl_wm_method2(uint32_t pixel_rate , uint32_t pipe_htotal , uint32_t horiz_pixels , uint8_t bytes_per_pixel , uint64_t tiling , uint32_t latency ) { uint32_t ret ; uint32_t plane_bytes_per_line ; uint32_t plane_blocks_per_line ; uint32_t wm_intermediate_val ; { if (latency == 0U) { return (4294967295U); } else { } plane_bytes_per_line = (uint32_t )bytes_per_pixel * horiz_pixels; if (tiling == 72057594037927938ULL || tiling == 72057594037927939ULL) { plane_bytes_per_line = plane_bytes_per_line * 4U; plane_blocks_per_line = (plane_bytes_per_line + 511U) / 512U; plane_blocks_per_line = plane_blocks_per_line / 4U; } else { plane_blocks_per_line = (plane_bytes_per_line + 511U) / 512U; } wm_intermediate_val = latency * pixel_rate; ret = (((pipe_htotal * 1000U + wm_intermediate_val) - 1U) / (pipe_htotal * 1000U)) * plane_blocks_per_line; return (ret); } } static bool skl_ddb_allocation_changed(struct skl_ddb_allocation const *new_ddb , struct intel_crtc const *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct skl_ddb_allocation const *cur_ddb ; enum pipe pipe ; int tmp ; int tmp___0 ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; cur_ddb = (struct skl_ddb_allocation const *)(& dev_priv->wm.__annonCompField83.skl_hw.ddb); pipe = intel_crtc->pipe; tmp = memcmp((void const *)(& new_ddb->plane) + (unsigned long )pipe, (void const 
/* (continuation of skl_ddb_allocation_changed) True when this pipe's plane
 * entries (16 bytes) or cursor entry (4 bytes) differ from the cached
 * hardware DDB state. */
*)(& cur_ddb->plane) + (unsigned long )pipe, 16UL); if (tmp != 0) { return (1); } else { } tmp___0 = memcmp((void const *)(& new_ddb->cursor) + (unsigned long )pipe, (void const *)(& cur_ddb->cursor) + (unsigned long )pipe, 4UL); if (tmp___0 != 0) { return (1); } else { } return (0); } }
/* Accumulate global watermark inputs: counts active CRTCs into
 * config->num_pipes_active, then ORs every intel_plane's wm.enabled /
 * wm.scaled flags into config->sprites_enabled / sprites_scaled.
 * Note: only accumulates — the caller is expected to pass a zeroed config. */
static void skl_compute_wm_global_parameters(struct drm_device *dev , struct intel_wm_config *config ) { struct drm_crtc *crtc ; struct drm_plane *plane ; struct list_head const *__mptr ; struct drm_crtc const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr___3 ; struct list_head const *__mptr___4 ; { __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_49903; ldv_49902: __mptr___0 = (struct drm_crtc const *)crtc; config->num_pipes_active = config->num_pipes_active + (unsigned int )((struct intel_crtc *)__mptr___0)->active; __mptr___1 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___1 + 0xfffffffffffffff0UL; ldv_49903: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49902; } else { } __mptr___2 = (struct list_head const *)dev->mode_config.plane_list.next; plane = (struct drm_plane *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_49913; ldv_49912: __mptr___3 = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr___3; config->sprites_enabled = ((int )config->sprites_enabled | (int )intel_plane->wm.enabled) != 0; config->sprites_scaled = ((int )config->sprites_scaled | (int )intel_plane->wm.scaled) != 0; __mptr___4 = (struct list_head const *)plane->head.next; plane = (struct drm_plane *)__mptr___4 + 0xfffffffffffffff8UL; ldv_49913: ; if ((unsigned long )(& plane->head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_49912; } else { } return; } } static void 
/* Gather the per-pipe watermark inputs for an SKL crtc into *p: pipe timing
 * and pixel rate, the primary plane's format/tiling parameters, the cursor
 * parameters, and a copy of each sprite plane's cached wm parameters.
 * fourcc 842094158U is presumably DRM_FORMAT_NV12 ('NV12' little-endian) —
 * TODO confirm; for it, bytes_per_pixel comes from the chroma plane (cpp
 * index 1) and y_bytes_per_pixel from the luma plane (index 0). */
skl_compute_wm_pipe_parameters(struct drm_crtc *crtc , struct skl_pipe_wm_parameters *p ) { struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; struct drm_plane *plane ; struct drm_framebuffer *fb ; int i ; int tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr___1 ; int tmp___1 ; struct list_head const *__mptr___2 ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe;
/* i starts at 1: slot 0 is the primary plane; sprites fill slots 1.. below. */
i = 1; p->active = intel_crtc->active; if ((int )p->active) { p->pipe_htotal = (uint32_t )(intel_crtc->config)->base.adjusted_mode.crtc_htotal; p->pixel_rate = skl_pipe_pixel_rate((struct intel_crtc_state const *)intel_crtc->config); fb = ((crtc->primary)->state)->fb; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { p->plane[0].enabled = 1; if (fb->pixel_format == 842094158U) { tmp = drm_format_plane_cpp(fb->pixel_format, 1); p->plane[0].bytes_per_pixel = (uint8_t )tmp; } else { p->plane[0].bytes_per_pixel = (uint8_t )(fb->bits_per_pixel / 8); } if (fb->pixel_format == 842094158U) { tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); p->plane[0].y_bytes_per_pixel = (uint8_t )tmp___0; } else { p->plane[0].y_bytes_per_pixel = 0U; } p->plane[0].tiling = fb->modifier[0]; } else {
/* No primary framebuffer: mark the primary plane disabled with neutral
 * format parameters. */
p->plane[0].enabled = 0; p->plane[0].bytes_per_pixel = 0U; p->plane[0].y_bytes_per_pixel = 0U; p->plane[0].tiling = 0ULL; } p->plane[0].horiz_pixels = (uint32_t )(intel_crtc->config)->pipe_src_w; p->plane[0].vert_pixels = (uint32_t )(intel_crtc->config)->pipe_src_h; p->plane[0].rotation = ((crtc->primary)->state)->rotation; fb = ((crtc->cursor)->state)->fb; p->cursor.y_bytes_per_pixel = 0U; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { p->cursor.enabled = 1; p->cursor.bytes_per_pixel = (uint8_t )(fb->bits_per_pixel / 8); p->cursor.horiz_pixels = ((crtc->cursor)->state)->crtc_w; 
p->cursor.vert_pixels = ((crtc->cursor)->state)->crtc_h; } else {
/* No cursor fb: disabled, with a default 64x64 extent. */
p->cursor.enabled = 0; p->cursor.bytes_per_pixel = 0U; p->cursor.horiz_pixels = 64U; p->cursor.vert_pixels = 64U; } } else { }
/* Copy the cached wm parameters of every plane on this pipe whose
 * plane->type == 0 (presumably DRM_PLANE_TYPE_OVERLAY, i.e. sprites —
 * TODO confirm) into consecutive p->plane[] slots. */
__mptr___0 = (struct list_head const *)dev->mode_config.plane_list.next; plane = (struct drm_plane *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_49935; ldv_49934: __mptr___1 = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr___1; if ((int )intel_plane->pipe == (int )pipe && (unsigned int )plane->type == 0U) { tmp___1 = i; i = i + 1; p->plane[tmp___1] = intel_plane->wm; } else { } __mptr___2 = (struct list_head const *)plane->head.next; plane = (struct drm_plane *)__mptr___2 + 0xfffffffffffffff8UL; ldv_49935: ; if ((unsigned long )(& plane->head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_49934; } else { } return; } }
/* Compute one plane's watermark for one latency level. Returns 0 (level not
 * usable) when the latency is zero, the pipe/plane is inactive, or the
 * result does not fit the plane's DDB allocation; otherwise writes the
 * block/line results through out_blocks/out_lines and returns 1.
 * (Body continues on the next source line.) */
static bool skl_compute_plane_wm(struct drm_i915_private const *dev_priv , struct skl_pipe_wm_parameters *p , struct intel_plane_wm_parameters *p_params , uint16_t ddb_allocation , int level , uint16_t *out_blocks , uint8_t *out_lines ) { uint32_t latency ; uint32_t method1 ; uint32_t method2 ; uint32_t plane_bytes_per_line ; uint32_t plane_blocks_per_line ; uint32_t res_blocks ; uint32_t res_lines ; uint32_t selected_result ; uint8_t bytes_per_pixel ; uint32_t min_scanlines ; uint32_t y_tile_minimum ; int __ret_warn_on ; long tmp ; bool tmp___0 ; uint32_t _max1 ; uint32_t _max2 ; uint32_t _min1 ; uint32_t _min2 ; { latency = (uint32_t )dev_priv->wm.skl_latency[level]; if ((latency == 0U || ! p->active) || ! p_params->enabled) { return (0); } else { }
/* Planar formats size the watermark on the Y plane's bpp. */
bytes_per_pixel = (unsigned int )p_params->y_bytes_per_pixel != 0U ? 
p_params->y_bytes_per_pixel : p_params->bytes_per_pixel; method1 = skl_wm_method1(p->pixel_rate, (int )bytes_per_pixel, latency); method2 = skl_wm_method2(p->pixel_rate, p->pipe_htotal, p_params->horiz_pixels, (int )bytes_per_pixel, p_params->tiling, latency); plane_bytes_per_line = p_params->horiz_pixels * (uint32_t )bytes_per_pixel; plane_blocks_per_line = (plane_bytes_per_line + 511U) / 512U;
/* Y/Yf-tiled planes (the two 64-bit modifier constants — presumably
 * I915_FORMAT_MOD_Y/Yf_TILED, confirm) enforce a minimum number of
 * scanlines; 90/270 rotation raises that minimum by bpp, and bpp == 8
 * with rotation triggers a one-shot kernel warning. */
if (p_params->tiling == 72057594037927938ULL || p_params->tiling == 72057594037927939ULL) { min_scanlines = 4U; tmp___0 = intel_rotation_90_or_270(p_params->rotation); if ((int )tmp___0) { switch ((int )p_params->bytes_per_pixel) { case 1: min_scanlines = 16U; goto ldv_49958; case 2: min_scanlines = 8U; goto ldv_49958; case 8: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 2979, "Unsupported pixel depth for rotation"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } ldv_49958: ; } else { }
/* Tiled: take max(method2, blocks_per_line * min_scanlines).
 * Linear: min(method1, method2) if at least one full line fits the DDB
 * allocation, otherwise fall back to method1. */
y_tile_minimum = plane_blocks_per_line * min_scanlines; _max1 = method2; _max2 = y_tile_minimum; selected_result = _max1 > _max2 ? _max1 : _max2; } else if ((uint32_t )ddb_allocation / plane_blocks_per_line != 0U) { _min1 = method1; _min2 = method2; selected_result = _min1 < _min2 ? 
_min1 : _min2; } else { selected_result = method1; } res_blocks = selected_result + 1U; res_lines = ((selected_result + plane_blocks_per_line) - 1U) / plane_blocks_per_line;
/* Levels 1..7 pay an extra cost: +4 lines when tiled, +1 block otherwise. */
if (level > 0 && level <= 7) { if (p_params->tiling == 72057594037927938ULL || p_params->tiling == 72057594037927939ULL) { res_lines = res_lines + 4U; } else { res_blocks = res_blocks + 1U; } } else { }
/* Reject the level if it doesn't fit the DDB allocation or exceeds the
 * 31-line field limit. */
if ((uint32_t )ddb_allocation <= res_blocks || res_lines > 31U) { return (0); } else { } *out_blocks = (uint16_t )res_blocks; *out_lines = (uint8_t )res_lines; return (1); } }
/* Compute one latency level's watermark for every plane and the cursor of a
 * pipe, storing the enables and block/line results in *result.
 * NOTE(review): '(& ddb->plane) + (pipe + i)' is CIL's rendering of 2-D
 * indexing into ddb->plane — verify the stride before editing. */
static void skl_compute_wm_level(struct drm_i915_private const *dev_priv , struct skl_ddb_allocation *ddb , struct skl_pipe_wm_parameters *p , enum pipe pipe , int level , int num_planes , struct skl_wm_level *result ) { uint16_t ddb_blocks ; int i ; { i = 0; goto ldv_49981; ldv_49980: ddb_blocks = skl_ddb_entry_size((struct skl_ddb_entry const *)(& ddb->plane) + ((unsigned long )pipe + (unsigned long )i)); result->plane_en[i] = skl_compute_plane_wm(dev_priv, p, (struct intel_plane_wm_parameters *)(& p->plane) + (unsigned long )i, (int )ddb_blocks, level, (uint16_t *)(& result->plane_res_b) + (unsigned long )i, (uint8_t *)(& result->plane_res_l) + (unsigned long )i); i = i + 1; ldv_49981: ; if (i < num_planes) { goto ldv_49980; } else { } ddb_blocks = skl_ddb_entry_size((struct skl_ddb_entry const *)(& ddb->cursor) + (unsigned long )pipe); result->cursor_en = skl_compute_plane_wm(dev_priv, p, & p->cursor, (int )ddb_blocks, level, & result->cursor_res_b, & result->cursor_res_l); return; } }
/* Line-time watermark: 0 for an inactive crtc, otherwise
 * ceil(htotal * 8000 / pixel_rate). */
static uint32_t skl_compute_linetime_wm(struct drm_crtc *crtc , struct skl_pipe_wm_parameters *p ) { struct drm_crtc const *__mptr ; int tmp ; { __mptr = (struct drm_crtc const *)crtc; if (((struct intel_crtc *)__mptr)->active) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0U); } else { } return (((p->pipe_htotal * 8000U + p->pixel_rate) - 1U) / p->pixel_rate); } }
/* Transition watermarks: currently forced off for every plane and the
 * cursor of an active pipe. (Body continues on the next source line.) */
static void skl_compute_transition_wm(struct drm_crtc *crtc , struct 
skl_pipe_wm_parameters *params , struct skl_wm_level *trans_wm ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int i ; unsigned int tmp ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if (! params->active) { return; } else { }
/* Clear every plane enable, then the cursor enable. */
i = 0; goto ldv_49999; ldv_49998: trans_wm->plane_en[i] = 0; i = i + 1; ldv_49999: tmp = intel_num_planes(intel_crtc); if ((unsigned int )i < tmp) { goto ldv_49998; } else { } trans_wm->cursor_en = 0; return; } }
/* Compute the complete pipe watermark set: one skl_wm_level per latency
 * level 0..ilk_wm_max_level(), plus the linetime and transition watermarks. */
static void skl_compute_pipe_wm(struct drm_crtc *crtc , struct skl_ddb_allocation *ddb , struct skl_pipe_wm_parameters *params , struct skl_pipe_wm *pipe_wm ) { struct drm_device *dev ; struct drm_i915_private const *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int level ; int max_level ; int tmp ; unsigned int tmp___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private const *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; level = 0; goto ldv_50015; ldv_50014: tmp___0 = intel_num_planes(intel_crtc); skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe, level, (int )tmp___0, (struct skl_wm_level *)(& pipe_wm->wm) + (unsigned long )level); level = level + 1; ldv_50015: ; if (level <= max_level) { goto ldv_50014; } else { } pipe_wm->linetime = skl_compute_linetime_wm(crtc, params); skl_compute_transition_wm(crtc, params, & pipe_wm->trans_wm); return; } }
/* Pack the computed watermarks into register-image form in *r:
 * value = (lines << 14) | blocks, with bit 31 (2147483648U) as the enable
 * bit — for every plane and the cursor, per level and for the transition
 * watermarks; also records the pipe's linetime. */
static void skl_compute_wm_results(struct drm_device *dev , struct skl_pipe_wm_parameters *p , struct skl_pipe_wm *p_wm , struct skl_wm_values *r , struct intel_crtc *intel_crtc ) { int level ; int max_level ; int tmp ; enum pipe pipe ; uint32_t temp ; int i ; unsigned int tmp___0 ; unsigned int tmp___1 ; { tmp = ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; pipe = intel_crtc->pipe; level = 0; goto ldv_50033; ldv_50032: i = 0; goto ldv_50030; 
ldv_50029: temp = 0U; temp = (uint32_t )((int )p_wm->wm[level].plane_res_l[i] << 14) | temp; temp = (uint32_t )p_wm->wm[level].plane_res_b[i] | temp; if ((int )p_wm->wm[level].plane_en[i]) { temp = temp | 2147483648U; } else { } r->plane[(int )pipe][i][level] = temp; i = i + 1; ldv_50030: tmp___0 = intel_num_planes(intel_crtc); if ((unsigned int )i < tmp___0) { goto ldv_50029; } else { } temp = 0U; temp = (uint32_t )((int )p_wm->wm[level].cursor_res_l << 14) | temp; temp = (uint32_t )p_wm->wm[level].cursor_res_b | temp; if ((int )p_wm->wm[level].cursor_en) { temp = temp | 2147483648U; } else { } r->cursor[(int )pipe][level] = temp; level = level + 1; ldv_50033: ; if (level <= max_level) { goto ldv_50032; } else { }
/* Same packing for the transition watermarks. */
i = 0; goto ldv_50036; ldv_50035: temp = 0U; temp = (uint32_t )((int )p_wm->trans_wm.plane_res_l[i] << 14) | temp; temp = (uint32_t )p_wm->trans_wm.plane_res_b[i] | temp; if ((int )p_wm->trans_wm.plane_en[i]) { temp = temp | 2147483648U; } else { } r->plane_trans[(int )pipe][i] = temp; i = i + 1; ldv_50036: tmp___1 = intel_num_planes(intel_crtc); if ((unsigned int )i < tmp___1) { goto ldv_50035; } else { } temp = 0U; temp = (uint32_t )((int )p_wm->trans_wm.cursor_res_l << 14) | temp; temp = (uint32_t )p_wm->trans_wm.cursor_res_b | temp; if ((int )p_wm->trans_wm.cursor_en) { temp = temp | 2147483648U; } else { } r->cursor_trans[(int )pipe] = temp; r->wm_linetime[(int )pipe] = p_wm->linetime; return; } }
/* Program one DDB entry register: ((end - 1) << 16) | start, or 0 for an
 * empty entry — the inverse of skl_ddb_entry_init_from_hw()'s decoding. */
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv , uint32_t reg , struct skl_ddb_entry const *entry ) { { if ((unsigned int )((unsigned short )entry->end) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, (uint32_t )((((int )entry->end + -1) << 16) | (int )entry->start), 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, 0U, 1); } return; } }
/* Write the packed watermark values and DDB allocations of every dirty pipe
 * to hardware. (Body continues on the next source line.) */
static void skl_write_wm_values(struct drm_i915_private *dev_priv , struct skl_wm_values const *new ) { struct drm_device *dev ; struct intel_crtc *crtc 
; struct list_head const *__mptr ; int i ; int level ; int max_level ; int tmp ; enum pipe pipe ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; struct list_head const *__mptr___0 ; { dev = dev_priv->dev; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_50071; ldv_50070: tmp = ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp; pipe = crtc->pipe;
/* Pipes not flagged dirty are skipped entirely. */
if (! new->dirty[(int )pipe]) { goto ldv_50057; } else { }
/* Linetime, then per-plane and cursor watermarks for every level. The
 * off_t expressions are CIL-flattened per-pipe/per-plane register
 * addressing (presumably PIPE_WM_LINETIME / PLANE_WM / CUR_WM — confirm
 * against the i915 register map). */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pipe + 70812) * 4), new->wm_linetime[(int )pipe], 1); level = 0; goto ldv_50062; ldv_50061: i = 0; goto ldv_50059; ldv_50058: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((((int )pipe * 4096 + (((int )pipe * 4096 + 459584) + ((int )pipe * -4096 + -459328)) * i) + 459328) + level * 4), new->plane[(int )pipe][i][level], 1); i = i + 1; ldv_50059: tmp___0 = intel_num_planes(crtc); if ((unsigned int )i < tmp___0) { goto ldv_50058; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((((int )pipe * 1024 + level) + 114768) * 4), new->cursor[(int )pipe][level], 1); level = level + 1; ldv_50062: ; if (level <= max_level) { goto ldv_50061; } else { }
/* Transition watermarks for each plane, then the cursor. */
i = 0; goto ldv_50065; ldv_50064: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459624) + ((int )pipe * -4096 + -459368)) * i) + 459368), new->plane_trans[(int )pipe][i], 1); i = i + 1; ldv_50065: tmp___1 = intel_num_planes(crtc); if ((unsigned int )i < tmp___1) { goto ldv_50064; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459112), new->cursor_trans[(int )pipe], 1);
/* DDB allocations: plane and y_plane entries per plane, then the cursor. */
i = 0; goto ldv_50068; ldv_50067: skl_ddb_entry_write(dev_priv, (uint32_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459644) + ((int )pipe * -4096 + -459388)) * i) + 459388), (struct skl_ddb_entry const *)(& new->ddb.plane) + ((unsigned long )pipe 
+ (unsigned long )i)); skl_ddb_entry_write(dev_priv, (uint32_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459640) + ((int )pipe * -4096 + -459384)) * i) + 459384), (struct skl_ddb_entry const *)(& new->ddb.y_plane) + ((unsigned long )pipe + (unsigned long )i)); i = i + 1; ldv_50068: tmp___2 = intel_num_planes(crtc); if ((unsigned int )i < tmp___2) { goto ldv_50067; } else { } skl_ddb_entry_write(dev_priv, (uint32_t )((int )pipe * 4096 + 459132), (struct skl_ddb_entry const *)(& new->ddb.cursor) + (unsigned long )pipe); ldv_50057: __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_50071: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50070; } else { } return; } }
/* Re-arm one pipe's double-buffered plane registers by reading each plane
 * surface register (and the cursor base) and writing the same value back —
 * presumably to latch the new DDB/watermark values on the next vblank
 * (TODO confirm intent against upstream intel_pm.c comments). Emits a
 * debug trace when drm_debug bit 2 is set. */
static void skl_wm_flush_pipe(struct drm_i915_private *dev_priv , enum pipe pipe , int pass ) { int plane ; long tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; uint32_t tmp___1 ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("skl_wm_flush_pipe", "flush pipe %c (pass %d)\n", (int )pipe + 65, pass); } else { } plane = 0; goto ldv_50087; ldv_50086: tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459420) + ((int )pipe * -4096 + -459164)) * plane) + 459164), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459420) + ((int )pipe * -4096 + -459164)) * plane) + 459164), tmp___0, 1); plane = plane + 1; ldv_50087: __p = dev_priv; if ((int )__p->info.num_sprites[(int )pipe] + 1 > plane) { goto ldv_50086; } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[(int )pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458884U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned 
/* (continuation: cursor register write-back finishing skl_wm_flush_pipe) */ int )(dev_priv->info.cursor_offsets[(int )pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458884U), tmp___1, 1); return; } } /* skl_ddb_allocation_included: true iff the new DDB entry for @pipe has a different
   size than the old one AND its [start, end] range lies entirely inside the old range,
   i.e. the pipe can be flushed immediately because shrinking within the old allocation
   cannot overlap another pipe. */ static bool skl_ddb_allocation_included(struct skl_ddb_allocation const *old , struct skl_ddb_allocation const *new , enum pipe pipe ) { uint16_t old_size ; uint16_t new_size ; { old_size = skl_ddb_entry_size((struct skl_ddb_entry const *)(& old->pipe) + (unsigned long )pipe); new_size = skl_ddb_entry_size((struct skl_ddb_entry const *)(& new->pipe) + (unsigned long )pipe); return ((bool )(((int )old_size != (int )new_size && (int )((unsigned short )new->pipe[(int )pipe].start) >= (int )((unsigned short )old->pipe[(int )pipe].start)) && (int )((unsigned short )new->pipe[(int )pipe].end) <= (int )((unsigned short )old->pipe[(int )pipe].end))); } } /* skl_flush_wm_values: flushes new watermark/DDB values to the hardware in up to three
   passes over the active CRTCs (pass 1: pipes whose new allocation is included in the
   old one; pass 2: pipes that shrank; pass 3: the rest), waiting a vblank between
   reallocations so pipes never transiently overlap in the DDB. reallocated[3U] tracks
   which pipes were already flushed. */ static void skl_flush_wm_values(struct drm_i915_private *dev_priv , struct skl_wm_values *new_values ) { struct drm_device *dev ; struct skl_ddb_allocation *cur_ddb ; struct skl_ddb_allocation *new_ddb ; bool reallocated[3U] ; struct intel_crtc *crtc ; enum pipe pipe ; struct list_head const *__mptr ; bool tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; uint16_t tmp___1 ; uint16_t tmp___2 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { dev = dev_priv->dev; reallocated[0] = (_Bool)0; reallocated[1] = (_Bool)0; reallocated[2] = (_Bool)0; new_ddb = & new_values->ddb; cur_ddb = & dev_priv->wm.__annonCompField83.skl_hw.ddb; /* pass 1 over crtc_list (CIL-expanded list_for_each_entry) */ __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_50112; ldv_50111: ; if (! 
/* skl_flush_wm_values pass 1: flush pipes whose new DDB allocation fits inside the old
   one (skl_ddb_allocation_included), then pass 2 below: flush pipes whose allocation
   shrank. Inactive or already-flushed pipes are skipped via the ldv_* continue labels. */ crtc->active) { goto ldv_50110; } else { } pipe = crtc->pipe; tmp = skl_ddb_allocation_included((struct skl_ddb_allocation const *)cur_ddb, (struct skl_ddb_allocation const *)new_ddb, pipe); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_50110; } else { } skl_wm_flush_pipe(dev_priv, pipe, 1); intel_wait_for_vblank(dev, (int )pipe); reallocated[(int )pipe] = 1; ldv_50110: __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_50112: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50111; } else { } /* pass 2: pipes not yet flushed whose new allocation is smaller than the current one */ __mptr___1 = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr___1 + 0xfffffffffffffff0UL; goto ldv_50120; ldv_50119: ; if (! crtc->active) { goto ldv_50118; } else { } pipe = crtc->pipe; if ((int )reallocated[(int )pipe]) { goto ldv_50118; } else { } tmp___1 = skl_ddb_entry_size((struct skl_ddb_entry const *)(& new_ddb->pipe) + (unsigned long )pipe); tmp___2 = skl_ddb_entry_size((struct skl_ddb_entry const *)(& cur_ddb->pipe) + (unsigned long )pipe); if ((int )tmp___1 < (int )tmp___2) { skl_wm_flush_pipe(dev_priv, pipe, 2); intel_wait_for_vblank(dev, (int )pipe); reallocated[(int )pipe] = 1; } else { } ldv_50118: __mptr___2 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___2 + 0xfffffffffffffff0UL; ldv_50120: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50119; } else { } /* pass 3: flush every remaining active pipe (no vblank wait needed here) */ __mptr___3 = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr___3 + 0xfffffffffffffff0UL; goto ldv_50128; ldv_50127: ; if (! 
/* (skl_flush_wm_values pass 3 body, then its epilogue) */ crtc->active) { goto ldv_50126; } else { } pipe = crtc->pipe; if ((int )reallocated[(int )pipe]) { goto ldv_50126; } else { } skl_wm_flush_pipe(dev_priv, pipe, 3); ldv_50126: __mptr___4 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___4 + 0xfffffffffffffff0UL; ldv_50128: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50127; } else { } return; } } /* skl_update_pipe_wm: recompute one pipe's WM parameters, DDB allocation and watermarks
   into *pipe_wm. Returns 0 (false) if the result is byte-identical (memcmp over the
   204-byte struct skl_pipe_wm) to the cached intel_crtc->wm.skl_active, otherwise
   caches the new value and returns 1 (true = "watermarks changed"). */ static bool skl_update_pipe_wm(struct drm_crtc *crtc , struct skl_pipe_wm_parameters *params , struct intel_wm_config *config , struct skl_ddb_allocation *ddb , struct skl_pipe_wm *pipe_wm ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int tmp ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; skl_compute_wm_pipe_parameters(crtc, params); skl_allocate_pipe_ddb(crtc, (struct intel_wm_config const *)config, (struct skl_pipe_wm_parameters const *)params, ddb); skl_compute_pipe_wm(crtc, ddb, params, pipe_wm); tmp = memcmp((void const *)(& intel_crtc->wm.skl_active), (void const *)pipe_wm, 204UL); if (tmp == 0) { return (0); } else { } intel_crtc->wm.skl_active = *pipe_wm; return (1); } } /* skl_update_other_pipe_wm: when the new global DDB allocation in *r differs from what
   the CRTC @crtc currently has (skl_ddb_allocation_changed), recompute watermarks for
   every OTHER active pipe too and mark them dirty in r->dirty[]. Returns early if the
   allocation did not change. */ static void skl_update_other_pipe_wm(struct drm_device *dev , struct drm_crtc *crtc , struct intel_wm_config *config , struct skl_wm_values *r ) { struct intel_crtc *intel_crtc ; struct intel_crtc *this_crtc ; struct drm_crtc const *__mptr ; bool tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct skl_pipe_wm_parameters params ; struct skl_pipe_wm pipe_wm ; bool wm_changed ; int __ret_warn_on ; long tmp___1 ; struct list_head const *__mptr___1 ; { __mptr = (struct drm_crtc const *)crtc; this_crtc = (struct intel_crtc *)__mptr; tmp = skl_ddb_allocation_changed((struct skl_ddb_allocation const *)(& r->ddb), (struct intel_crtc const *)this_crtc); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } /* iterate all CRTCs (CIL-expanded list_for_each_entry) */ __mptr___0 = (struct list_head const 
/* skl_update_other_pipe_wm loop head; the long assignment run below is the CIL
   expansion of zero-initializing the local `params` (struct skl_pipe_wm_parameters)
   and `pipe_wm` (struct skl_pipe_wm) aggregates, i.e. `= {}` in the original source. */ *)dev->mode_config.crtc_list.next; intel_crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; goto ldv_50161; ldv_50160: params.active = (_Bool)0; params.pipe_htotal = 0U; params.pixel_rate = 0U; params.plane[0].horiz_pixels = 0U; params.plane[0].vert_pixels = 0U; params.plane[0].bytes_per_pixel = (unsigned char)0; params.plane[0].y_bytes_per_pixel = (unsigned char)0; params.plane[0].enabled = (_Bool)0; params.plane[0].scaled = (_Bool)0; params.plane[0].tiling = 0ULL; params.plane[0].rotation = 0U; params.plane[1].horiz_pixels = 0U; params.plane[1].vert_pixels = 0U; params.plane[1].bytes_per_pixel = (unsigned char)0; params.plane[1].y_bytes_per_pixel = (unsigned char)0; params.plane[1].enabled = (_Bool)0; params.plane[1].scaled = (_Bool)0; params.plane[1].tiling = 0ULL; params.plane[1].rotation = 0U; params.plane[2].horiz_pixels = 0U; params.plane[2].vert_pixels = 0U; params.plane[2].bytes_per_pixel = (unsigned char)0; params.plane[2].y_bytes_per_pixel = (unsigned char)0; params.plane[2].enabled = (_Bool)0; params.plane[2].scaled = (_Bool)0; params.plane[2].tiling = 0ULL; params.plane[2].rotation = 0U; params.plane[3].horiz_pixels = 0U; params.plane[3].vert_pixels = 0U; params.plane[3].bytes_per_pixel = (unsigned char)0; params.plane[3].y_bytes_per_pixel = (unsigned char)0; params.plane[3].enabled = (_Bool)0; params.plane[3].scaled = (_Bool)0; params.plane[3].tiling = 0ULL; params.plane[3].rotation = 0U; params.cursor.horiz_pixels = 0U; params.cursor.vert_pixels = 0U; params.cursor.bytes_per_pixel = (unsigned char)0; params.cursor.y_bytes_per_pixel = (unsigned char)0; params.cursor.enabled = (_Bool)0; params.cursor.scaled = (_Bool)0; params.cursor.tiling = 0ULL; params.cursor.rotation = 0U; pipe_wm.wm[0].plane_en[0] = (_Bool)0; pipe_wm.wm[0].plane_en[1] = (_Bool)0; pipe_wm.wm[0].plane_en[2] = (_Bool)0; pipe_wm.wm[0].plane_en[3] = (_Bool)0; pipe_wm.wm[0].cursor_en = (_Bool)0; pipe_wm.wm[0].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[0].plane_res_b[1] = 
/* zero-init of local pipe_wm continues: WM levels 0..3 (CIL-expanded aggregate init) */ (unsigned short)0; pipe_wm.wm[0].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[0].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[0].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[0].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[0].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[0].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[0].cursor_res_b = (unsigned short)0; pipe_wm.wm[0].cursor_res_l = (unsigned char)0; pipe_wm.wm[1].plane_en[0] = (_Bool)0; pipe_wm.wm[1].plane_en[1] = (_Bool)0; pipe_wm.wm[1].plane_en[2] = (_Bool)0; pipe_wm.wm[1].plane_en[3] = (_Bool)0; pipe_wm.wm[1].cursor_en = (_Bool)0; pipe_wm.wm[1].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[1].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[1].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[1].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[1].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[1].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[1].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[1].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[1].cursor_res_b = (unsigned short)0; pipe_wm.wm[1].cursor_res_l = (unsigned char)0; pipe_wm.wm[2].plane_en[0] = (_Bool)0; pipe_wm.wm[2].plane_en[1] = (_Bool)0; pipe_wm.wm[2].plane_en[2] = (_Bool)0; pipe_wm.wm[2].plane_en[3] = (_Bool)0; pipe_wm.wm[2].cursor_en = (_Bool)0; pipe_wm.wm[2].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[2].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[2].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[2].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[2].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[2].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[2].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[2].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[2].cursor_res_b = (unsigned short)0; pipe_wm.wm[2].cursor_res_l = (unsigned char)0; pipe_wm.wm[3].plane_en[0] = (_Bool)0; pipe_wm.wm[3].plane_en[1] = (_Bool)0; pipe_wm.wm[3].plane_en[2] = (_Bool)0; pipe_wm.wm[3].plane_en[3] = (_Bool)0; pipe_wm.wm[3].cursor_en = (_Bool)0; pipe_wm.wm[3].plane_res_b[0] = (unsigned 
/* zero-init of local pipe_wm continues: WM levels 3..6 */ short)0; pipe_wm.wm[3].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[3].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[3].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[3].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[3].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[3].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[3].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[3].cursor_res_b = (unsigned short)0; pipe_wm.wm[3].cursor_res_l = (unsigned char)0; pipe_wm.wm[4].plane_en[0] = (_Bool)0; pipe_wm.wm[4].plane_en[1] = (_Bool)0; pipe_wm.wm[4].plane_en[2] = (_Bool)0; pipe_wm.wm[4].plane_en[3] = (_Bool)0; pipe_wm.wm[4].cursor_en = (_Bool)0; pipe_wm.wm[4].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[4].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[4].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[4].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[4].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[4].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[4].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[4].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[4].cursor_res_b = (unsigned short)0; pipe_wm.wm[4].cursor_res_l = (unsigned char)0; pipe_wm.wm[5].plane_en[0] = (_Bool)0; pipe_wm.wm[5].plane_en[1] = (_Bool)0; pipe_wm.wm[5].plane_en[2] = (_Bool)0; pipe_wm.wm[5].plane_en[3] = (_Bool)0; pipe_wm.wm[5].cursor_en = (_Bool)0; pipe_wm.wm[5].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[5].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[5].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[5].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[5].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[5].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[5].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[5].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[5].cursor_res_b = (unsigned short)0; pipe_wm.wm[5].cursor_res_l = (unsigned char)0; pipe_wm.wm[6].plane_en[0] = (_Bool)0; pipe_wm.wm[6].plane_en[1] = (_Bool)0; pipe_wm.wm[6].plane_en[2] = (_Bool)0; pipe_wm.wm[6].plane_en[3] = (_Bool)0; pipe_wm.wm[6].cursor_en = (_Bool)0; 
/* zero-init of local pipe_wm continues: WM levels 6..7 and trans_wm, then the loop's
   real work begins: skip the pipe that triggered the update (this_crtc). */ pipe_wm.wm[6].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[6].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[6].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[6].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[6].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[6].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[6].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[6].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[6].cursor_res_b = (unsigned short)0; pipe_wm.wm[6].cursor_res_l = (unsigned char)0; pipe_wm.wm[7].plane_en[0] = (_Bool)0; pipe_wm.wm[7].plane_en[1] = (_Bool)0; pipe_wm.wm[7].plane_en[2] = (_Bool)0; pipe_wm.wm[7].plane_en[3] = (_Bool)0; pipe_wm.wm[7].cursor_en = (_Bool)0; pipe_wm.wm[7].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[7].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[7].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[7].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[7].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[7].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[7].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[7].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[7].cursor_res_b = (unsigned short)0; pipe_wm.wm[7].cursor_res_l = (unsigned char)0; pipe_wm.trans_wm.plane_en[0] = (_Bool)0; pipe_wm.trans_wm.plane_en[1] = (_Bool)0; pipe_wm.trans_wm.plane_en[2] = (_Bool)0; pipe_wm.trans_wm.plane_en[3] = (_Bool)0; pipe_wm.trans_wm.cursor_en = (_Bool)0; pipe_wm.trans_wm.plane_res_b[0] = (unsigned short)0; pipe_wm.trans_wm.plane_res_b[1] = (unsigned short)0; pipe_wm.trans_wm.plane_res_b[2] = (unsigned short)0; pipe_wm.trans_wm.plane_res_b[3] = (unsigned short)0; pipe_wm.trans_wm.plane_res_l[0] = (unsigned char)0; pipe_wm.trans_wm.plane_res_l[1] = (unsigned char)0; pipe_wm.trans_wm.plane_res_l[2] = (unsigned char)0; pipe_wm.trans_wm.plane_res_l[3] = (unsigned char)0; pipe_wm.trans_wm.cursor_res_b = (unsigned short)0; pipe_wm.trans_wm.cursor_res_l = (unsigned char)0; pipe_wm.linetime = 0U; if ((int )this_crtc->pipe == (int )intel_crtc->pipe) { goto ldv_50157; } else { } 
/* skl_update_other_pipe_wm loop tail: recompute the other pipe's WM; WARN_ON if it did
   NOT change (the DDB allocation changed, so the watermarks must change too), then
   record the results and mark the pipe dirty. */ if (! intel_crtc->active) { goto ldv_50157; } else { } wm_changed = skl_update_pipe_wm(& intel_crtc->base, & params, config, & r->ddb, & pipe_wm); __ret_warn_on = ! wm_changed; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 3391, "WARN_ON(!wm_changed)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); skl_compute_wm_results(dev, & params, & pipe_wm, r, intel_crtc); r->dirty[(int )intel_crtc->pipe] = 1; ldv_50157: __mptr___1 = (struct list_head const *)intel_crtc->base.head.next; intel_crtc = (struct intel_crtc *)__mptr___1 + 0xfffffffffffffff0UL; ldv_50161: ; if ((unsigned long )(& intel_crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50160; } else { } return; } } /* skl_update_wm: top-level SKL watermark update for @crtc. Zero-initializes the local
   parameter/result aggregates (the long assignment runs below are CIL-expanded `= {}`),
   recomputes this pipe's WM, propagates to other pipes if the DDB allocation changed,
   writes the new values to hardware, flushes them, and caches them in
   dev_priv->wm.skl_hw. */ static void skl_update_wm(struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct skl_pipe_wm_parameters params ; struct skl_wm_values *results ; struct skl_pipe_wm pipe_wm ; struct intel_wm_config config ; bool tmp ; int tmp___0 ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; params.active = (_Bool)0; params.pipe_htotal = 0U; params.pixel_rate = 0U; params.plane[0].horiz_pixels = 0U; params.plane[0].vert_pixels = 0U; params.plane[0].bytes_per_pixel = (unsigned char)0; params.plane[0].y_bytes_per_pixel = (unsigned char)0; params.plane[0].enabled = (_Bool)0; params.plane[0].scaled = (_Bool)0; params.plane[0].tiling = 0ULL; params.plane[0].rotation = 0U; params.plane[1].horiz_pixels = 0U; params.plane[1].vert_pixels = 0U; params.plane[1].bytes_per_pixel = (unsigned char)0; 
/* zero-init of skl_update_wm's local `params` continues, then `pipe_wm` (CIL `= {}`) */ params.plane[1].y_bytes_per_pixel = (unsigned char)0; params.plane[1].enabled = (_Bool)0; params.plane[1].scaled = (_Bool)0; params.plane[1].tiling = 0ULL; params.plane[1].rotation = 0U; params.plane[2].horiz_pixels = 0U; params.plane[2].vert_pixels = 0U; params.plane[2].bytes_per_pixel = (unsigned char)0; params.plane[2].y_bytes_per_pixel = (unsigned char)0; params.plane[2].enabled = (_Bool)0; params.plane[2].scaled = (_Bool)0; params.plane[2].tiling = 0ULL; params.plane[2].rotation = 0U; params.plane[3].horiz_pixels = 0U; params.plane[3].vert_pixels = 0U; params.plane[3].bytes_per_pixel = (unsigned char)0; params.plane[3].y_bytes_per_pixel = (unsigned char)0; params.plane[3].enabled = (_Bool)0; params.plane[3].scaled = (_Bool)0; params.plane[3].tiling = 0ULL; params.plane[3].rotation = 0U; params.cursor.horiz_pixels = 0U; params.cursor.vert_pixels = 0U; params.cursor.bytes_per_pixel = (unsigned char)0; params.cursor.y_bytes_per_pixel = (unsigned char)0; params.cursor.enabled = (_Bool)0; params.cursor.scaled = (_Bool)0; params.cursor.tiling = 0ULL; params.cursor.rotation = 0U; results = & dev_priv->wm.skl_results; pipe_wm.wm[0].plane_en[0] = (_Bool)0; pipe_wm.wm[0].plane_en[1] = (_Bool)0; pipe_wm.wm[0].plane_en[2] = (_Bool)0; pipe_wm.wm[0].plane_en[3] = (_Bool)0; pipe_wm.wm[0].cursor_en = (_Bool)0; pipe_wm.wm[0].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[0].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[0].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[0].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[0].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[0].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[0].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[0].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[0].cursor_res_b = (unsigned short)0; pipe_wm.wm[0].cursor_res_l = (unsigned char)0; pipe_wm.wm[1].plane_en[0] = (_Bool)0; pipe_wm.wm[1].plane_en[1] = (_Bool)0; pipe_wm.wm[1].plane_en[2] = (_Bool)0; pipe_wm.wm[1].plane_en[3] = (_Bool)0; pipe_wm.wm[1].cursor_en = (_Bool)0; 
/* zero-init of skl_update_wm's local `pipe_wm` continues: WM levels 1..4 */ pipe_wm.wm[1].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[1].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[1].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[1].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[1].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[1].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[1].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[1].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[1].cursor_res_b = (unsigned short)0; pipe_wm.wm[1].cursor_res_l = (unsigned char)0; pipe_wm.wm[2].plane_en[0] = (_Bool)0; pipe_wm.wm[2].plane_en[1] = (_Bool)0; pipe_wm.wm[2].plane_en[2] = (_Bool)0; pipe_wm.wm[2].plane_en[3] = (_Bool)0; pipe_wm.wm[2].cursor_en = (_Bool)0; pipe_wm.wm[2].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[2].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[2].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[2].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[2].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[2].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[2].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[2].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[2].cursor_res_b = (unsigned short)0; pipe_wm.wm[2].cursor_res_l = (unsigned char)0; pipe_wm.wm[3].plane_en[0] = (_Bool)0; pipe_wm.wm[3].plane_en[1] = (_Bool)0; pipe_wm.wm[3].plane_en[2] = (_Bool)0; pipe_wm.wm[3].plane_en[3] = (_Bool)0; pipe_wm.wm[3].cursor_en = (_Bool)0; pipe_wm.wm[3].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[3].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[3].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[3].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[3].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[3].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[3].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[3].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[3].cursor_res_b = (unsigned short)0; pipe_wm.wm[3].cursor_res_l = (unsigned char)0; pipe_wm.wm[4].plane_en[0] = (_Bool)0; pipe_wm.wm[4].plane_en[1] = (_Bool)0; pipe_wm.wm[4].plane_en[2] = (_Bool)0; pipe_wm.wm[4].plane_en[3] = 
/* zero-init of skl_update_wm's local `pipe_wm` continues: WM levels 4..7 */ (_Bool)0; pipe_wm.wm[4].cursor_en = (_Bool)0; pipe_wm.wm[4].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[4].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[4].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[4].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[4].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[4].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[4].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[4].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[4].cursor_res_b = (unsigned short)0; pipe_wm.wm[4].cursor_res_l = (unsigned char)0; pipe_wm.wm[5].plane_en[0] = (_Bool)0; pipe_wm.wm[5].plane_en[1] = (_Bool)0; pipe_wm.wm[5].plane_en[2] = (_Bool)0; pipe_wm.wm[5].plane_en[3] = (_Bool)0; pipe_wm.wm[5].cursor_en = (_Bool)0; pipe_wm.wm[5].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[5].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[5].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[5].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[5].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[5].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[5].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[5].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[5].cursor_res_b = (unsigned short)0; pipe_wm.wm[5].cursor_res_l = (unsigned char)0; pipe_wm.wm[6].plane_en[0] = (_Bool)0; pipe_wm.wm[6].plane_en[1] = (_Bool)0; pipe_wm.wm[6].plane_en[2] = (_Bool)0; pipe_wm.wm[6].plane_en[3] = (_Bool)0; pipe_wm.wm[6].cursor_en = (_Bool)0; pipe_wm.wm[6].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[6].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[6].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[6].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[6].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[6].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[6].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[6].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[6].cursor_res_b = (unsigned short)0; pipe_wm.wm[6].cursor_res_l = (unsigned char)0; pipe_wm.wm[7].plane_en[0] = (_Bool)0; pipe_wm.wm[7].plane_en[1] = (_Bool)0; pipe_wm.wm[7].plane_en[2] = 
/* zero-init tail (WM level 7 and trans_wm), then skl_update_wm's actual logic:
   compute global config, update this pipe's WM (early-return if unchanged), record
   results, update other pipes, write + flush to hardware, cache in skl_hw. */ (_Bool)0; pipe_wm.wm[7].plane_en[3] = (_Bool)0; pipe_wm.wm[7].cursor_en = (_Bool)0; pipe_wm.wm[7].plane_res_b[0] = (unsigned short)0; pipe_wm.wm[7].plane_res_b[1] = (unsigned short)0; pipe_wm.wm[7].plane_res_b[2] = (unsigned short)0; pipe_wm.wm[7].plane_res_b[3] = (unsigned short)0; pipe_wm.wm[7].plane_res_l[0] = (unsigned char)0; pipe_wm.wm[7].plane_res_l[1] = (unsigned char)0; pipe_wm.wm[7].plane_res_l[2] = (unsigned char)0; pipe_wm.wm[7].plane_res_l[3] = (unsigned char)0; pipe_wm.wm[7].cursor_res_b = (unsigned short)0; pipe_wm.wm[7].cursor_res_l = (unsigned char)0; pipe_wm.trans_wm.plane_en[0] = (_Bool)0; pipe_wm.trans_wm.plane_en[1] = (_Bool)0; pipe_wm.trans_wm.plane_en[2] = (_Bool)0; pipe_wm.trans_wm.plane_en[3] = (_Bool)0; pipe_wm.trans_wm.cursor_en = (_Bool)0; pipe_wm.trans_wm.plane_res_b[0] = (unsigned short)0; pipe_wm.trans_wm.plane_res_b[1] = (unsigned short)0; pipe_wm.trans_wm.plane_res_b[2] = (unsigned short)0; pipe_wm.trans_wm.plane_res_b[3] = (unsigned short)0; pipe_wm.trans_wm.plane_res_l[0] = (unsigned char)0; pipe_wm.trans_wm.plane_res_l[1] = (unsigned char)0; pipe_wm.trans_wm.plane_res_l[2] = (unsigned char)0; pipe_wm.trans_wm.plane_res_l[3] = (unsigned char)0; pipe_wm.trans_wm.cursor_res_b = (unsigned short)0; pipe_wm.trans_wm.cursor_res_l = (unsigned char)0; pipe_wm.linetime = 0U; config.num_pipes_active = 0U; config.sprites_enabled = (_Bool)0; config.sprites_scaled = (_Bool)0; /* 676UL == sizeof(struct skl_wm_values) in this build */ memset((void *)results, 0, 676UL); skl_compute_wm_global_parameters(dev, & config); tmp = skl_update_pipe_wm(crtc, & params, & config, & results->ddb, & pipe_wm); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } skl_compute_wm_results(dev, & params, & pipe_wm, results, intel_crtc); results->dirty[(int )intel_crtc->pipe] = 1; skl_update_other_pipe_wm(dev, crtc, & config, results); skl_write_wm_values(dev_priv, (struct skl_wm_values const *)results); skl_flush_wm_values(dev_priv, results); dev_priv->wm.__annonCompField83.skl_hw = *results; 
return; } } /* skl_update_sprite_wm: cache a sprite plane's WM-relevant parameters in
   intel_plane->wm and trigger a full SKL watermark update. 842094158U is the FOURCC
   for DRM_FORMAT_NV12 ('N'|'V'<<8|'1'<<16|'2'<<24): for NV12 the UV plane cpp
   (drm_format_plane_cpp(..., 1)) is used as bytes_per_pixel and the Y plane cpp
   (plane index 0) as y_bytes_per_pixel; otherwise y_bytes_per_pixel is 0. */ static void skl_update_sprite_wm(struct drm_plane *plane , struct drm_crtc *crtc , uint32_t sprite_width , uint32_t sprite_height , int pixel_size , bool enabled , bool scaled ) { struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct drm_framebuffer *fb ; int tmp ; int tmp___0 ; { __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; fb = (plane->state)->fb; intel_plane->wm.enabled = enabled; intel_plane->wm.scaled = scaled; intel_plane->wm.horiz_pixels = sprite_width; intel_plane->wm.vert_pixels = sprite_height; intel_plane->wm.tiling = 0ULL; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0) && fb->pixel_format == 842094158U) { tmp = drm_format_plane_cpp(((plane->state)->fb)->pixel_format, 1); intel_plane->wm.bytes_per_pixel = (uint8_t )tmp; } else { intel_plane->wm.bytes_per_pixel = (uint8_t )pixel_size; } if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0) && fb->pixel_format == 842094158U) { tmp___0 = drm_format_plane_cpp(((plane->state)->fb)->pixel_format, 0); intel_plane->wm.y_bytes_per_pixel = (uint8_t )tmp___0; } else { intel_plane->wm.y_bytes_per_pixel = 0U; } if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { intel_plane->wm.tiling = fb->modifier[0]; } else { } intel_plane->wm.rotation = (plane->state)->rotation; skl_update_wm(crtc); return; } } /* ilk_update_wm: ILK/SNB/IVB/HSW watermark update for @crtc. The long assignment runs
   below zero-initialize the local params/results/pipe-wm aggregates (CIL `= {}`);
   the real logic follows on later lines: compute per-pipe WM, early-return if
   unchanged, merge LP watermarks for 1/2 vs 5/6 DDB partitioning, pick the best and
   write the results. */ static void ilk_update_wm(struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct ilk_wm_maximums max ; struct ilk_pipe_wm_parameters params ; struct ilk_wm_values results ; enum intel_ddb_partitioning partitioning ; struct intel_pipe_wm pipe_wm ; struct intel_pipe_wm lp_wm_1_2 ; struct intel_pipe_wm lp_wm_5_6 ; struct intel_pipe_wm *best_lp_wm ; struct intel_wm_config config ; int tmp ; struct drm_i915_private *__p ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct 
/* ilk_update_wm zero-init: params, results and pipe_wm (CIL-expanded `= {}`) */ intel_crtc *)__mptr; dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; params.active = (_Bool)0; params.pipe_htotal = 0U; params.pixel_rate = 0U; params.pri.horiz_pixels = 0U; params.pri.vert_pixels = 0U; params.pri.bytes_per_pixel = (unsigned char)0; params.pri.y_bytes_per_pixel = (unsigned char)0; params.pri.enabled = (_Bool)0; params.pri.scaled = (_Bool)0; params.pri.tiling = 0ULL; params.pri.rotation = 0U; params.spr.horiz_pixels = 0U; params.spr.vert_pixels = 0U; params.spr.bytes_per_pixel = (unsigned char)0; params.spr.y_bytes_per_pixel = (unsigned char)0; params.spr.enabled = (_Bool)0; params.spr.scaled = (_Bool)0; params.spr.tiling = 0ULL; params.spr.rotation = 0U; params.cur.horiz_pixels = 0U; params.cur.vert_pixels = 0U; params.cur.bytes_per_pixel = (unsigned char)0; params.cur.y_bytes_per_pixel = (unsigned char)0; params.cur.enabled = (_Bool)0; params.cur.scaled = (_Bool)0; params.cur.tiling = 0ULL; params.cur.rotation = 0U; results.wm_pipe[0] = 0U; results.wm_pipe[1] = 0U; results.wm_pipe[2] = 0U; results.wm_lp[0] = 0U; results.wm_lp[1] = 0U; results.wm_lp[2] = 0U; results.wm_lp_spr[0] = 0U; results.wm_lp_spr[1] = 0U; results.wm_lp_spr[2] = 0U; results.wm_linetime[0] = 0U; results.wm_linetime[1] = 0U; results.wm_linetime[2] = 0U; results.enable_fbc_wm = (_Bool)0; results.partitioning = 0; pipe_wm.wm[0].enable = (_Bool)0; pipe_wm.wm[0].pri_val = 0U; pipe_wm.wm[0].spr_val = 0U; pipe_wm.wm[0].cur_val = 0U; pipe_wm.wm[0].fbc_val = 0U; pipe_wm.wm[1].enable = (_Bool)0; pipe_wm.wm[1].pri_val = 0U; pipe_wm.wm[1].spr_val = 0U; pipe_wm.wm[1].cur_val = 0U; pipe_wm.wm[1].fbc_val = 0U; pipe_wm.wm[2].enable = (_Bool)0; pipe_wm.wm[2].pri_val = 0U; pipe_wm.wm[2].spr_val = 0U; pipe_wm.wm[2].cur_val = 0U; pipe_wm.wm[2].fbc_val = 0U; pipe_wm.wm[3].enable = (_Bool)0; pipe_wm.wm[3].pri_val = 0U; pipe_wm.wm[3].spr_val = 0U; pipe_wm.wm[3].cur_val = 0U; pipe_wm.wm[3].fbc_val = 0U; pipe_wm.wm[4].enable = (_Bool)0; pipe_wm.wm[4].pri_val = 0U; 
/* ilk_update_wm zero-init continues: pipe_wm tail, lp_wm_1_2 and lp_wm_5_6
   (candidate merged LP watermarks for the two DDB partitioning modes) */ pipe_wm.wm[4].spr_val = 0U; pipe_wm.wm[4].cur_val = 0U; pipe_wm.wm[4].fbc_val = 0U; pipe_wm.linetime = 0U; pipe_wm.fbc_wm_enabled = (_Bool)0; pipe_wm.pipe_enabled = (_Bool)0; pipe_wm.sprites_enabled = (_Bool)0; pipe_wm.sprites_scaled = (_Bool)0; lp_wm_1_2.wm[0].enable = (_Bool)0; lp_wm_1_2.wm[0].pri_val = 0U; lp_wm_1_2.wm[0].spr_val = 0U; lp_wm_1_2.wm[0].cur_val = 0U; lp_wm_1_2.wm[0].fbc_val = 0U; lp_wm_1_2.wm[1].enable = (_Bool)0; lp_wm_1_2.wm[1].pri_val = 0U; lp_wm_1_2.wm[1].spr_val = 0U; lp_wm_1_2.wm[1].cur_val = 0U; lp_wm_1_2.wm[1].fbc_val = 0U; lp_wm_1_2.wm[2].enable = (_Bool)0; lp_wm_1_2.wm[2].pri_val = 0U; lp_wm_1_2.wm[2].spr_val = 0U; lp_wm_1_2.wm[2].cur_val = 0U; lp_wm_1_2.wm[2].fbc_val = 0U; lp_wm_1_2.wm[3].enable = (_Bool)0; lp_wm_1_2.wm[3].pri_val = 0U; lp_wm_1_2.wm[3].spr_val = 0U; lp_wm_1_2.wm[3].cur_val = 0U; lp_wm_1_2.wm[3].fbc_val = 0U; lp_wm_1_2.wm[4].enable = (_Bool)0; lp_wm_1_2.wm[4].pri_val = 0U; lp_wm_1_2.wm[4].spr_val = 0U; lp_wm_1_2.wm[4].cur_val = 0U; lp_wm_1_2.wm[4].fbc_val = 0U; lp_wm_1_2.linetime = 0U; lp_wm_1_2.fbc_wm_enabled = (_Bool)0; lp_wm_1_2.pipe_enabled = (_Bool)0; lp_wm_1_2.sprites_enabled = (_Bool)0; lp_wm_1_2.sprites_scaled = (_Bool)0; lp_wm_5_6.wm[0].enable = (_Bool)0; lp_wm_5_6.wm[0].pri_val = 0U; lp_wm_5_6.wm[0].spr_val = 0U; lp_wm_5_6.wm[0].cur_val = 0U; lp_wm_5_6.wm[0].fbc_val = 0U; lp_wm_5_6.wm[1].enable = (_Bool)0; lp_wm_5_6.wm[1].pri_val = 0U; lp_wm_5_6.wm[1].spr_val = 0U; lp_wm_5_6.wm[1].cur_val = 0U; lp_wm_5_6.wm[1].fbc_val = 0U; lp_wm_5_6.wm[2].enable = (_Bool)0; lp_wm_5_6.wm[2].pri_val = 0U; lp_wm_5_6.wm[2].spr_val = 0U; lp_wm_5_6.wm[2].cur_val = 0U; lp_wm_5_6.wm[2].fbc_val = 0U; lp_wm_5_6.wm[3].enable = (_Bool)0; lp_wm_5_6.wm[3].pri_val = 0U; lp_wm_5_6.wm[3].spr_val = 0U; lp_wm_5_6.wm[3].cur_val = 0U; lp_wm_5_6.wm[3].fbc_val = 0U; lp_wm_5_6.wm[4].enable = (_Bool)0; lp_wm_5_6.wm[4].pri_val = 0U; lp_wm_5_6.wm[4].spr_val = 0U; lp_wm_5_6.wm[4].cur_val = 0U; lp_wm_5_6.wm[4].fbc_val = 0U; lp_wm_5_6.linetime = 0U; 
/* ilk_update_wm main logic: compute this pipe's WM (early-return if identical via
   memcmp of the 108-byte struct intel_pipe_wm), merge LP WMs for the default 1/2 DDB
   partitioning, and on gen7+ with a single active pipe and sprites enabled also try
   the 5/6 partitioning and keep whichever merge wins; `partitioning` becomes
   INTEL_DDB_PART_5_6 (nonzero) iff best_lp_wm is not lp_wm_1_2. */ lp_wm_5_6.fbc_wm_enabled = (_Bool)0; lp_wm_5_6.pipe_enabled = (_Bool)0; lp_wm_5_6.sprites_enabled = (_Bool)0; lp_wm_5_6.sprites_scaled = (_Bool)0; config.num_pipes_active = 0U; config.sprites_enabled = (_Bool)0; config.sprites_scaled = (_Bool)0; ilk_compute_wm_parameters(crtc, & params); intel_compute_pipe_wm(crtc, (struct ilk_pipe_wm_parameters const *)(& params), & pipe_wm); tmp = memcmp((void const *)(& intel_crtc->wm.active), (void const *)(& pipe_wm), 108UL); if (tmp == 0) { return; } else { } intel_crtc->wm.active = pipe_wm; ilk_compute_wm_config(dev, & config); ilk_compute_wm_maximums((struct drm_device const *)dev, 1, (struct intel_wm_config const *)(& config), 0, & max); ilk_wm_merge(dev, (struct intel_wm_config const *)(& config), (struct ilk_wm_maximums const *)(& max), & lp_wm_1_2); __p = to_i915((struct drm_device const *)dev); if (((unsigned int )((unsigned char )__p->info.gen) > 6U && config.num_pipes_active == 1U) && (int )config.sprites_enabled) { ilk_compute_wm_maximums((struct drm_device const *)dev, 1, (struct intel_wm_config const *)(& config), 1, & max); ilk_wm_merge(dev, (struct intel_wm_config const *)(& config), (struct ilk_wm_maximums const *)(& max), & lp_wm_5_6); best_lp_wm = ilk_find_best_result(dev, & lp_wm_1_2, & lp_wm_5_6); } else { best_lp_wm = & lp_wm_1_2; } partitioning = (unsigned long )(& lp_wm_1_2) != (unsigned long )best_lp_wm; ilk_compute_wm_results(dev, (struct intel_pipe_wm const *)best_lp_wm, partitioning, & results); ilk_write_wm_values(dev_priv, & results); return; } } /* ilk_update_sprite_wm: cache a sprite's WM parameters on intel_plane->wm and rerun
   ilk_update_wm. On IVB (__p byte at offset 45UL, a CIL-packed platform flag) a scaled
   sprite requires LP1+ watermarks to be disabled first; if ilk_disable_lp_wm actually
   changed anything we wait a vblank before the update (IVB workaround). */ static void ilk_update_sprite_wm(struct drm_plane *plane , struct drm_crtc *crtc , uint32_t sprite_width , uint32_t sprite_height , int pixel_size , bool enabled , bool scaled ) { struct drm_device *dev ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct drm_i915_private *__p ; bool tmp ; { dev = plane->dev; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; intel_plane->wm.enabled = enabled; 
/* NOTE(review): vert_pixels is assigned sprite_width, not sprite_height — looks like a
   copy-paste bug inherited from the original intel_pm.c (ILK WM code may never read
   vert_pixels for sprites); verify against upstream before changing. */ intel_plane->wm.scaled = scaled; intel_plane->wm.horiz_pixels = sprite_width; intel_plane->wm.vert_pixels = sprite_width; intel_plane->wm.bytes_per_pixel = (uint8_t )pixel_size; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (int )scaled) { tmp = ilk_disable_lp_wm(dev); if ((int )tmp) { intel_wait_for_vblank(dev, (int )intel_plane->pipe); } else { } } else { } ilk_update_wm(crtc); return; } } /* skl_pipe_wm_active_state: decode one hardware WM register value @val into *active.
   Bit 31 is the enable bit (is_enabled tests the sign of (int)val), bits 9:0 are the
   result in blocks (res_b), bits 18:14 the result in lines (res_l). @is_transwm selects
   the transition WM fields, @is_cursor the cursor fields (otherwise plane @i at
   watermark level @level). */ static void skl_pipe_wm_active_state(uint32_t val , struct skl_pipe_wm *active , bool is_transwm , bool is_cursor , int i , int level ) { bool is_enabled ; { is_enabled = (int )val < 0; if (! is_transwm) { if (! is_cursor) { active->wm[level].plane_en[i] = is_enabled; active->wm[level].plane_res_b[i] = (unsigned int )((uint16_t )val) & 1023U; active->wm[level].plane_res_l[i] = (unsigned int )((uint8_t )(val >> 14)) & 31U; } else { active->wm[level].cursor_en = is_enabled; active->wm[level].cursor_res_b = (unsigned int )((uint16_t )val) & 1023U; active->wm[level].cursor_res_l = (unsigned int )((uint8_t )(val >> 14)) & 31U; } } else if (! 
/* continuation of skl_pipe_wm_active_state: transition-wm branch */ is_cursor) { active->trans_wm.plane_en[i] = is_enabled; active->trans_wm.plane_res_b[i] = (unsigned int )((uint16_t )val) & 1023U; active->trans_wm.plane_res_l[i] = (unsigned int )((uint8_t )(val >> 14)) & 31U; } else { active->trans_wm.cursor_en = is_enabled; active->trans_wm.cursor_res_b = (unsigned int )((uint16_t )val) & 1023U; active->trans_wm.cursor_res_l = (unsigned int )((uint8_t )(val >> 14)) & 31U; } return; } }
/* skl_pipe_wm_get_hw_state - read back the SKL per-pipe watermark registers
 * (linetime, per-plane/per-level, cursor, transition) into dev_priv->wm.skl_hw
 * and, if the crtc is active, decode them into intel_crtc->wm.skl_active via
 * skl_pipe_wm_active_state(). The ldv_* goto pairs are CIL-flattened loops:
 * outer loop over wm level 0..max_level, inner loop over the pipe's planes.
 * Register offsets appear as precomputed decimal MMIO addresses. */
static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct skl_wm_values *hw ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct skl_pipe_wm *active ; enum pipe pipe ; int level ; int i ; int max_level ; uint32_t temp ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; hw = & dev_priv->wm.__annonCompField83.skl_hw; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; active = & intel_crtc->wm.skl_active; pipe = intel_crtc->pipe; max_level = ilk_wm_max_level((struct drm_device const *)dev); hw->wm_linetime[(int )pipe] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe + 70812) * 4), 1); level = 0; goto ldv_50258; ldv_50257: i = 0; goto ldv_50255; ldv_50254: /* per-plane, per-level wm register */ hw->plane[(int )pipe][i][level] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((((int )pipe * 4096 + (((int )pipe * 4096 + 459584) + ((int )pipe * -4096 + -459328)) * i) + 459328) + level * 4), 1); i = i + 1; ldv_50255: tmp = intel_num_planes(intel_crtc); if ((unsigned int )i < tmp) { goto ldv_50254; } else { } hw->cursor[(int )pipe][level] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((((int )pipe * 1024 + level) + 114768) * 4), 1); level = level + 1; ldv_50258: ; if (level <= max_level) { goto ldv_50257; } else { } i = 0; goto ldv_50261; ldv_50260: /* per-plane transition wm */ hw->plane_trans[(int )pipe][i] =
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459624) + ((int )pipe * -4096 + -459368)) * i) + 459368), 1); i = i + 1; ldv_50261: tmp___0 = intel_num_planes(intel_crtc); if ((unsigned int )i < tmp___0) { goto ldv_50260; } else { } hw->cursor_trans[(int )pipe] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 459112), 1); if (! intel_crtc->active) { return; } else { } /* decode only for active crtcs */ hw->dirty[(int )pipe] = 1; active->linetime = hw->wm_linetime[(int )pipe]; level = 0; goto ldv_50267; ldv_50266: i = 0; goto ldv_50264; ldv_50263: temp = hw->plane[(int )pipe][i][level]; skl_pipe_wm_active_state(temp, active, 0, 0, i, level); i = i + 1; ldv_50264: tmp___1 = intel_num_planes(intel_crtc); if ((unsigned int )i < tmp___1) { goto ldv_50263; } else { } temp = hw->cursor[(int )pipe][level]; skl_pipe_wm_active_state(temp, active, 0, 1, i, level); level = level + 1; ldv_50267: ; if (level <= max_level) { goto ldv_50266; } else { } i = 0; goto ldv_50270; ldv_50269: temp = hw->plane_trans[(int )pipe][i]; skl_pipe_wm_active_state(temp, active, 1, 0, i, 0); i = i + 1; ldv_50270: tmp___2 = intel_num_planes(intel_crtc); if ((unsigned int )i < tmp___2) { goto ldv_50269; } else { } temp = hw->cursor_trans[(int )pipe]; skl_pipe_wm_active_state(temp, active, 1, 1, i, 0); return; } }
/* skl_wm_get_hw_state - SKL top-level hw-state readout: fetch the DDB
 * allocation, then walk dev->mode_config.crtc_list (hand-expanded
 * list_for_each_entry; the 0xfffffffffffffff0UL add is container_of's -16
 * offset) calling skl_pipe_wm_get_hw_state() on every crtc. */
void skl_wm_get_hw_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct skl_ddb_allocation *ddb ; struct drm_crtc *crtc ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ddb = & dev_priv->wm.__annonCompField83.skl_hw.ddb; skl_ddb_get_hw_state(dev_priv, ddb); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_50283; ldv_50282: skl_pipe_wm_get_hw_state(crtc); __mptr___0 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___0 +
/* continuation of ilk_pipe_wm_get_hw_state: inactive pipe -> mark all levels enabled */ ilk_wm_max_level((struct drm_device const *)dev); max_level = tmp___0; level = 0; goto ldv_50319; ldv_50318: active->wm[level].enable = 1; level = level + 1; ldv_50319: ; if (level <= max_level) { goto ldv_50318; } else { } } return; } }
/* ilk_wm_get_hw_state - ILK/SNB/IVB/HSW/BDW top-level watermark readout:
 * per-pipe state for every crtc, then the shared LP watermark registers
 * (282888/282892/282896 = 0x45108..0x45110, WM1..WM3_LP; 282912.. =
 * 0x45120.., sprite LP), the DDB partitioning select and the FBC-WM disable
 * bit (282624 = 0x45000, bit 15). Register-name identifications are from the
 * decimal offsets -- verify against i915_reg.h. */
void ilk_wm_get_hw_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct ilk_wm_values *hw ; struct drm_crtc *crtc ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct drm_i915_private *__p ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; hw = & dev_priv->wm.__annonCompField83.hw; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_50332; ldv_50331: ilk_pipe_wm_get_hw_state(crtc); __mptr___0 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_50332: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50331; } else { } hw->wm_lp[0] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282888L, 1); hw->wm_lp[1] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282892L, 1); hw->wm_lp[2] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282896L, 1); hw->wm_lp_spr[0] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282912L, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { hw->wm_lp_spr[1] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282916L, 1); hw->wm_lp_spr[2] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282920L, 1); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283232L, 1); hw->partitioning = (enum
intel_ddb_partitioning )((int )tmp & 1); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283232L, 1); hw->partitioning = (enum intel_ddb_partitioning )((int )tmp & 1); } else { goto _L; } } else { _L: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282628L, 1); hw->partitioning = (tmp___0 & 64U) != 0U; } else { } } } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282624L, 1); hw->enable_fbc_wm = (tmp___1 & 32768U) == 0U; return; } }
/* intel_update_watermarks - thin dispatcher: call the platform's
 * display.update_wm hook if one is installed. */
void intel_update_watermarks(struct drm_crtc *crtc ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)(crtc->dev)->dev_private; if ((unsigned long )dev_priv->display.update_wm != (unsigned long )((void (*)(struct drm_crtc * ))0)) { (*(dev_priv->display.update_wm))(crtc); } else { } return; } }
/* intel_update_sprite_watermarks - same dispatch pattern for the per-sprite
 * watermark hook, forwarding all geometry/state arguments unchanged. */
void intel_update_sprite_watermarks(struct drm_plane *plane , struct drm_crtc *crtc , uint32_t sprite_width , uint32_t sprite_height , int pixel_size , bool enabled , bool scaled ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)(plane->dev)->dev_private; if ((unsigned long )dev_priv->display.update_sprite_wm != (unsigned long )((void (*)(struct drm_plane * , struct drm_crtc * , uint32_t , uint32_t , int , bool , bool ))0)) { (*(dev_priv->display.update_sprite_wm))(plane, crtc, sprite_width, sprite_height, pixel_size, (int )enabled, (int )scaled); } else { } return; } }
/* mchdev_lock guards the ironlake DRPS/IPS state below; i915_mch_dev is the
 * single device registered for the IPS interface. The initializer is the
 * CIL-expanded DEFINE_SPINLOCK magic values. */
spinlock_t mchdev_lock = {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "mchdev_lock", 0, 0UL}}}}; static struct drm_i915_private *i915_mch_dev ;
/* ironlake_set_drps - request GPU frequency 'val' via MEMSWCTL-style register
 * (70000 = 0x11170 -- verify); returns 0 if the GPU is busy and the change
 * was rejected, 1 on success. Asserts mchdev_lock is held (expanded BUG_ON).
 * (Body continues on the next source line.) */
bool ironlake_set_drps(struct drm_device *dev , u8 val ) { struct
drm_i915_private *dev_priv ; u16 rgvswctl ; int tmp ; long tmp___0 ; long tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; /* lockdep_assert: spin forever (expanded BUG()) if mchdev_lock not held */ tmp = queued_spin_is_locked(& mchdev_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c"), "i" (3787), "i" (12UL)); ldv_50387: ; goto ldv_50387; } else { } rgvswctl = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 70000L, 1); if (((int )rgvswctl & 4096) != 0) { /* bit 12: previous change still pending */ tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ironlake_set_drps", "gpu busy, RCS change rejected\n"); } else { } return (0); } else { } rgvswctl = (u16 )((int )((short )((int )val << 8)) | 16512); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 70000L, (int )rgvswctl, 1); (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 70000L, 0); /* posting read */ rgvswctl = (u16 )((unsigned int )rgvswctl | 4096U); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 70000L, (int )rgvswctl, 1); return (1); } }
/* ironlake_enable_drps - bring up ILK dynamic render p-states: read the mode
 * control register (70032 = 0x11190 -- verify), program interrupt/threshold
 * registers, extract fmax/fmin/fstart fields, cache them in dev_priv->ips,
 * enable the hardware and poll (10 ms jiffies timeout, CIL-expanded wait_for)
 * for the busy bit to clear, then seed the last_count/last_time accounting
 * used by the IPS sampling code. Runs under mchdev_lock. */
static void ironlake_enable_drps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 rgvmodectl ; uint32_t tmp ; u8 fmax ; u8 fmin ; u8 fstart ; u8 vstart ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; long tmp___3 ; unsigned long timeout__ ; unsigned long tmp___4 ; int ret__ ; uint32_t tmp___5 ; uint32_t tmp___6 ; unsigned long __ms ; unsigned long tmp___7 ; uint32_t tmp___8 ; uint32_t tmp___9 ; uint32_t tmp___10 ; unsigned int tmp___11 ; uint32_t tmp___12 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70032L, 1); rgvmodectl =
tmp; spin_lock_irq(& mchdev_lock); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70164L, 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 70164L, (int )((unsigned int )((uint16_t )tmp___0) | 1U), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 69633L, 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 69633L, (int )((unsigned int )((uint16_t )tmp___1) | 1U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70064L, 100000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70068L, 100000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70044L, 90000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70048L, 80000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70012L, 1U, 1); /* decode freq limits: bits 7:4 fmax, 3:0 fmin, 11:8 fstart */ fmax = (u8 )((rgvmodectl & 240U) >> 4); fmin = (unsigned int )((u8 )rgvmodectl) & 15U; fstart = (u8 )((rgvmodectl & 3840U) >> 8); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )fstart + 17476) * 4), 1); vstart = (u8 )((tmp___2 & 2130706432U) >> 24); dev_priv->ips.fmax = fmax; dev_priv->ips.fstart = fstart; dev_priv->ips.max_delay = fstart; dev_priv->ips.min_delay = fmin; dev_priv->ips.cur_delay = fstart; tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("ironlake_enable_drps", "fmax: %d, fmin: %d, fstart: %d\n", (int )fmax, (int )fmin, (int )fstart); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70016L, 144U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70092L, (uint32_t )vstart, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70092L, 0); rgvmodectl = rgvmodectl | 16384U; /* enable bit 14 */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70032L, rgvmodectl, 1); /* wait_for(!(MEMSWCTL & BUSY), 10ms) expanded below */ tmp___4 = msecs_to_jiffies(10U); timeout__ = (tmp___4 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50409; ldv_50408: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70000L, 1); if ((tmp___5 & 4096U) != 0U) {
ret__ = -110; } else { } goto ldv_50407; } else { } cpu_relax(); ldv_50409: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70000L, 1); if ((tmp___6 & 4096U) != 0U) { goto ldv_50408; } else { } ldv_50407: ; if (ret__ != 0) { drm_err("stuck trying to change perf mode\n"); } else { } /* mdelay(1), CIL-expanded: the if(1) arm is the constant-delay path */ if (1) { __const_udelay(4295000UL); } else { __ms = 1UL; goto ldv_50413; ldv_50412: __const_udelay(4295000UL); ldv_50413: tmp___7 = __ms; __ms = __ms - 1UL; if (tmp___7 != 0UL) { goto ldv_50412; } else { } } ironlake_set_drps(dev, (int )fstart); /* seed IPS accounting from the three count registers + timestamp */ tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70372L, 1); tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70376L, 1); tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70368L, 1); dev_priv->ips.last_count1 = (u64 )((tmp___8 + tmp___9) + tmp___10); tmp___11 = jiffies_to_msecs(jiffies); dev_priv->ips.last_time1 = (unsigned long )tmp___11; tmp___12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70388L, 1); dev_priv->ips.last_count2 = (u64 )tmp___12; dev_priv->ips.last_time2 = ktime_get_raw_ns(); spin_unlock_irq(& mchdev_lock); return; } }
/* ironlake_disable_drps - tear down DRPS: mask interrupts, drop to fstart and
 * disable the hardware with 1 ms settle delays. Runs under mchdev_lock.
 * (Body continues on the next source line.) */
static void ironlake_disable_drps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u16 rgvswctl ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; unsigned long __ms ; unsigned long tmp___2 ; unsigned long __ms___0 ; unsigned long tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; spin_lock_irq(& mchdev_lock); rgvswctl = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 70000L, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70016L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70016L, tmp & 4294967279U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70020L, 16U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278540L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278540L, tmp___0 & 4261412863U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278536L, 33554432U, 1); tmp___1 =
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278532L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278532L, tmp___1 | 33554432U, 1); ironlake_set_drps(dev, (int )dev_priv->ips.fstart); /* mdelay(1), CIL-expanded constant path */ if (1) { __const_udelay(4295000UL); } else { __ms = 1UL; goto ldv_50422; ldv_50421: __const_udelay(4295000UL); ldv_50422: tmp___2 = __ms; __ms = __ms - 1UL; if (tmp___2 != 0UL) { goto ldv_50421; } else { } } rgvswctl = (u16 )((unsigned int )rgvswctl | 4096U); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70000L, (uint32_t )rgvswctl, 1); if (1) { __const_udelay(4295000UL); } else { __ms___0 = 1UL; goto ldv_50426; ldv_50425: __const_udelay(4295000UL); ldv_50426: tmp___3 = __ms___0; __ms___0 = __ms___0 - 1UL; if (tmp___3 != 0UL) { goto ldv_50425; } else { } } spin_unlock_irq(& mchdev_lock); return; } }
/* intel_rps_limits - build the RP interrupt-limits register value from the
 * soft limits: gen9 packs max at bit 23 / min at bit 14, earlier gens use
 * bits 24 / 16. The min limit is only set when val is at or below the
 * softlimit floor. */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv , u8 val ) { u32 limits ; struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) == 9U) { limits = (u32 )((int )dev_priv->rps.max_freq_softlimit << 23); if ((int )dev_priv->rps.min_freq_softlimit >= (int )val) { limits = (u32 )((int )dev_priv->rps.min_freq_softlimit << 14) | limits; } else { } } else { limits = (u32 )((int )dev_priv->rps.max_freq_softlimit << 24); if ((int )dev_priv->rps.min_freq_softlimit >= (int )val) { limits = (u32 )((int )dev_priv->rps.min_freq_softlimit << 16) | limits; } else { } } return (limits); } }
/* gen6_set_rps_thresholds - pick the RPS power "zone" (0=low/1=between/
 * 2=high) from the requested frequency relative to efficient/rp0/softlimit
 * freqs, and reprogram the up/down evaluation intervals and thresholds
 * (registers 41064/41004/41068/41008 = 0xA068/0xA02C/0xA06C/0xA030 --
 * verify) when the zone changes; gen9 scales the interval units by 3/4,
 * earlier gens by 100/128. (Body continues on the next source line.) */
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv , u8 val ) { int new_power ; u32 threshold_up ; u32 threshold_down ; u32 ei_up ; u32 ei_down ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { threshold_up = 0U; threshold_down = 0U; ei_up = 0U; ei_down = 0U; new_power = (int )dev_priv->rps.power; switch ((unsigned int )dev_priv->rps.power) { case 0U: ; if ((int )val > (int
)dev_priv->rps.efficient_freq + 1 && (int )dev_priv->rps.cur_freq < (int )val) { new_power = 1; } else { } goto ldv_50449; case 1U: ; if ((int )dev_priv->rps.efficient_freq >= (int )val && (int )dev_priv->rps.cur_freq > (int )val) { new_power = 0; } else if ((int )dev_priv->rps.rp0_freq <= (int )val && (int )dev_priv->rps.cur_freq < (int )val) { new_power = 2; } else { } goto ldv_50449; case 2U: ; if ((int )val < ((int )dev_priv->rps.rp1_freq + (int )dev_priv->rps.rp0_freq) >> 1 && (int )dev_priv->rps.cur_freq > (int )val) { new_power = 1; } else { } goto ldv_50449; } ldv_50449: ; /* clamp to the softlimit extremes */ if ((int )dev_priv->rps.min_freq_softlimit >= (int )val) { new_power = 0; } else { } if ((int )dev_priv->rps.max_freq_softlimit <= (int )val) { new_power = 2; } else { } if ((unsigned int )new_power == (unsigned int )dev_priv->rps.power) { return; } else { } /* per-zone evaluation intervals (us) and up/down thresholds (%) */ switch (new_power) { case 0: ei_up = 16000U; threshold_up = 95U; ei_down = 32000U; threshold_down = 85U; goto ldv_50453; case 1: ei_up = 13000U; threshold_up = 90U; ei_down = 32000U; threshold_down = 75U; goto ldv_50453; case 2: ei_up = 10000U; threshold_up = 85U; ei_down = 32000U; threshold_down = 60U; goto ldv_50453; } ldv_50453: __p = dev_priv; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41064L, (unsigned int )((unsigned char )__p->info.gen) == 9U ? ei_up * 3U >> 2 : ei_up * 100U >> 7, 1); __p___0 = dev_priv; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41004L, (unsigned int )((unsigned char )__p___0->info.gen) == 9U ? ((ei_up * threshold_up) / 100U) * 3U >> 2 : ((ei_up * threshold_up) / 100U) * 100U >> 7, 1); __p___1 = dev_priv; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41068L, (unsigned int )((unsigned char )__p___1->info.gen) == 9U ? ei_down * 3U >> 2 : ei_down * 100U >> 7, 1); __p___2 = dev_priv; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41008L, (unsigned int )((unsigned char )__p___2->info.gen) == 9U ?
((ei_down * threshold_down) / 100U) * 3U >> 2 : ((ei_down * threshold_down) / 100U) * 100U >> 7, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40996L, 3474U, 1); /* RP control enable (40996 = 0xA024 -- verify) */ dev_priv->rps.power = (int )new_power; dev_priv->rps.up_threshold = (u8 )threshold_up; dev_priv->rps.down_threshold = (u8 )threshold_down; dev_priv->rps.last_adj = 0; return; } }
/* gen6_rps_pm_mask - compute the PM interrupt mask for frequency val: allow
 * down events (bits in 82U) only when above the soft floor, up events (36U)
 * only when below the soft ceiling; intersect with the platform's
 * pm_rps_events and sanitize the inverted mask. */
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv , u8 val ) { u32 mask ; u32 tmp ; { mask = 0U; if ((int )dev_priv->rps.min_freq_softlimit < (int )val) { mask = mask | 82U; } else { } if ((int )dev_priv->rps.max_freq_softlimit > (int )val) { mask = mask | 36U; } else { } mask = dev_priv->pm_rps_events & mask; tmp = gen6_sanitize_rps_pm_mask(dev_priv, ~ mask); return (tmp); } }
/* gen6_set_rps - set the GPU frequency on gen6+ (non-VLV path). Expanded
 * WARN_ONs check rps.hw_lock is held and val is within [min_freq, max_freq].
 * If the frequency changes, update thresholds and write the request register
 * (40968 = 0xA008 -- verify): the val shift is 23 on gen9, 24 on gen8/the
 * offset-45 platform flag, else 25. (Body continues on the next line.) */
static void gen6_set_rps(struct drm_device *dev , u8 val ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; int __ret_warn_on___1 ; long tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; u32 tmp___3 ; u32 tmp___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4039, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )dev_priv->rps.max_freq < (int )val; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4040, "WARN_ON(val > dev_priv->rps.max_freq)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __ret_warn_on___1 = (int )dev_priv->rps.min_freq > (int )val; tmp___2 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4041, "WARN_ON(val < dev_priv->rps.min_freq)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if ((int )dev_priv->rps.cur_freq != (int )val) { gen6_set_rps_thresholds(dev_priv, (int )val); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40968L, (uint32_t )((int )val << 23), 1); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40968L, (uint32_t )((int )val << 24), 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40968L, (uint32_t )((int )val << 24), 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40968L, (uint32_t )((int )val << 25), 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40968L, (uint32_t )((int )val << 25), 1); } } } } else { } tmp___3 = intel_rps_limits(dev_priv, (int )val); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40980L, tmp___3,
1); tmp___4 = gen6_rps_pm_mask(dev_priv, (int )val); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41320L, tmp___4, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40968L, 0); /* posting read */ dev_priv->rps.cur_freq = val; trace_intel_gpu_freq_change((u32 )((int )val * 50)); /* gen6 units: val * 50 MHz */ return; } }
/* valleyview_set_rps - VLV/CHV frequency request via the punit (register
 * 212U). Same expanded WARN_ON lock/range checks as gen6_set_rps, plus a
 * WARN_ONCE that rounds odd frequency values down to even on the
 * gen8-with-flag (apparently CHV) combination. Thresholds are only
 * reprogrammed on the non-(flag && gen8) path. (Continues on next line.) */
static void valleyview_set_rps(struct drm_device *dev , u8 val ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; int __ret_warn_on___1 ; long tmp___2 ; bool __warned ; int __ret_warn_once ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp___3 ; int __ret_warn_on___2 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; u32 tmp___8 ; int tmp___9 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4078, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )dev_priv->rps.max_freq < (int )val; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4079, "WARN_ON(val > dev_priv->rps.max_freq)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __ret_warn_on___1 = (int )dev_priv->rps.min_freq > (int )val; tmp___2 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___2 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4080, "WARN_ON(val < dev_priv->rps.min_freq)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); /* WARN_ONCE(flag && gen8 && odd val): force even frequency */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { if ((int )val & 1) { tmp___3 = 1; } else { tmp___3 = 0; } } else { tmp___3 = 0; } } else { tmp___3 = 0; } __ret_warn_once = tmp___3; tmp___6 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___6 != 0L) { __ret_warn_on___2 = ! __warned; tmp___4 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4083, "Odd GPU freq value\n"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___5 != 0L) { __warned = 1; } else { } } else { } tmp___7 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___7 != 0L) { val = (unsigned int )val & 254U; } else { } if ((int )dev_priv->rps.cur_freq != (int )val) { vlv_punit_write(dev_priv, 212U, (u32 )val); __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { gen6_set_rps_thresholds(dev_priv, (int )val); } else { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { gen6_set_rps_thresholds(dev_priv, (int )val); } else { } } } else { } tmp___8 = gen6_rps_pm_mask(dev_priv, (int )val); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41320L, tmp___8, 1); dev_priv->rps.cur_freq = val; tmp___9 = intel_gpu_freq(dev_priv, (int )val);
trace_intel_gpu_freq_change((u32 )tmp___9); return; } }
/* vlv_set_rps_idle - drop to the idle frequency (only if currently above it)
 * with a forcewake get/put bracket around the frequency write. */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv ) { u32 val ; { val = (u32 )dev_priv->rps.idle_freq; if ((u32 )dev_priv->rps.cur_freq <= val) { return; } else { } intel_uncore_forcewake_get(dev_priv, 4); valleyview_set_rps(dev_priv->dev, (int )((u8 )val)); intel_uncore_forcewake_put(dev_priv, 4); return; } }
/* gen6_rps_busy - on the busy transition (under rps.hw_lock): reset the
 * evaluation-interval sampling if up/down-EI events are in use, then unmask
 * the PM interrupts for the current frequency (41320 = 0xA168 -- verify). */
void gen6_rps_busy(struct drm_i915_private *dev_priv ) { u32 tmp ; { mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); if ((int )dev_priv->rps.enabled) { if ((dev_priv->pm_rps_events & 6U) != 0U) { gen6_rps_reset_ei(dev_priv); } else { } tmp = gen6_rps_pm_mask(dev_priv, (int )dev_priv->rps.cur_freq); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41320L, tmp, 1); } else { } mutex_unlock(& dev_priv->rps.hw_lock); return; } }
/* gen6_rps_idle - on the idle transition: drop to the idle frequency (VLV
 * path chosen by the offset-45 platform flag), mask all PM interrupts, then
 * under rps.client_lock empty the boost-client list. */
void gen6_rps_idle(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct drm_i915_private *__p ; int tmp ; { dev = dev_priv->dev; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); if ((int )dev_priv->rps.enabled) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { vlv_set_rps_idle(dev_priv); } else { gen6_set_rps(dev_priv->dev, (int )dev_priv->rps.idle_freq); } dev_priv->rps.last_adj = 0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41320L, 4294967295U, 1); } else { } mutex_unlock(& dev_priv->rps.hw_lock); spin_lock(& dev_priv->rps.client_lock); goto ldv_50578; ldv_50577: list_del_init(dev_priv->rps.clients.next); ldv_50578: tmp = list_empty((struct list_head const *)(& dev_priv->rps.clients)); if (tmp == 0) { goto ldv_50577; } else { } spin_unlock(& dev_priv->rps.client_lock); return; } }
/* gen6_rps_boost - request a frequency boost on behalf of a client. Bails if
 * not busy/enabled or already at the soft ceiling; ignores requests submitted
 * more than 20 ms ago; otherwise queues the RPS work (client_boost) and
 * tracks the boost against the client or dev_priv. (Continues on next line.) */
void gen6_rps_boost(struct drm_i915_private *dev_priv , struct intel_rps_client *rps , unsigned long submitted ) { unsigned long tmp ; int tmp___0 ; { if ((! dev_priv->mm.busy || !
dev_priv->rps.enabled) || (int )dev_priv->rps.cur_freq >= (int )dev_priv->rps.max_freq_softlimit) { return; } else { } if ((unsigned long )rps != (unsigned long )((struct intel_rps_client *)0)) { tmp = msecs_to_jiffies(20U); if ((long )((tmp + submitted) - (unsigned long )jiffies) < 0L) { rps = (struct intel_rps_client *)0; } else { } } else { } spin_lock(& dev_priv->rps.client_lock); if ((unsigned long )rps == (unsigned long )((struct intel_rps_client *)0)) { goto _L; } else { tmp___0 = list_empty((struct list_head const *)(& rps->link)); if (tmp___0 != 0) { _L: /* CIL Label */ spin_lock_irq(& dev_priv->irq_lock); if ((int )dev_priv->rps.interrupts_enabled) { dev_priv->rps.client_boost = 1; queue_work(dev_priv->wq, & dev_priv->rps.work); } else { } spin_unlock_irq(& dev_priv->irq_lock); if ((unsigned long )rps != (unsigned long )((struct intel_rps_client *)0)) { list_add(& rps->link, & dev_priv->rps.clients); rps->boosts = rps->boosts + 1U; } else { dev_priv->rps.boosts = dev_priv->rps.boosts + 1U; } } else { } } spin_unlock(& dev_priv->rps.client_lock); return; } }
/* intel_set_rps - frequency-set dispatcher: VLV path when the offset-45
 * platform flag is set, gen6 path otherwise. */
void intel_set_rps(struct drm_device *dev , u8 val ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { valleyview_set_rps(dev, (int )val); } else { gen6_set_rps(dev, (int )val); } return; } }
/* gen9_disable_rps - clear RP control (41104 = 0xA090 -- verify) and the
 * gen9-only enable register 41488. */
static void gen9_disable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41488L, 0U, 1); return; } }
/* gen6_disable_rps - clear RP control and park the frequency request with the
 * top bit set (2147483648U = bit 31). */
static void gen6_disable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40968L, 2147483648U, 1); return; } } static void
cherryview_disable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); return; } } static void valleyview_disable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_uncore_forcewake_get(dev_priv, 7); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); intel_uncore_forcewake_put(dev_priv, 7); return; } } static void intel_print_rc6_info(struct drm_device *dev , u32 mode ) { struct drm_i915_private *__p ; long tmp ; long tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { if ((mode & 402653184U) != 0U) { mode = 262144U; } else { mode = 0U; } } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { _L: /* CIL Label */ tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_print_rc6_info", "Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", (mode & 262144U) != 0U ? (char *)"on" : (char *)"off", (mode & 131072U) != 0U ? (char *)"on" : (char *)"off", (mode & 65536U) != 0U ? (char *)"on" : (char *)"off"); } else { } } else { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_print_rc6_info", "Enabling RC6 states: RC6 %s\n", (mode & 262144U) != 0U ? 
/* NOTE(review): end of intel_print_rc6_info, then sanitize_rc6_option (clamps a user-requested rc6 mask: returns 0 for gen<=4 and most gen5, masks the request with 7 or 1 depending on platform, defaults to 3 on the byte-45 platform and 1 otherwise), intel_enable_rc6 (returns the i915.enable_rc6 module parameter unmodified), and the declaration head of gen6_init_rps_frequencies. */ (char *)"on" : (char *)"off"); } else { } } } return; } } static int sanitize_rc6_option(struct drm_device const *dev , int enable_rc6 ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int mask ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; long tmp ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { __p = to_i915(dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (0); } else { } __p___0 = to_i915(dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 5U) { __p___1 = to_i915(dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) != 70U) { return (0); } else { } } else { } if (enable_rc6 >= 0) { __p___2 = to_i915(dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 6U) { mask = 7; } else { __p___3 = to_i915(dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { mask = 7; } else { mask = 1; } } if ((enable_rc6 & mask) != enable_rc6) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("sanitize_rc6_option", "Adjusting RC6 mask to %d (requested %d, valid %d)\n", enable_rc6 & mask, enable_rc6, mask); } else { } } else { } return (enable_rc6 & mask); } else { } __p___4 = to_i915(dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 5U) { return (0); } else { } __p___5 = to_i915(dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { return (3); } else { } return (1); } } int intel_enable_rc6(struct drm_device const *dev ) { { return (i915.enable_rc6); } } static void gen6_init_rps_frequencies(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t rp_state_cap ; u32 ddcc_status ; int ret ; struct drm_i915_private *__p ; u8 __min1 ; u8 __max1 ; u8 __max2 ; u8 __min2 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int __max1___0 ; int __max2___0 ; int tmp ; struct 
/* NOTE(review): body of gen6_init_rps_frequencies — reads RP_STATE_CAP (offset 1333656 = 0x145998-region; bytes 0/1/2 give rp0/rp1/min), scales freqs by 3 on the byte-45 platform, optionally pulls an "efficient" freq from pcode mailbox 26 (clamped into [min,max] via the __max/__min temporaries), then initializes the soft limits if zero (with a 450 MHz floor via intel_freq_opcode on gen8-class paths). Also the head of gen9_enable_rps. Machine-generated; keep tokens intact. */ drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ddcc_status = 0U; rp_state_cap = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1333656L, 1); dev_priv->rps.cur_freq = 0U; dev_priv->rps.rp0_freq = (u8 )rp_state_cap; dev_priv->rps.rp1_freq = (u8 )(rp_state_cap >> 8); dev_priv->rps.min_freq = (u8 )(rp_state_cap >> 16); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { dev_priv->rps.rp0_freq = (unsigned int )dev_priv->rps.rp0_freq * 3U; dev_priv->rps.rp1_freq = (unsigned int )dev_priv->rps.rp1_freq * 3U; dev_priv->rps.min_freq = (unsigned int )dev_priv->rps.min_freq * 3U; } else { } dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { _L: /* CIL Label */ ret = sandybridge_pcode_read(dev_priv, 26U, & ddcc_status); if (ret == 0) { __max1 = (u8 )(ddcc_status >> 8); __max2 = dev_priv->rps.min_freq; __min1 = (u8 )((int )__max1 > (int )__max2 ? __max1 : __max2); __min2 = dev_priv->rps.max_freq; dev_priv->rps.efficient_freq = (u8 )((int )__min1 < (int )__min2 ? 
__min1 : __min2); } else { } } else { } } else { } } dev_priv->rps.idle_freq = dev_priv->rps.min_freq; if ((unsigned int )dev_priv->rps.max_freq_softlimit == 0U) { dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; } else { } if ((unsigned int )dev_priv->rps.min_freq_softlimit == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __max1___0 = (int )dev_priv->rps.efficient_freq; tmp = intel_freq_opcode(dev_priv, 450); __max2___0 = tmp; dev_priv->rps.min_freq_softlimit = (u8 )(__max1___0 > __max2___0 ? __max1___0 : __max2___0); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 8U) { __max1___0 = (int )dev_priv->rps.efficient_freq; tmp = intel_freq_opcode(dev_priv, 450); __max2___0 = tmp; dev_priv->rps.min_freq_softlimit = (u8 )(__max1___0 > __max2___0 ? __max1___0 : __max2___0); } else { dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; } } else { dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; } } } else { } return; } } static void gen9_enable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_uncore_forcewake_get(dev_priv, 7); gen6_init_rps_frequencies(dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40972L, (uint32_t )((int )dev_priv->rps.rp1_freq << 23), 1); __p = dev_priv; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40976L, (unsigned int )((unsigned char )__p->info.gen) == 9U ? 
/* NOTE(review): end of gen9_enable_rps (programs RP control regs, sets rps.power=2 and calls gen6_set_rps at min softlimit under forcewake), then gen9_enable_rc6 — disables interrupts/RC state, programs RC6 evaluation/threshold registers, writes 10 to each initialized ring's mmio_base+84 (per-engine idle count), enables RC6 iff intel_enable_rc6() bit 0 is set, and logs via printk — followed by most of gen8_enable_rps. The unused ldv_5076x labels implement CIL's loop lowering via goto. */ 750000U : 781250U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41072L, 10U, 1); dev_priv->rps.power = 2; gen6_set_rps(dev_priv->dev, (int )dev_priv->rps.min_freq_softlimit); intel_uncore_forcewake_put(dev_priv, 7); return; } } static void gen9_enable_rc6(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; uint32_t rc6_mask ; int unused ; bool tmp ; int tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; rc6_mask = 0U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41108L, 0U, 1); intel_uncore_forcewake_get(dev_priv, 7); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41116L, 3538944U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41128L, 125000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41132L, 25U, 1); unused = 0; goto ldv_50767; ldv_50766: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )unused; tmp = intel_ring_initialized(ring); if ((int )tmp) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 84U), 10U, 1); } else { } unused = unused + 1; ldv_50767: ; if (unused <= 4) { goto ldv_50766; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41136L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41144L, 37500U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41156L, 25U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41160L, 25U, 1); tmp___0 = intel_enable_rc6((struct drm_device const *)dev); if (tmp___0 & 1) { rc6_mask = 262144U; } else { } printk("\016[drm] RC6 %s\n", (rc6_mask & 262144U) != 0U ? (char *)"on" : (char *)"off"); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, rc6_mask | 2281701376U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41488L, (rc6_mask & 262144U) != 0U ? 
2U : 0U, 1); intel_uncore_forcewake_put(dev_priv, 7); return; } } static void gen8_enable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; uint32_t rc6_mask ; int unused ; bool tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; rc6_mask = 0U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41108L, 0U, 1); intel_uncore_forcewake_get(dev_priv, 7); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); gen6_init_rps_frequencies(dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41116L, 2621440U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41128L, 125000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41132L, 25U, 1); unused = 0; goto ldv_50777; ldv_50776: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )unused; tmp = intel_ring_initialized(ring); if ((int )tmp) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 84U), 10U, 1); } else { } unused = unused + 1; ldv_50777: ; if (unused <= 4) { goto ldv_50776; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41136L, 0U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41144L, 625U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41144L, 50000U, 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41144L, 50000U, 1); } tmp___0 = intel_enable_rc6((struct drm_device const *)dev); if (tmp___0 & 1) { rc6_mask = 262144U; } else { } intel_print_rc6_info(dev, rc6_mask); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) 
/* NOTE(review): tail of gen8_enable_rps — selects the RC_CONTROL enable mask (different bit set when gen==8 and the byte-45 flag is clear), then programs the RPNSWREQ/RC_VIDEO_FREQ, frequency soft limits, up/down thresholds and EI intervals, sets rps.power=2 and drops to idle_freq. Also the declaration head of gen6_enable_rps. */ { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, rc6_mask | 2415919104U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, rc6_mask | 2281701376U, 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, rc6_mask | 2281701376U, 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40968L, (uint32_t )((int )dev_priv->rps.rp1_freq << 24), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40972L, (uint32_t )((int )dev_priv->rps.rp1_freq << 24), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40976L, 781250U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40980L, (uint32_t )(((int )dev_priv->rps.max_freq_softlimit << 24) | ((int )dev_priv->rps.min_freq_softlimit << 16)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41004L, 59375U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41008L, 244531U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41064L, 66000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41068L, 350000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41072L, 10U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40996L, 3474U, 1); dev_priv->rps.power = 2; gen6_set_rps(dev_priv->dev, (int )dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, 7); return; } } static void gen6_enable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u32 rc6vids ; u32 pcu_mbox ; u32 rc6_mask ; u32 gtfifodbg ; int rc6_mode ; int i ; int ret ; int __ret_warn_on ; int tmp ; long tmp___0 ; bool tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pcu_mbox = 0U; rc6_mask = 0U; tmp = 
/* NOTE(review): body of gen6_enable_rps — WARNs unless rps.hw_lock is held, clears/acks GT FIFO errors (0x120000), programs RC state/thresholds under forcewake, builds rc6_mask from intel_enable_rc6() bits (RC6p/RC6pp only when the byte-45 flag is clear), talks to the pcode (mailboxes 8/12/5/4) for min freq, overclock caps, and the gen6 rc6-voltage BIOS workaround, then restores forcewake. CAUTION: a string literal ("Overclocking supported. ... Max: %dMHz...") is split across the next physical line break — that break must not be touched. Tail is the head of __gen6_update_ring_freq. */ mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4517, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41108L, 0U, 1); gtfifodbg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1179648L, 1); if (gtfifodbg != 0U) { drm_err("GT fifo had a previous error %x\n", gtfifodbg); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1179648L, gtfifodbg, 1); } else { } intel_uncore_forcewake_get(dev_priv, 7); gen6_init_rps_frequencies(dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41112L, 65536000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41116L, 2621470U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41120L, 30U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41128L, 125000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41132L, 25U, 1); i = 0; goto ldv_50818; ldv_50817: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___1 = intel_ring_initialized(ring); if ((int )tmp___1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 84U), 10U, 1); } else { } i = i + 1; ldv_50818: ; if (i <= 4) { goto ldv_50817; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41136L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41140L, 1000U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41144L, 125000U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 
41144L, 50000U, 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41148L, 150000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41152L, 64000U, 1); rc6_mode = intel_enable_rc6((struct drm_device const *)dev_priv->dev); if (rc6_mode & 1) { rc6_mask = rc6_mask | 262144U; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { if ((rc6_mode & 2) != 0) { rc6_mask = rc6_mask | 131072U; } else { } if ((rc6_mode & 4) != 0) { rc6_mask = rc6_mask | 65536U; } else { } } else { } intel_print_rc6_info(dev, rc6_mask); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, rc6_mask | 2281701376U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40976L, 50000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41072L, 10U, 1); ret = sandybridge_pcode_write(dev_priv, 8U, 0U); if (ret != 0) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("gen6_enable_rps", "Failed to set the min frequency\n"); } else { } } else { } ret = sandybridge_pcode_read(dev_priv, 12U, & pcu_mbox); if (ret == 0 && (int )pcu_mbox < 0) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("gen6_enable_rps", "Overclocking supported. 
Max: %dMHz, Overclock max: %dMHz\n", (int )dev_priv->rps.max_freq_softlimit * 50, (pcu_mbox & 255U) * 50U); } else { } dev_priv->rps.max_freq = (u8 )pcu_mbox; } else { } dev_priv->rps.power = 2; gen6_set_rps(dev_priv->dev, (int )dev_priv->rps.idle_freq); rc6vids = 0U; ret = sandybridge_pcode_read(dev_priv, 5U, & rc6vids); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 6U && ret != 0) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("gen6_enable_rps", "Couldn\'t check for BIOS workaround\n"); } else { } } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 6U && (rc6vids & 255U) * 5U + 245U <= 449U) { tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("gen6_enable_rps", "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", (rc6vids & 255U) * 5U + 245U, 450); } else { } rc6vids = rc6vids & 16776960U; rc6vids = rc6vids | 41U; ret = sandybridge_pcode_write(dev_priv, 4U, rc6vids); if (ret != 0) { drm_err("Couldn\'t fix incorrect rc6 voltage\n"); } else { } } else { } } intel_uncore_forcewake_put(dev_priv, 7); return; } } static void __gen6_update_ring_freq(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int min_freq ; unsigned int gpu_freq ; unsigned int max_ia_freq ; unsigned int min_ring_freq ; int scaling_factor ; struct cpufreq_policy *policy ; int __ret_warn_on ; int tmp ; long tmp___0 ; uint32_t tmp___1 ; unsigned int quot ; unsigned int rem ; int diff ; unsigned int ia_freq ; unsigned int ring_freq ; unsigned int _max1 ; unsigned int _max2 ; unsigned int quot___0 ; unsigned int rem___0 ; unsigned int _max1___0 ; unsigned int _max2___0 ; unsigned int __x ; int __d ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; min_freq = 15; 
/* NOTE(review): body of __gen6_update_ring_freq — derives max IA freq from cpufreq policy 0 (falling back to tsc_khz), reads min ring freq from MMIO 1334788, and walks gpu_freq from rps.max_freq down to rps.min_freq programming (ia_freq<<8 | ring_freq<<16 | gpu_freq) through pcode mailbox 8; per-gen ring_freq formulas are CIL-expanded max()/DIV_ROUND_CLOSEST temporaries. Then gen6_update_ring_freq (takes hw_lock and delegates; early-outs for gen<=5 or the byte-45 platform) and the cherryview rps freq helpers that decode PUNIT registers 310/219/223/216 (rp0 bit-field choice depends on info.eu_total for rev>31). */ scaling_factor = 180; tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4625, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); policy = cpufreq_cpu_get(0U); if ((unsigned long )policy != (unsigned long )((struct cpufreq_policy *)0)) { max_ia_freq = policy->cpuinfo.max_freq; cpufreq_cpu_put(policy); } else { max_ia_freq = tsc_khz; } max_ia_freq = max_ia_freq / 1000U; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1334788L, 1); min_ring_freq = tmp___1 & 15U; quot = min_ring_freq / 3U; rem = min_ring_freq % 3U; min_ring_freq = quot * 8U + (rem * 8U) / 3U; gpu_freq = (unsigned int )dev_priv->rps.max_freq; goto ldv_50888; ldv_50887: diff = (int )((unsigned int )dev_priv->rps.max_freq - gpu_freq); ia_freq = 0U; ring_freq = 0U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { _max1 = min_ring_freq; _max2 = gpu_freq; ring_freq = _max1 > _max2 ? _max1 : _max2; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { quot___0 = gpu_freq / 4U; rem___0 = gpu_freq & 3U; ring_freq = quot___0 * 5U + (rem___0 * 5U) / 4U; _max1___0 = min_ring_freq; _max2___0 = ring_freq; ring_freq = _max1___0 > _max2___0 ? 
_max1___0 : _max2___0; } else { if ((unsigned int )min_freq > gpu_freq) { ia_freq = 800U; } else { ia_freq = max_ia_freq - (unsigned int )((diff * scaling_factor) / 2); } __x = ia_freq; __d = 100; ia_freq = ((unsigned int )(__d / 2) + __x) / (unsigned int )__d; } } sandybridge_pcode_write(dev_priv, 8U, ((ia_freq << 8) | (ring_freq << 16)) | gpu_freq); gpu_freq = gpu_freq - 1U; ldv_50888: ; if ((unsigned int )dev_priv->rps.min_freq <= gpu_freq) { goto ldv_50887; } else { } return; } } void gen6_update_ring_freq(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return; } else { } } mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); __gen6_update_ring_freq(dev); mutex_unlock(& dev_priv->rps.hw_lock); return; } } static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; u32 val ; u32 rp0 ; struct drm_i915_private *__p ; { dev = dev_priv->dev; if ((unsigned int )(dev->pdev)->revision > 31U) { val = vlv_punit_read(dev_priv, 310U); __p = to_i915((struct drm_device const *)dev); switch ((int )__p->info.eu_total) { case 8: rp0 = val >> 24; goto ldv_50919; case 12: rp0 = val >> 16; goto ldv_50919; case 16: ; default: rp0 = val >> 8; goto ldv_50919; } ldv_50919: rp0 = rp0 & 255U; } else { val = vlv_punit_read(dev_priv, 219U); rp0 = (val >> 16) & 255U; } return ((int )rp0); } } static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv ) { u32 val ; u32 rpe ; { val = vlv_punit_read(dev_priv, 223U); rpe = (val >> 8) & 255U; return ((int )rpe); } } static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; u32 val ; u32 rp1 ; 
/* NOTE(review): end of cherryview_rps_guar_freq, the valleyview rps freq helpers (decode NC sideband regs 28/48/52 and PUNIT 211; vlv max is clamped to 234), the two *_check_pctx WARN helpers (read PCBR at MMIO 1581344 = 0x182140 and mask to 4K alignment), and most of cherryview_setup_pctx — which, if BIOS left PCBR zero, points it at the top pctx_size (32 KiB) of stolen memory. Requires dev->struct_mutex (WARN otherwise). */ { dev = dev_priv->dev; if ((unsigned int )(dev->pdev)->revision > 31U) { val = vlv_punit_read(dev_priv, 310U); rp1 = val & 255U; } else { val = vlv_punit_read(dev_priv, 216U); rp1 = (val >> 16) & 255U; } return ((int )rp1); } } static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv ) { u32 val ; u32 rp1 ; { val = vlv_nc_read(dev_priv, 28); rp1 = (val & 522240U) >> 11; return ((int )rp1); } } static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv ) { u32 val ; u32 rp0 ; u32 __min1 ; u32 __min2 ; { val = vlv_nc_read(dev_priv, 28); rp0 = (val & 2040U) >> 3; __min1 = rp0; __min2 = 234U; rp0 = __min1 < __min2 ? __min1 : __min2; return ((int )rp0); } } static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv ) { u32 val ; u32 rpe ; { val = vlv_nc_read(dev_priv, 48); rpe = val >> 27; val = vlv_nc_read(dev_priv, 52); rpe = ((val << 5) & 255U) | rpe; return ((int )rpe); } } static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv ) { u32 tmp ; { tmp = vlv_punit_read(dev_priv, 211U); return ((int )tmp & 255); } } static void valleyview_check_pctx(struct drm_i915_private *dev_priv ) { unsigned long pctx_addr ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); pctx_addr = (unsigned long )tmp & 4294963200UL; __ret_warn_on = (unsigned long long )dev_priv->mm.stolen_base + ((dev_priv->vlv_pctx)->stolen)->start != (unsigned long long )pctx_addr; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4806, "WARN_ON(pctx_addr != dev_priv->mm.stolen_base + dev_priv->vlv_pctx->stolen->start)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void cherryview_check_pctx(struct 
drm_i915_private *dev_priv ) { unsigned long pctx_addr ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); pctx_addr = (unsigned long )tmp & 4294963200UL; __ret_warn_on = pctx_addr >> 12 == 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4815, "WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void cherryview_setup_pctx(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; unsigned long pctx_paddr ; unsigned long paddr ; struct i915_gtt *gtt ; u32 pcbr ; int pctx_size ; int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; uint32_t tmp___2 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; gtt = & dev_priv->gtt; pctx_size = 32768; tmp = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4826, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); pcbr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); if (pcbr >> 12 == 0U) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("cherryview_setup_pctx", "BIOS didn\'t set up PCBR, fixing up\n"); } else { } paddr = dev_priv->mm.stolen_base + (gtt->stolen_size - (size_t )pctx_size); pctx_paddr = paddr & 0xfffffffffffff000UL; 
/* NOTE(review): end of cherryview_setup_pctx (writes the fixed-up PCBR and debug-logs it), then valleyview_setup_pctx — reuses a BIOS-preallocated pctx when PCBR is non-zero (via i915_gem_object_create_stolen_for_preallocated), otherwise allocates pctx_size (24 KiB) from stolen memory and programs PCBR; bails with a debug message when stolen space is exhausted. Ownership: the resulting object is stored in dev_priv->vlv_pctx and released by valleyview_cleanup_pctx (WARNs and returns if it was never set). CAUTION: a string literal ("not  enough stolen space...") is split across the next physical line break — do not touch that break. Tail is the head of valleyview_init_gt_powersave (decodes mem_freq from PUNIT reg 216 bits 6:7). */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581344L, (uint32_t )pctx_paddr, 1); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); drm_ut_debug_printk("cherryview_setup_pctx", "PCBR: 0x%08x\n", tmp___2); } else { } return; } } static void valleyview_setup_pctx(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *pctx ; unsigned long pctx_paddr ; u32 pcbr ; int pctx_size ; int __ret_warn_on ; int tmp ; long tmp___0 ; int pcbr_offset ; long tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; long tmp___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pctx_size = 24576; tmp = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4849, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); pcbr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); if (pcbr != 0U) { pcbr_offset = (int )((pcbr & 4294963200U) - (unsigned int )dev_priv->mm.stolen_base); pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, (u32 )pcbr_offset, 4294967295U, (u32 )pctx_size); goto out; } else { } tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("valleyview_setup_pctx", "BIOS didn\'t set up PCBR, fixing up\n"); } else { } pctx = i915_gem_object_create_stolen(dev, (u32 )pctx_size); if ((unsigned long )pctx == (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___2 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("valleyview_setup_pctx", "not 
enough stolen space for PCTX, disabling\n"); } else { } return; } else { } pctx_paddr = (unsigned long )((unsigned long long )dev_priv->mm.stolen_base + (pctx->stolen)->start); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581344L, (uint32_t )pctx_paddr, 1); out: tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); drm_ut_debug_printk("valleyview_setup_pctx", "PCBR: 0x%08x\n", tmp___3); } else { } dev_priv->vlv_pctx = pctx; return; } } static void valleyview_cleanup_pctx(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; long tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __ret_warn_on = (unsigned long )dev_priv->vlv_pctx == (unsigned long )((struct drm_i915_gem_object *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 4892, "WARN_ON(!dev_priv->vlv_pctx)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } drm_gem_object_unreference(& (dev_priv->vlv_pctx)->base); dev_priv->vlv_pctx = (struct drm_i915_gem_object *)0; return; } } static void valleyview_init_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 val ; long tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; long tmp___5 ; int tmp___6 ; int tmp___7 ; long tmp___8 ; int tmp___9 ; int tmp___10 ; long tmp___11 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; valleyview_setup_pctx(dev); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); val = vlv_punit_read(dev_priv, 216U); switch ((val >> 6) & 3U) { case 0U: ; case 1U: dev_priv->mem_freq = 800U; goto ldv_51005; case 2U: 
/* NOTE(review): end of valleyview_init_gt_powersave — fills rps.{max,rp0,efficient,rp1,min}_freq from the valleyview_rps_* helpers with debug logging, sets idle_freq=min_freq and defaults the soft limits, all under rps.hw_lock. Then the head of cherryview_init_gt_powersave, which decodes cz_freq/mem_freq from CCK reg 8 (bits 2:4) under sb_lock and starts the same freq-population sequence using the cherryview helpers. */ dev_priv->mem_freq = 1066U; goto ldv_51005; case 3U: dev_priv->mem_freq = 1333U; goto ldv_51005; } ldv_51005: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("valleyview_init_gt_powersave", "DDR speed: %d MHz\n", dev_priv->mem_freq); } else { } tmp___0 = valleyview_rps_max_freq(dev_priv); dev_priv->rps.max_freq = (u8 )tmp___0; dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { tmp___1 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq); drm_ut_debug_printk("valleyview_init_gt_powersave", "max GPU freq: %d MHz (%u)\n", tmp___1, (int )dev_priv->rps.max_freq); } else { } tmp___3 = valleyview_rps_rpe_freq(dev_priv); dev_priv->rps.efficient_freq = (u8 )tmp___3; tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { tmp___4 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.efficient_freq); drm_ut_debug_printk("valleyview_init_gt_powersave", "RPe GPU freq: %d MHz (%u)\n", tmp___4, (int )dev_priv->rps.efficient_freq); } else { } tmp___6 = valleyview_rps_guar_freq(dev_priv); dev_priv->rps.rp1_freq = (u8 )tmp___6; tmp___8 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___8 != 0L) { tmp___7 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.rp1_freq); drm_ut_debug_printk("valleyview_init_gt_powersave", "RP1(Guar Freq) GPU freq: %d MHz (%u)\n", tmp___7, (int )dev_priv->rps.rp1_freq); } else { } tmp___9 = valleyview_rps_min_freq(dev_priv); dev_priv->rps.min_freq = (u8 )tmp___9; tmp___11 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___11 != 0L) { tmp___10 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq); drm_ut_debug_printk("valleyview_init_gt_powersave", "min GPU freq: %d MHz (%u)\n", tmp___10, (int )dev_priv->rps.min_freq); } else { } dev_priv->rps.idle_freq = dev_priv->rps.min_freq; if ((unsigned int )dev_priv->rps.max_freq_softlimit == 0U) { dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; } 
else { } if ((unsigned int )dev_priv->rps.min_freq_softlimit == 0U) { dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; } else { } mutex_unlock(& dev_priv->rps.hw_lock); return; } } static void cherryview_init_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 val ; long tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; long tmp___5 ; int tmp___6 ; int tmp___7 ; long tmp___8 ; int tmp___9 ; long tmp___10 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___11 ; long tmp___12 ; long tmp___13 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; cherryview_setup_pctx(dev); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_cck_read(dev_priv, 8U); mutex_unlock(& dev_priv->sb_lock); switch ((val >> 2) & 7U) { case 0U: ; case 1U: dev_priv->rps.cz_freq = 200U; dev_priv->mem_freq = 1600U; goto ldv_51016; case 2U: dev_priv->rps.cz_freq = 267U; dev_priv->mem_freq = 1600U; goto ldv_51016; case 3U: dev_priv->rps.cz_freq = 333U; dev_priv->mem_freq = 2000U; goto ldv_51016; case 4U: dev_priv->rps.cz_freq = 320U; dev_priv->mem_freq = 1600U; goto ldv_51016; case 5U: dev_priv->rps.cz_freq = 400U; dev_priv->mem_freq = 1600U; goto ldv_51016; } ldv_51016: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("cherryview_init_gt_powersave", "DDR speed: %d MHz\n", dev_priv->mem_freq); } else { } tmp___0 = cherryview_rps_max_freq(dev_priv); dev_priv->rps.max_freq = (u8 )tmp___0; dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { tmp___1 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq); drm_ut_debug_printk("cherryview_init_gt_powersave", "max GPU freq: %d MHz (%u)\n", tmp___1, (int )dev_priv->rps.max_freq); } else { } tmp___3 = cherryview_rps_rpe_freq(dev_priv); dev_priv->rps.efficient_freq = (u8 )tmp___3; tmp___5 = 
/* NOTE(review): end of cherryview_init_gt_powersave — populates rp1/min freqs (min := efficient on chv), WARN_ON_ONCE (CIL-expanded __warned pattern) if any freq is odd, defaults soft limits, unlocks hw_lock — then trivial valleyview_cleanup_gt_powersave, and the head of cherryview_enable_rps (its tail is outside this chunk): asserts rps.hw_lock held, clears/acks GT FIFO errors, checks PCBR, and programs RC6/RP registers under forcewake. */ ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { tmp___4 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.efficient_freq); drm_ut_debug_printk("cherryview_init_gt_powersave", "RPe GPU freq: %d MHz (%u)\n", tmp___4, (int )dev_priv->rps.efficient_freq); } else { } tmp___6 = cherryview_rps_guar_freq(dev_priv); dev_priv->rps.rp1_freq = (u8 )tmp___6; tmp___8 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___8 != 0L) { tmp___7 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.rp1_freq); drm_ut_debug_printk("cherryview_init_gt_powersave", "RP1(Guar) GPU freq: %d MHz (%u)\n", tmp___7, (int )dev_priv->rps.rp1_freq); } else { } dev_priv->rps.min_freq = dev_priv->rps.efficient_freq; tmp___10 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___10 != 0L) { tmp___9 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq); drm_ut_debug_printk("cherryview_init_gt_powersave", "min GPU freq: %d MHz (%u)\n", tmp___9, (int )dev_priv->rps.min_freq); } else { } __ret_warn_once = ((((int )dev_priv->rps.max_freq | (int )dev_priv->rps.efficient_freq) | (int )dev_priv->rps.rp1_freq) | (int )dev_priv->rps.min_freq) & 1; tmp___13 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___13 != 0L) { __ret_warn_on = ! 
__warned; tmp___11 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___11 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5020, "Odd GPU freq values\n"); } else { } tmp___12 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___12 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); dev_priv->rps.idle_freq = dev_priv->rps.min_freq; if ((unsigned int )dev_priv->rps.max_freq_softlimit == 0U) { dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; } else { } if ((unsigned int )dev_priv->rps.min_freq_softlimit == 0U) { dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; } else { } mutex_unlock(& dev_priv->rps.hw_lock); return; } } static void valleyview_cleanup_gt_powersave(struct drm_device *dev ) { { valleyview_cleanup_pctx(dev); return; } } static void cherryview_enable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u32 gtfifodbg ; u32 val ; u32 rc6_mode ; u32 pcbr ; int i ; int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; bool tmp___2 ; int _a ; int tmp___3 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on___0 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; int tmp___9 ; long tmp___10 ; int tmp___11 ; long tmp___12 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; rc6_mode = 0U; tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5046, 
"WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); gtfifodbg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1179648L, 1); if (gtfifodbg != 0U) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("cherryview_enable_rps", "GT fifo had a previous error %x\n", gtfifodbg); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1179648L, gtfifodbg, 1); } else { } cherryview_check_pctx(dev_priv); intel_uncore_forcewake_get(dev_priv, 7); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41116L, 2621440U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41128L, 125000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41132L, 25U, 1); i = 0; goto ldv_51044; ldv_51043: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___2 = intel_ring_initialized(ring); if ((int )tmp___2) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 84U), 10U, 1); } else { } i = i + 1; ldv_51044: ; if (i <= 4) { goto ldv_51043; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41136L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41144L, 390U, 1); _a = 32771; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278212L, (uint32_t )((_a << 16) | _a), 1); pcbr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581344L, 1); tmp___3 = intel_enable_rc6((struct drm_device const *)dev); if (tmp___3 & 1 && pcbr >> 12 != 0U) { rc6_mode = 268435456U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, rc6_mode, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40976L, 1000000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41004L, 59400U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41008L, 245000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41064L, 66000U, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41068L, 350000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41072L, 10U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40996L, 1426U, 1); val = 15U; vlv_punit_write(dev_priv, 4U, val); val = vlv_punit_read(dev_priv, 216U); __ret_warn_once = (val & 16U) == 0U; tmp___6 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___6 != 0L) { __ret_warn_on___0 = ! __warned; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5118, "GPLL not enabled\n"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); tmp___7 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("cherryview_enable_rps", "GPLL enabled? %s\n", (val & 16U) != 0U ? 
(char *)"yes" : (char *)"no"); } else { } tmp___8 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("cherryview_enable_rps", "GPU status: 0x%08x\n", val); } else { } dev_priv->rps.cur_freq = (u8 )(val >> 8); tmp___10 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___10 != 0L) { tmp___9 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.cur_freq); drm_ut_debug_printk("cherryview_enable_rps", "current GPU freq: %d MHz (%u)\n", tmp___9, (int )dev_priv->rps.cur_freq); } else { } tmp___12 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___12 != 0L) { tmp___11 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.efficient_freq); drm_ut_debug_printk("cherryview_enable_rps", "setting GPU freq to %d MHz (%u)\n", tmp___11, (int )dev_priv->rps.efficient_freq); } else { } valleyview_set_rps(dev_priv->dev, (int )dev_priv->rps.efficient_freq); intel_uncore_forcewake_put(dev_priv, 7); return; } } static void valleyview_enable_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u32 gtfifodbg ; u32 val ; u32 rc6_mode ; int i ; int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; bool tmp___2 ; int _a ; int tmp___3 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on___0 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; int tmp___9 ; long tmp___10 ; int tmp___11 ; long tmp___12 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; rc6_mode = 0U; tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5144, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); 
valleyview_check_pctx(dev_priv); gtfifodbg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1179648L, 1); if (gtfifodbg != 0U) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("valleyview_enable_rps", "GT fifo had a previous error %x\n", gtfifodbg); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1179648L, gtfifodbg, 1); } else { } intel_uncore_forcewake_get(dev_priv, 7); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40976L, 1000000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41004L, 59400U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41008L, 245000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41064L, 66000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41068L, 350000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41072L, 10U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 40996L, 3473U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41116L, 2621440U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41128L, 125000U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41132L, 25U, 1); i = 0; goto ldv_51079; ldv_51078: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___2 = intel_ring_initialized(ring); if ((int )tmp___2) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 84U), 10U, 1); } else { } i = i + 1; ldv_51079: ; if (i <= 4) { goto ldv_51078; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41144L, 1367U, 1); _a = 51; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278212L, (uint32_t )((_a << 16) | _a), 1); tmp___3 = intel_enable_rc6((struct drm_device const *)dev); if (tmp___3 & 1) { rc6_mode = 285212672U; } else { } intel_print_rc6_info(dev, rc6_mode); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41104L, rc6_mode, 1); val = 27U; vlv_punit_write(dev_priv, 4U, val); val = 
vlv_punit_read(dev_priv, 216U); __ret_warn_once = (val & 16U) == 0U; tmp___6 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___6 != 0L) { __ret_warn_on___0 = ! __warned; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5208, "GPLL not enabled\n"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); tmp___7 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("valleyview_enable_rps", "GPLL enabled? %s\n", (val & 16U) != 0U ? (char *)"yes" : (char *)"no"); } else { } tmp___8 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("valleyview_enable_rps", "GPU status: 0x%08x\n", val); } else { } dev_priv->rps.cur_freq = (u8 )(val >> 8); tmp___10 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___10 != 0L) { tmp___9 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.cur_freq); drm_ut_debug_printk("valleyview_enable_rps", "current GPU freq: %d MHz (%u)\n", tmp___9, (int )dev_priv->rps.cur_freq); } else { } tmp___12 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___12 != 0L) { tmp___11 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.efficient_freq); drm_ut_debug_printk("valleyview_enable_rps", "setting GPU freq to %d MHz (%u)\n", tmp___11, (int )dev_priv->rps.efficient_freq); } else { } valleyview_set_rps(dev_priv->dev, (int )dev_priv->rps.efficient_freq); intel_uncore_forcewake_put(dev_priv, 7); return; } } static unsigned long intel_pxfreq(u32 vidfreq ) { unsigned long freq ; int div ; int post ; int pre ; { div = (int )((vidfreq & 4128768U) >> 16); post = (int )((vidfreq & 12288U) >> 12); pre 
= (int )vidfreq & 7; if (pre == 0) { return (0UL); } else { } freq = (unsigned long )((div * 133333) / (pre << post)); return (freq); } } static struct cparams const cparams[6U] = { {1U, 1333U, 301U, 28664U}, {1U, 1066U, 294U, 24460U}, {1U, 800U, 294U, 25192U}, {0U, 1333U, 276U, 27605U}, {0U, 1066U, 276U, 27605U}, {0U, 800U, 231U, 23784U}}; static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv ) { u64 total_count ; u64 diff ; u64 ret ; u32 count1 ; u32 count2 ; u32 count3 ; u32 m ; u32 c ; unsigned long now ; unsigned int tmp ; unsigned long diff1 ; int i ; int tmp___0 ; long tmp___1 ; { m = 0U; c = 0U; tmp = jiffies_to_msecs(jiffies); now = (unsigned long )tmp; tmp___0 = queued_spin_is_locked(& mchdev_lock.__annonCompField18.rlock.raw_lock); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c"), "i" (5263), "i" (12UL)); ldv_51128: ; goto ldv_51128; } else { } diff1 = now - dev_priv->ips.last_time1; if (diff1 <= 10UL) { return (dev_priv->ips.chipset_power); } else { } count1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70372L, 1); count2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70376L, 1); count3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70368L, 1); total_count = (u64 )((count1 + count2) + count3); if (dev_priv->ips.last_count1 > total_count) { diff = ~ dev_priv->ips.last_count1; diff = diff + total_count; } else { diff = total_count - dev_priv->ips.last_count1; } i = 0; goto ldv_51133; ldv_51132: ; if ((int )cparams[i].i == dev_priv->ips.c_m && (int )cparams[i].t == dev_priv->ips.r_t) { m = (u32 )cparams[i].m; c = (u32 )cparams[i].c; goto 
ldv_51131; } else { } i = i + 1; ldv_51133: ; if ((unsigned int )i <= 5U) { goto ldv_51132; } else { } ldv_51131: /* tail of __i915_chipset_val(): the cparams[] scan above picked m/c coefficients; scale the event-count delta by elapsed ms, apply m*diff + c, divide by 10, and cache the result plus the counters used to compute it */ diff = div_u64(diff, (u32 )diff1); ret = (u64 )m * diff + (u64 )c; ret = div_u64(ret, 10U); dev_priv->ips.last_count1 = total_count; dev_priv->ips.last_time1 = now; dev_priv->ips.chipset_power = (unsigned long )ret; return ((unsigned long )ret); } }
/* i915_chipset_val - locked wrapper around __i915_chipset_val(). Returns 0 on
 * anything but gen5 hardware (IPS is an Ironlake-only feature); otherwise
 * serializes on mchdev_lock, which the unlocked worker asserts is held. */
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; unsigned long val ; struct drm_i915_private *__p ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 5U) { return (0UL); } else { } spin_lock_irq(& mchdev_lock); val = __i915_chipset_val(dev_priv); spin_unlock_irq(& mchdev_lock); return (val); } }
/* i915_mch_val - compute a thermal reading from two MMIO registers
 * (offsets 69664 = 0x11020 and 69638 = 0x11006 — presumably the gen5
 * TSFS/TR1 thermal sensor registers, confirm against the i915 register map):
 * m = bits 15:8 of the first register, b = bits 7:0, x = the byte register;
 * result is (m*x)/127 - b. No locking; callers hold mchdev_lock. */
unsigned long i915_mch_val(struct drm_i915_private *dev_priv ) { unsigned long m ; unsigned long x ; unsigned long b ; u32 tsfs ; uint8_t tmp ; { tsfs = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 69664L, 1); m = (unsigned long )((tsfs & 65280U) >> 8); tmp = (*(dev_priv->uncore.funcs.mmio_readb))(dev_priv, 69638L, 1); x = (unsigned long )tmp; b = (unsigned long )tsfs & 255UL; return ((m * x) / 127UL - b); } }
/* _pxvid_to_vd - map a 7-bit PXVID code to a voltage value (units appear to
 * be quarter-mV steps: vid*125 + 250). 0 maps to 0; codes 8..30 are clamped
 * up to 31 before scaling (equivalent to upstream's >=8 && <31 clamp). */
static int _pxvid_to_vd(u8 pxvid ) { { if ((unsigned int )pxvid == 0U) { return (0); } else { } if ((unsigned int )pxvid > 7U && (unsigned int )pxvid <= 30U) { pxvid = 31U; } else { } return ((int )pxvid * 125 + 250); } }
/* pvid_to_extvid - convert PXVID to an "external" voltage: on the hardware
 * variant flagged by the info byte at offset 44 of dev_priv (a CIL-flattened
 * bitfield test — which flag it is cannot be told from here), subtract 1125
 * and clamp at zero; otherwise return the raw vd value. */
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv , u8 pxvid ) { struct drm_device *dev ; int vd ; int tmp ; int vm ; struct drm_i915_private *__p ; { dev = dev_priv->dev; tmp = _pxvid_to_vd((int )pxvid); vd = tmp; vm = vd + -1125; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { return ((u32 )(0 > vm ?
0 : vm)); } else { } return ((u32 )vd); } } static void __i915_update_gfx_val(struct drm_i915_private *dev_priv ) { u64 now ; u64 diff ; u64 diffms ; u32 count ; int tmp ; long tmp___0 ; uint32_t __base ; uint32_t __rem ; { tmp = queued_spin_is_locked(& mchdev_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c"), "i" (5370), "i" (12UL)); ldv_51175: ; goto ldv_51175; } else { } now = ktime_get_raw_ns(); diffms = now - dev_priv->ips.last_time2; __base = 1000000U; __rem = (uint32_t )(diffms % (u64 )__base); diffms = diffms / (u64 )__base; if (diffms == 0ULL) { return; } else { } count = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70388L, 1); if ((u64 )count < dev_priv->ips.last_count2) { diff = ~ dev_priv->ips.last_count2; diff = (u64 )count + diff; } else { diff = (u64 )count - dev_priv->ips.last_count2; } dev_priv->ips.last_count2 = (u64 )count; dev_priv->ips.last_time2 = now; diff = diff * 1181ULL; diff = div_u64(diff, (u32 )diffms * 10U); dev_priv->ips.gfx_power = (unsigned long )diff; return; } } void i915_update_gfx_val(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct drm_i915_private *__p ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 5U) { return; } else { } spin_lock_irq(& mchdev_lock); __i915_update_gfx_val(dev_priv); spin_unlock_irq(& mchdev_lock); return; } } static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv ) { unsigned long t ; unsigned long corr ; unsigned long state1 ; unsigned long corr2 ; unsigned long state2 ; u32 
pxvid ; u32 ext_v ; int tmp ; long tmp___0 ; { tmp = queued_spin_is_locked(& mchdev_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c"), "i" (5417), "i" (12UL)); ldv_51199: ; goto ldv_51199; } else { } pxvid = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )dev_priv->rps.cur_freq + 17476) * 4), 1); pxvid = (pxvid >> 24) & 127U; ext_v = pvid_to_extvid(dev_priv, (int )((u8 )pxvid)); state1 = (unsigned long )ext_v; t = i915_mch_val(dev_priv); if (t > 80UL) { corr = t * 2349UL + 135940UL; } else if (t > 49UL) { corr = t * 964UL + 29317UL; } else { corr = t * 301UL + 1004UL; } corr = ((state1 * 150142UL) / 10000UL - 78642UL) * corr; corr = corr / 100000UL; corr2 = (unsigned long )dev_priv->ips.corr * corr; state2 = (corr2 * state1) / 10000UL; state2 = state2 / 100UL; __i915_update_gfx_val(dev_priv); return (dev_priv->ips.gfx_power + state2); } } unsigned long i915_gfx_val(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; unsigned long val ; struct drm_i915_private *__p ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 5U) { return (0UL); } else { } spin_lock_irq(& mchdev_lock); val = __i915_gfx_val(dev_priv); spin_unlock_irq(& mchdev_lock); return (val); } } unsigned long i915_read_mch_val(void) { struct drm_i915_private *dev_priv ; unsigned long chipset_val ; unsigned long graphics_val ; unsigned long ret ; { ret = 0UL; spin_lock_irq(& mchdev_lock); if ((unsigned long )i915_mch_dev == (unsigned long )((struct drm_i915_private *)0)) { goto out_unlock; } 
else { } dev_priv = i915_mch_dev; chipset_val = __i915_chipset_val(dev_priv); graphics_val = __i915_gfx_val(dev_priv); ret = chipset_val + graphics_val; out_unlock: spin_unlock_irq(& mchdev_lock); return (ret); } }
/* The __kstrtab_* / __ksymtab_* pairs below are CIL's expansion of the
 * kernel's EXPORT_SYMBOL() macro for each IPS hook; they are data, not code. */
static char const __kstrtab_i915_read_mch_val[18U] = { 'i', '9', '1', '5', '_', 'r', 'e', 'a', 'd', '_', 'm', 'c', 'h', '_', 'v', 'a', 'l', '\000'}; struct kernel_symbol const __ksymtab_i915_read_mch_val ; struct kernel_symbol const __ksymtab_i915_read_mch_val = {(unsigned long )(& i915_read_mch_val), (char const *)(& __kstrtab_i915_read_mch_val)};
/* i915_gpu_raise - IPS hook: permit one higher GPU P-state by decrementing
 * ips.max_delay (a lower delay value means a higher frequency) while it is
 * still above ips.fmax. Returns false only when no i915 device has
 * registered itself in i915_mch_dev (see intel_gpu_ips_init below). */
bool i915_gpu_raise(void) { struct drm_i915_private *dev_priv ; bool ret ; { ret = 1; spin_lock_irq(& mchdev_lock); if ((unsigned long )i915_mch_dev == (unsigned long )((struct drm_i915_private *)0)) { ret = 0; goto out_unlock; } else { } dev_priv = i915_mch_dev; if ((int )dev_priv->ips.max_delay > (int )dev_priv->ips.fmax) { dev_priv->ips.max_delay = (u8 )((int )dev_priv->ips.max_delay - 1); } else { } out_unlock: spin_unlock_irq(& mchdev_lock); return (ret); } } static char const __kstrtab_i915_gpu_raise[15U] = { 'i', '9', '1', '5', '_', 'g', 'p', 'u', '_', 'r', 'a', 'i', 's', 'e', '\000'}; struct kernel_symbol const __ksymtab_i915_gpu_raise ; struct kernel_symbol const __ksymtab_i915_gpu_raise = {(unsigned long )(& i915_gpu_raise), (char const *)(& __kstrtab_i915_gpu_raise)};
/* i915_gpu_lower - IPS hook: mirror of i915_gpu_raise(); force one lower
 * P-state by incrementing ips.max_delay toward ips.min_delay. Same
 * mchdev_lock protocol and same false-when-unregistered contract. */
bool i915_gpu_lower(void) { struct drm_i915_private *dev_priv ; bool ret ; { ret = 1; spin_lock_irq(& mchdev_lock); if ((unsigned long )i915_mch_dev == (unsigned long )((struct drm_i915_private *)0)) { ret = 0; goto out_unlock; } else { } dev_priv = i915_mch_dev; if ((int )dev_priv->ips.max_delay < (int )dev_priv->ips.min_delay) { dev_priv->ips.max_delay = (u8 )((int )dev_priv->ips.max_delay + 1); } else { } out_unlock: spin_unlock_irq(& mchdev_lock); return (ret); } } static char const __kstrtab_i915_gpu_lower[15U] = { 'i', '9', '1', '5', '_', 'g', 'p', 'u', '_', 'l', 'o', 'w', 'e', 'r', '\000'}; struct kernel_symbol const
__ksymtab_i915_gpu_lower ; struct kernel_symbol const __ksymtab_i915_gpu_lower = {(unsigned long )(& i915_gpu_lower), (char const *)(& __kstrtab_i915_gpu_lower)};
/* i915_gpu_busy - IPS hook: report true if any of the (up to 5) engine rings
 * has a non-empty request list, i.e. the GPU has outstanding work. Returns
 * false when no device is registered. The ldv_5125x labels are CIL's
 * flattening of the for_each_ring loop. */
bool i915_gpu_busy(void) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; bool ret ; int i ; int tmp ; bool tmp___0 ; { ret = 0; spin_lock_irq(& mchdev_lock); if ((unsigned long )i915_mch_dev == (unsigned long )((struct drm_i915_private *)0)) { goto out_unlock; } else { } dev_priv = i915_mch_dev; i = 0; goto ldv_51258; ldv_51257: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { tmp = list_empty((struct list_head const *)(& ring->request_list)); ret = ((int )ret | (tmp == 0)) != 0; } else { } i = i + 1; ldv_51258: ; if (i <= 4) { goto ldv_51257; } else { } out_unlock: spin_unlock_irq(& mchdev_lock); return (ret); } } static char const __kstrtab_i915_gpu_busy[14U] = { 'i', '9', '1', '5', '_', 'g', 'p', 'u', '_', 'b', 'u', 's', 'y', '\000'}; struct kernel_symbol const __ksymtab_i915_gpu_busy ; struct kernel_symbol const __ksymtab_i915_gpu_busy = {(unsigned long )(& i915_gpu_busy), (char const *)(& __kstrtab_i915_gpu_busy)};
/* i915_gpu_turbo_disable - IPS hook: pin the GPU at its startup frequency by
 * setting max_delay to fstart and programming it via ironlake_set_drps().
 * Returns false if no device is registered or the DRPS write fails (the
 * tmp/tmp___0 dance is CIL's expansion of the boolean negation). */
bool i915_gpu_turbo_disable(void) { struct drm_i915_private *dev_priv ; bool ret ; bool tmp ; int tmp___0 ; { ret = 1; spin_lock_irq(& mchdev_lock); if ((unsigned long )i915_mch_dev == (unsigned long )((struct drm_i915_private *)0)) { ret = 0; goto out_unlock; } else { } dev_priv = i915_mch_dev; dev_priv->ips.max_delay = dev_priv->ips.fstart; tmp = ironlake_set_drps(dev_priv->dev, (int )dev_priv->ips.fstart); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { ret = 0; } else { } out_unlock: spin_unlock_irq(& mchdev_lock); return (ret); } } static char const __kstrtab_i915_gpu_turbo_disable[23U] = { 'i', '9', '1', '5', '_', 'g', 'p', 'u', '_', 't', 'u', 'r', 'b', 'o', '_', 'd', 'i', 's', 'a', 'b', 'l', 'e', '\000'}; struct kernel_symbol const __ksymtab_i915_gpu_turbo_disable
; struct kernel_symbol const __ksymtab_i915_gpu_turbo_disable = {(unsigned long )(& i915_gpu_turbo_disable), (char const *)(& __kstrtab_i915_gpu_turbo_disable)}; static void ips_ping_for_i915_load(void) { void (*link)(void) ; void *tmp ; { tmp = __symbol_get("ips_link_to_i915_driver"); link = (void (*)(void))tmp; if ((unsigned long )link != (unsigned long )((void (*)(void))0)) { (*link)(); __symbol_put("ips_link_to_i915_driver"); } else { } return; } } void intel_gpu_ips_init(struct drm_i915_private *dev_priv ) { { spin_lock_irq(& mchdev_lock); i915_mch_dev = dev_priv; spin_unlock_irq(& mchdev_lock); ips_ping_for_i915_load(); return; } } void intel_gpu_ips_teardown(void) { { spin_lock_irq(& mchdev_lock); i915_mch_dev = (struct drm_i915_private *)0; spin_unlock_irq(& mchdev_lock); return; } } static void intel_init_emon(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 lcfuse ; u8 pxw[16U] ; int i ; u32 pxvidfreq ; uint32_t tmp ; unsigned long freq ; unsigned long tmp___0 ; unsigned long vid ; unsigned long val ; u32 val___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71168L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 71168L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70220L, 352587008U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70224L, 8323072U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70228L, 505544708U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 70232L, 67108868U, 1); i = 0; goto ldv_51297; ldv_51296: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 17559) * 4), 0U, 1); i = i + 1; ldv_51297: ; if (i <= 4) { goto ldv_51296; } else { } i = 0; goto ldv_51300; ldv_51299: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 17564) * 4), 0U, 1); i = i + 1; ldv_51300: ; if (i <= 2) { goto ldv_51299; } else { } i = 0; goto ldv_51307; ldv_51306: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 
(off_t )((i + 17476) * 4), 1); pxvidfreq = tmp; tmp___0 = intel_pxfreq(pxvidfreq); freq = tmp___0; vid = (unsigned long )((pxvidfreq & 2130706432U) >> 24); val = vid * vid; val = (freq / 1000UL) * val; val = val * 255UL; val = val / 14516100UL; if (val > 255UL) { drm_err("bad pxval: %ld\n", val); } else { } pxw[i] = (u8 )val; i = i + 1; ldv_51307: ; if (i <= 15) { goto ldv_51306; } else { } pxw[14] = 0U; pxw[15] = 0U; i = 0; goto ldv_51311; ldv_51310: val___0 = (u32 )(((((int )pxw[i * 4] << 24) | ((int )pxw[i * 4 + 1] << 16)) | ((int )pxw[i * 4 + 2] << 8)) | (int )pxw[i * 4 + 3]); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 17817) * 4), val___0, 1); i = i + 1; ldv_51311: ; if (i <= 3) { goto ldv_51310; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71176L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71180L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71184L, 32512U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71188L, 14U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71192L, 917504U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71196L, 1744831232U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71200L, 1107296256U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71204L, 1310769U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71208L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71212L, 0U, 1); i = 0; goto ldv_51314; ldv_51313: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 17824) * 4), 0U, 1); i = i + 1; ldv_51314: ; if (i <= 7) { goto ldv_51313; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 71168L, 2147483673U, 1); lcfuse = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 71360L, 1); dev_priv->ips.corr = (u8 )lcfuse; return; } } void intel_init_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { i915.enable_rc6 = 
sanitize_rc6_option((struct drm_device const *)dev, i915.enable_rc6); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { cherryview_init_gt_powersave(dev); } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { valleyview_init_gt_powersave(dev); } else { } } return; } } void intel_cleanup_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { return; } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { valleyview_cleanup_gt_powersave(dev); } else { } } return; } } static void gen6_suspend_rps(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ldv_flush_delayed_work_86(& dev_priv->rps.delayed_resume_work); gen6_disable_rps_interrupts(dev); return; } } void intel_suspend_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return; } else { } gen6_suspend_rps(dev); gen6_rps_idle(dev_priv); return; } } void intel_disable_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct 
drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___4->info.device_id) == 70U) { ironlake_disable_drps(dev); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 5U) { intel_suspend_gt_powersave(dev); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 8U) { gen9_disable_rps(dev); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { cherryview_disable_rps(dev); } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { valleyview_disable_rps(dev); } else { gen6_disable_rps(dev); } } } dev_priv->rps.enabled = 0; mutex_unlock(& dev_priv->rps.hw_lock); } else { } } return; } } static void intel_gen6_powersave_work(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct drm_device *dev ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; long tmp___0 ; int __ret_warn_on___1 ; long tmp___1 ; int __ret_warn_on___2 ; long tmp___2 ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff4610UL; dev = dev_priv->dev; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); gen6_reset_rps_interrupts(dev); __p___3 = to_i915((struct drm_device 
const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { cherryview_enable_rps(dev); } else { goto _L; } } else { _L: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { valleyview_enable_rps(dev); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 8U) { gen9_enable_rc6(dev); gen9_enable_rps(dev); __gen6_update_ring_freq(dev); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { gen8_enable_rps(dev); __gen6_update_ring_freq(dev); } else { gen6_enable_rps(dev); __gen6_update_ring_freq(dev); } } else { gen6_enable_rps(dev); __gen6_update_ring_freq(dev); } } } } __ret_warn_on = (int )dev_priv->rps.max_freq < (int )dev_priv->rps.min_freq; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5814, "WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )dev_priv->rps.idle_freq > (int )dev_priv->rps.max_freq; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5815, "WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq)"); } else { } 
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __ret_warn_on___1 = (int )dev_priv->rps.efficient_freq < (int )dev_priv->rps.min_freq; tmp___1 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5817, "WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); __ret_warn_on___2 = (int )dev_priv->rps.efficient_freq > (int )dev_priv->rps.max_freq; tmp___2 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 5818, "WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq)"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); dev_priv->rps.enabled = 1; gen6_enable_rps_interrupts(dev); mutex_unlock(& dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); return; } } void intel_enable_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; bool tmp ; unsigned long tmp___0 ; bool tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_vgpu_active(dev); if ((int )tmp) { return; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 70U) { mutex_lock_nested(& dev->struct_mutex, 0U); ironlake_enable_drps(dev); intel_init_emon(dev); mutex_unlock(& dev->struct_mutex); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { tmp___0 = round_jiffies_up_relative(250UL); 
tmp___1 = schedule_delayed_work(& dev_priv->rps.delayed_resume_work, tmp___0); if ((int )tmp___1) { intel_runtime_pm_get_noresume(dev_priv); } else { } } else { } } return; } } void intel_reset_gt_powersave(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return; } else { } gen6_suspend_rps(dev); dev_priv->rps.enabled = 0; return; } } static void ibx_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794656L, 536870912U, 1); return; } } static void g4x_disable_trickle_feed(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = 0; goto ldv_51505; ldv_51504: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U), tmp | 16384U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), tmp___0, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 0); pipe = (enum pipe )((int )pipe + 1); ldv_51505: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_51504; } else { } return; } } static void ilk_init_lp_watermarks(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282896L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282896L, tmp & 2147483647U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282892L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282892L, tmp___0 & 2147483647U, 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282888L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282888L, tmp___1 & 2147483647U, 1); return; } } extern void __compiletime_assert_5980(void) ; static void ironlake_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t dspclk_gate ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p ; uint32_t tmp___3 ; int _a ; bool __cond___3 ; bool __cond___4 ; bool __cond___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dspclk_gate = 268435456U; dspclk_gate = dspclk_gate | 896U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286752L, 262146U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286756L, 2048U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270340L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270340L, tmp | 6291456U, 1); dspclk_gate = dspclk_gate | 32U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282624L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282624L, tmp___0 | 32768U, 
1); ilk_init_lp_watermarks(dev); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 70U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270336L, tmp___1 | 4194304U, 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270340L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270340L, tmp___2 | 4194304U, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270368L, dspclk_gate, 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270340L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270340L, tmp___3 | 33554432U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8332L, 1073758208U, 1); _a = 256; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )((_a << 16) | _a), 1); __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_5980(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_5980(); } else { } __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_5980(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )65536, 1); g4x_disable_trickle_feed(dev); ibx_init_clock_gating(dev); return; } } static void cpt_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int pipe ; uint32_t val ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794656L, 1610629120U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794628L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794628L, tmp | 1U, 1); pipe = 0; goto ldv_51563; ldv_51562: val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 983140), 1); val = val | 2147483648U; val = val & 3758096383U; if ((unsigned int )*((unsigned char *)dev_priv + 41280UL) != 0U) { val = val 
| 536870912U; } else { } val = val & 3892314111U; val = val & 4227858431U; val = val & 4261412863U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 983140), val, 1); pipe = pipe + 1; ldv_51563: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_51562; } else { } pipe = 0; goto ldv_51572; ldv_51571: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 983136), 16U, 1); pipe = pipe + 1; ldv_51572: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > pipe) { goto ldv_51571; } else { } return; } } static void gen6_check_mch_setup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1334544L, 1); if ((tmp & 63U) != 12U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("gen6_check_mch_setup", "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", tmp); } else { } } else { } return; } } extern void __compiletime_assert_6051(void) ; extern void __compiletime_assert_6062(void) ; extern void __compiletime_assert_6067(void) ; static void gen6_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t dspclk_gate ; uint32_t tmp ; int _a ; bool __cond___2 ; bool __cond___3 ; bool __cond___4 ; bool __cond___5 ; bool __cond___6 ; bool __cond___7 ; bool __cond___8 ; bool __cond___9 ; bool __cond___10 ; uint32_t tmp___0 ; int _a___0 ; int _a___1 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dspclk_gate = 268435456U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270368L, dspclk_gate, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270340L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270340L, tmp | 33554432U, 1); _a = 1024; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8324L, (uint32_t 
)((_a << 16) | _a), 1); __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_6051(); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_6051(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_6051(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )65536, 1); __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_6062(); } else { } __cond___6 = 0; if ((int )__cond___6) { __compiletime_assert_6062(); } else { } __cond___7 = 0; if ((int )__cond___7) { __compiletime_assert_6062(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8400L, (uint32_t )41943552, 1); ilk_init_lp_watermarks(dev); __cond___8 = 0; if ((int )__cond___8) { __compiletime_assert_6067(); } else { } __cond___9 = 0; if ((int )__cond___9) { __compiletime_assert_6067(); } else { } __cond___10 = 0; if ((int )__cond___10) { __compiletime_assert_6067(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )2097152, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37888L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37888L, tmp___0 | 160U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37892L, 6144U, 1); _a___0 = 32; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8336L, (uint32_t )((_a___0 << 16) | _a___0), 1); _a___1 = 2; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8336L, (uint32_t )((_a___1 << 16) | _a___1), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270336L, tmp___1 | 6291456U, 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270340L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270340L, tmp___2 | 6291456U, 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270368L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270368L, tmp___3 | 160U, 1); g4x_disable_trickle_feed(dev); cpt_init_clock_gating(dev); 
gen6_check_mch_setup(dev); return; } } static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv ) { uint32_t reg ; uint32_t tmp ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 1); reg = tmp; reg = reg & 4294479759U; reg = reg; reg = reg; reg = reg; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, reg, 1); return; } } static void lpt_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned int )dev_priv->pch_id == 39936U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794656L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794656L, tmp | 4096U, 1); } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983136L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983136L, tmp___0 | 16U, 1); return; } } static void lpt_suspend_hw(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t val ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned int )dev_priv->pch_id == 39936U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794656L, 1); val = tmp; val = val & 4294963199U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794656L, val, 1); } else { } return; } } static void broadwell_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; uint32_t misccpctl ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; struct drm_i915_private *__p ; uint32_t tmp___2 ; int _a ; uint32_t tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ilk_init_lp_watermarks(dev); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16528L, tmp | 64U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270464L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270464L, tmp___0 | 32768U, 1); pipe 
= 0; goto ldv_51695; ldv_51694: tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe + 67628) * 4), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pipe + 67628) * 4), tmp___1 | 1U, 1); pipe = (enum pipe )((int )pipe + 1); ldv_51695: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_51694; } else { } tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, tmp___2 & 4294410239U, 1); _a = 4096; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8272L, (uint32_t )((_a << 16) | _a), 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37936L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37936L, tmp___3 | 16384U, 1); misccpctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37924L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37924L, misccpctl & 4294967294U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45312L, 7880704U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37924L, misccpctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16420L, 4026564607U, 1); lpt_init_clock_gating(dev); return; } } extern void __compiletime_assert_6256(void) ; extern void __compiletime_assert_6260(void) ; extern void __compiletime_assert_6275(void) ; static void haswell_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int _a ; uint32_t tmp ; uint32_t tmp___0 ; bool __cond___2 ; bool __cond___3 ; bool __cond___4 ; bool __cond___5 ; bool __cond___6 ; bool __cond___7 ; int _a___0 ; bool __cond___11 ; bool __cond___12 ; bool __cond___13 ; int _a___1 ; uint32_t tmp___1 ; uint32_t tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ilk_init_lp_watermarks(dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45112L, 134217728U, 1); _a = 64; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 58524L, (uint32_t )((_a << 16) | _a), 1); tmp = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36912L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36912L, tmp | 2048U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, tmp___0 & 4294934527U, 1); __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_6256(); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_6256(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_6256(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28672L, (uint32_t )65536, 1); __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_6260(); } else { } __cond___6 = 0; if ((int )__cond___6) { __compiletime_assert_6260(); } else { } __cond___7 = 0; if ((int )__cond___7) { __compiletime_assert_6260(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28672L, (uint32_t )262144, 1); _a___0 = 64; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28676L, (uint32_t )((_a___0 << 16) | _a___0), 1); __cond___11 = 0; if ((int )__cond___11) { __compiletime_assert_6275(); } else { } __cond___12 = 0; if ((int )__cond___12) { __compiletime_assert_6275(); } else { } __cond___13 = 0; if ((int )__cond___13) { __compiletime_assert_6275(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28680L, (uint32_t )41943552, 1); _a___1 = 512; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 57732L, (uint32_t )((_a___1 << 16) | _a___1), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16528L, tmp___1 | 64U, 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270464L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270464L, tmp___2 | 16384U, 1); lpt_init_clock_gating(dev); return; } } extern void __compiletime_assert_6315(void) ; extern void __compiletime_assert_6376(void) ; static void ivybridge_init_clock_gating(struct drm_device *dev ) { struct 
drm_i915_private *dev_priv ; uint32_t snpcr ; int _a ; int _a___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool __cond___8 ; bool __cond___9 ; bool __cond___10 ; int _a___1 ; int _a___2 ; int _a___3 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; uint32_t tmp ; uint32_t tmp___0 ; int _a___4 ; bool __cond___29 ; bool __cond___30 ; bool __cond___31 ; struct drm_i915_private *__p___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ilk_init_lp_watermarks(dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270368L, 268435456U, 1); _a = 1024; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8336L, (uint32_t )((_a << 16) | _a), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270348L, 36U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 342U) { _a___0 = 8; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 57600L, (uint32_t )((_a___0 << 16) | _a___0), 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 338U) { _a___0 = 8; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 57600L, (uint32_t )((_a___0 << 16) | _a___0), 1); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 346U) { _a___0 = 8; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 57600L, (uint32_t )((_a___0 << 16) | _a___0), 1); } else { } } } __cond___8 = 0; if ((int )__cond___8) { __compiletime_assert_6315(); } else { } __cond___9 = 0; if ((int )__cond___9) { __compiletime_assert_6315(); } else { } __cond___10 = 0; if ((int )__cond___10) { __compiletime_assert_6315(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28672L, (uint32_t )65536, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28688L, 67109888U, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45084L, 1011351436U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45104L, 536870912U, 1); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 342U) { _a___1 = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 58612L, (uint32_t )((_a___1 << 16) | _a___1), 1); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___3->info.device_id) == 338U) { _a___1 = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 58612L, (uint32_t )((_a___1 << 16) | _a___1), 1); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___4->info.device_id) == 346U) { _a___1 = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 58612L, (uint32_t )((_a___1 << 16) | _a___1), 1); } else { _a___2 = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 58612L, (uint32_t )((_a___2 << 16) | _a___2), 1); _a___3 = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 62708L, (uint32_t )((_a___3 << 16) | _a___3), 1); } } } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 45108L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45108L, tmp & 4160749567U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37892L, 8192U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36912L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36912L, tmp___0 | 2048U, 1); g4x_disable_trickle_feed(dev); gen7_setup_fixed_func_scheduler(dev_priv); _a___4 = 64; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28676L, (uint32_t )((_a___4 << 16) | _a___4), 1); __cond___29 = 0; if ((int )__cond___29) { __compiletime_assert_6376(); } else { } __cond___30 = 0; if ((int )__cond___30) { __compiletime_assert_6376(); } else { } __cond___31 = 0; if ((int )__cond___31) { __compiletime_assert_6376(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28680L, (uint32_t )41943552, 1); 
snpcr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36876L, 1); snpcr = snpcr & 4288675839U; snpcr = snpcr | 2097152U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36876L, snpcr, 1); __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___5->pch_type != 5U) { cpt_init_clock_gating(dev); } else { } gen6_check_mch_setup(dev); return; } } static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv ) { { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), 268435456U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1598724L, 4U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2032640L, 0U, 1); return; } } extern void __compiletime_assert_6422(void) ; extern void __compiletime_assert_6468(void) ; static void valleyview_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int _a ; int _a___0 ; bool __cond___5 ; bool __cond___6 ; bool __cond___7 ; uint32_t tmp ; int _a___1 ; uint32_t tmp___0 ; uint32_t tmp___1 ; int _a___2 ; bool __cond___14 ; bool __cond___15 ; bool __cond___16 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; vlv_init_display_clock_gating(dev_priv); _a = 1024; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8336L, (uint32_t )((_a << 16) | _a), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270348L, 36U, 1); _a___0 = 32776; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 57600L, (uint32_t )((_a___0 << 16) | _a___0), 1); __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_6422(); } else { } __cond___6 = 0; if ((int )__cond___6) { __compiletime_assert_6422(); } else { } __cond___7 = 0; if ((int )__cond___7) { __compiletime_assert_6422(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28672L, (uint32_t )65536, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 45108L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45108L, tmp & 4160749567U, 1); 
_a___1 = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 58612L, (uint32_t )((_a___1 << 16) | _a___1), 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36912L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36912L, tmp___0 | 2048U, 1); gen7_setup_fixed_func_scheduler(dev_priv); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37892L, 8192U, 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37900L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37900L, tmp___1 | 33554432U, 1); _a___2 = 64; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28676L, (uint32_t )((_a___2 << 16) | _a___2), 1); __cond___14 = 0; if ((int )__cond___14) { __compiletime_assert_6468(); } else { } __cond___15 = 0; if ((int )__cond___15) { __compiletime_assert_6468(); } else { } __cond___16 = 0; if ((int )__cond___16) { __compiletime_assert_6468(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 28680L, (uint32_t )41943552, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 45072L, 13828096U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581152L, 256U, 1); return; } } static void cherryview_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; int _a ; uint32_t tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; vlv_init_display_clock_gating(dev_priv); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, tmp & 4294410239U, 1); _a = 4096; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8272L, (uint32_t )((_a << 16) | _a), 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37888L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37888L, tmp___0 | 128U, 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37936L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37936L, tmp___1 | 16384U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16420L, 
4026564607U, 1); return; } } extern void __compiletime_assert_6537(void) ; static void g4x_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t dspclk_gate ; struct drm_i915_private *__p ; int _a ; bool __cond___3 ; bool __cond___4 ; bool __cond___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25092L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25096L, 704U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25104L, 0U, 1); dspclk_gate = 268435468U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 10818U) { dspclk_gate = dspclk_gate | 262144U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), dspclk_gate, 1); _a = 256; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )((_a << 16) | _a), 1); __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_6537(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_6537(); } else { } __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_6537(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )65536, 1); g4x_disable_trickle_feed(dev); return; } } extern void __compiletime_assert_6555(void) ; static void crestline_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int _a ; bool __cond___2 ; bool __cond___3 ; bool __cond___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25092L, 536870912U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25096L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25104L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 
25108L, 0, 1); _a = 4; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8420L, (uint32_t )((_a << 16) | _a), 1); __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_6555(); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_6555(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_6555(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )65536, 1); return; } } extern void __compiletime_assert_6572(void) ; static void broadwater_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int _a ; bool __cond___2 ; bool __cond___3 ; bool __cond___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25092L, 1887502336U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25096L, 0U, 1); _a = 4; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8420L, (uint32_t )((_a << 16) | _a), 1); __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_6572(); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_6572(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_6572(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )65536, 1); return; } } extern void __compiletime_assert_6588(void) ; static void gen3_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 dstate ; uint32_t tmp ; int _a ; struct drm_i915_private *__p ; bool __cond___3 ; bool __cond___4 ; bool __cond___5 ; int _a___0 ; int _a___1 ; int _a___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 24836L, 1); dstate = tmp; dstate = dstate | 11U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 24836L, dstate, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { _a = 8; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8656L, 
(uint32_t )((_a << 16) | _a), 1); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_6588(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_6588(); } else { } __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_6588(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8656L, (uint32_t )65536, 1); _a___0 = 2048; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8384L, (uint32_t )((_a___0 << 16) | _a___0), 1); _a___1 = 2048; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8420L, (uint32_t )((_a___1 << 16) | _a___1), 1); _a___2 = 4; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8420L, (uint32_t )((_a___2 << 16) | _a___2), 1); return; } } extern void __compiletime_assert_6608(void) ; static void i85x_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int _a ; bool __cond___2 ; bool __cond___3 ; bool __cond___4 ; int _a___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25092L, 1U, 1); _a = 2; __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_6608(); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_6608(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_6608(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8420L, (uint32_t )(((_a << 16) | _a) | 65536), 1); _a___0 = 4; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8396L, (uint32_t )((_a___0 << 16) | _a___0), 1); return; } } static void i830_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int _a ; int _a___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), 8U, 1); _a = 4; _a___0 = 8; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8396L, (uint32_t )(((_a << 16) | _a) | ((_a___0 << 16) | _a___0)), 1); return; } } void 
intel_init_clock_gating(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->display.init_clock_gating != (unsigned long )((void (*)(struct drm_device * ))0)) { (*(dev_priv->display.init_clock_gating))(dev); } else { } return; } } void intel_suspend_hw(struct drm_device *dev ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 3U) { lpt_suspend_hw(dev); } else { } return; } } void intel_init_pm(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; long tmp ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; struct cxsr_latency const *tmp___0 ; struct drm_i915_private *__p___12 ; struct drm_i915_private *__p___13 ; struct drm_i915_private *__p___14 ; struct drm_i915_private *__p___15 ; struct drm_i915_private *__p___16 ; struct drm_i915_private *__p___17 ; struct drm_i915_private *__p___18 ; struct drm_i915_private *__p___19 ; struct drm_i915_private *__p___20 ; struct drm_i915_private *__p___21 ; struct drm_i915_private *__p___22 ; struct drm_i915_private *__p___23 ; struct drm_i915_private *__p___24 ; struct drm_i915_private *__p___25 ; struct drm_i915_private *__p___26 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_fbc_init(dev_priv); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { i915_pineview_get_mem_freq(dev); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char 
)__p->info.gen) == 5U) { i915_ironlake_get_mem_freq(dev); } else { } } __p___26 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___26->info.gen) > 8U) { skl_setup_wm_latency(dev); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { dev_priv->display.init_clock_gating = & bxt_init_clock_gating; } else { goto _L; } } else { _L: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { dev_priv->display.init_clock_gating = & skl_init_clock_gating; } else { } } dev_priv->display.update_wm = & skl_update_wm; dev_priv->display.update_sprite_wm = & skl_update_sprite_wm; } else { __p___25 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___25->pch_type != 0U) { ilk_setup_wm_latency(dev); __p___4 = to_i915((struct drm_device const *)dev); if ((((unsigned int )((unsigned char )__p___4->info.gen) == 5U && (unsigned int )dev_priv->wm.pri_latency[1] != 0U) && (unsigned int )dev_priv->wm.spr_latency[1] != 0U) && (unsigned int )dev_priv->wm.cur_latency[1] != 0U) { dev_priv->display.update_wm = & ilk_update_wm; dev_priv->display.update_sprite_wm = & ilk_update_sprite_wm; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((((unsigned int )((unsigned char )__p___5->info.gen) != 5U && (unsigned int )dev_priv->wm.pri_latency[0] != 0U) && (unsigned int )dev_priv->wm.spr_latency[0] != 0U) && (unsigned int )dev_priv->wm.cur_latency[0] != 0U) { dev_priv->display.update_wm = & ilk_update_wm; dev_priv->display.update_sprite_wm = & ilk_update_sprite_wm; } else { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_init_pm", "Failed to read display plane latency. 
Disable CxSR\n"); } else { } } } __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___10->info.gen) == 5U) { dev_priv->display.init_clock_gating = & ironlake_init_clock_gating; } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___9->info.gen) == 6U) { dev_priv->display.init_clock_gating = & gen6_init_clock_gating; } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { dev_priv->display.init_clock_gating = & ivybridge_init_clock_gating; } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { dev_priv->display.init_clock_gating = & haswell_init_clock_gating; } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 8U) { dev_priv->display.init_clock_gating = & broadwell_init_clock_gating; } else { } } } } } } else { __p___23 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___23 + 45UL) != 0U) { __p___24 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___24->info.gen) == 8U) { dev_priv->display.update_wm = & valleyview_update_wm; dev_priv->display.update_sprite_wm = & valleyview_update_sprite_wm; dev_priv->display.init_clock_gating = & cherryview_init_clock_gating; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___22 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___22 + 45UL) != 0U) { dev_priv->display.update_wm = & valleyview_update_wm; dev_priv->display.update_sprite_wm = & valleyview_update_sprite_wm; dev_priv->display.init_clock_gating = & valleyview_init_clock_gating; } else { __p___21 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___21 + 44UL) != 0U) { __p___11 = to_i915((struct drm_device const *)dev); tmp___0 = 
intel_get_cxsr_latency((unsigned int )((unsigned short )__p___11->info.device_id) == 40961U, (int )dev_priv->is_ddr3, (int )dev_priv->fsb_freq, (int )dev_priv->mem_freq); if ((unsigned long )tmp___0 == (unsigned long )((struct cxsr_latency const *)0)) { printk("\016[drm] failed to find known CxSR latency (found ddr%s fsb freq %d, mem freq %d), disabling CxSR\n", dev_priv->is_ddr3 == 1U ? (char *)"3" : (char *)"2", dev_priv->fsb_freq, dev_priv->mem_freq); intel_set_memory_cxsr(dev_priv, 0); dev_priv->display.update_wm = (void (*)(struct drm_crtc * ))0; } else { dev_priv->display.update_wm = & pineview_update_wm; } dev_priv->display.init_clock_gating = & gen3_init_clock_gating; } else { __p___20 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___20 + 44UL) != 0U) { dev_priv->display.update_wm = & g4x_update_wm; dev_priv->display.init_clock_gating = & g4x_init_clock_gating; } else { __p___19 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___19->info.gen) == 4U) { dev_priv->display.update_wm = & i965_update_wm; __p___13 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___13 + 45UL) != 0U) { dev_priv->display.init_clock_gating = & crestline_init_clock_gating; } else { __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___12 + 45UL) != 0U) { dev_priv->display.init_clock_gating = & broadwater_init_clock_gating; } else { } } } else { __p___18 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___18->info.gen) == 3U) { dev_priv->display.update_wm = & i9xx_update_wm; dev_priv->display.get_fifo_size = & i9xx_get_fifo_size; dev_priv->display.init_clock_gating = & gen3_init_clock_gating; } else { __p___17 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___17->info.gen) == 2U) { __p___14 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char 
*)__p___14 + 38UL) == 1U) { dev_priv->display.update_wm = & i845_update_wm; dev_priv->display.get_fifo_size = & i845_get_fifo_size; } else { dev_priv->display.update_wm = & i9xx_update_wm; dev_priv->display.get_fifo_size = & i830_get_fifo_size; } __p___15 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___15 + 44UL) != 0U) { dev_priv->display.init_clock_gating = & i85x_init_clock_gating; } else { __p___16 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___16->info.device_id) == 9586U) { dev_priv->display.init_clock_gating = & i85x_init_clock_gating; } else { dev_priv->display.init_clock_gating = & i830_init_clock_gating; } } } else { drm_err("unexpected fall-through in intel_init_pm\n"); } } } } } } } } } return; } } int sandybridge_pcode_read(struct drm_i915_private *dev_priv , u32 mbox , u32 *val ) { int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; uint32_t tmp___2 ; unsigned long timeout__ ; unsigned long tmp___3 ; int ret__ ; uint32_t tmp___4 ; bool tmp___5 ; uint32_t tmp___6 ; { tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 6747, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278244L, 1); if ((int )tmp___2 < 0) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("sandybridge_pcode_read", "warning: pcode (read) mailbox access failed\n"); } else { } return (-11); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278248L, *val, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278252L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278244L, mbox | 2147483648U, 1); tmp___3 = msecs_to_jiffies(500U); timeout__ = (tmp___3 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52560; ldv_52559: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278244L, 1); if ((int )tmp___4 < 0) { ret__ = -110; } else { } goto ldv_52558; } else { } tmp___5 = drm_can_sleep___0(); if ((int )tmp___5) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52560: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278244L, 1); if ((int )tmp___6 < 0) { goto ldv_52559; } else { } ldv_52558: ; if (ret__ != 0) { drm_err("timeout waiting for pcode read (%d) to finish\n", mbox); return (-110); } else { } *val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278248L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278248L, 0U, 1); return (0); } } int sandybridge_pcode_write(struct drm_i915_private *dev_priv , u32 mbox , u32 val ) { int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; uint32_t tmp___2 ; unsigned long timeout__ ; unsigned long tmp___3 ; int ret__ ; uint32_t tmp___4 ; bool tmp___5 ; uint32_t tmp___6 ; { tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_pm.c", 6772, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278244L, 1); if ((int )tmp___2 < 0) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { 
/* Tail of sandybridge_pcode_write() (function head is on the previous
 * generated line): log a debug warning when the mailbox is still busy,
 * otherwise write the payload, kick the mailbox (bit 31 of register
 * 1278244 / 0x138124 — presumably the gen6 pcode mailbox; data register is
 * 1278248 — confirm against i915 register definitions), and poll the busy
 * bit for up to 500 ms.  The goto/label structure is a CIL lowering of the
 * driver's wait-for macro; do not reorder statements. */
drm_ut_debug_printk("sandybridge_pcode_write", "warning: pcode (write) mailbox access failed\n"); } else { }
return (-11); } else { }
/* Write the payload first, then set the busy bit together with the command. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278248L, val, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278244L, mbox | 2147483648U, 1);
tmp___3 = msecs_to_jiffies(500U);
timeout__ = (tmp___3 + (unsigned long )jiffies) + 1UL;
ret__ = 0;
goto ldv_52580; /* enter polling loop at the condition check */
ldv_52579: ;
if ((long )(timeout__ - (unsigned long )jiffies) < 0L) {
/* Timed out: sample the busy bit one last time before giving up. */
tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278244L, 1);
if ((int )tmp___4 < 0) { ret__ = -110; } else { }
goto ldv_52578;
} else { }
tmp___5 = drm_can_sleep___0();
if ((int )tmp___5) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); }
ldv_52580:
/* (int)x < 0 tests bit 31, i.e. mailbox still busy. */
tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278244L, 1);
if ((int )tmp___6 < 0) { goto ldv_52579; } else { }
ldv_52578: ;
if (ret__ != 0) { drm_err("timeout waiting for pcode write (%d) to finish\n", mbox); return (-110); } else { }
/* Clear the data register on success. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278248L, 0U, 1);
return (0); } }
/* Map a CZ clock frequency (MHz) to the divider used by the vlv/chv
 * frequency conversions below; returns -1 for an unrecognized clock. */
static int vlv_gpu_freq_div(unsigned int czclk_freq ) { {
switch (czclk_freq) {
case 200U: ;
return (10);
case 267U: ;
return (12);
case 320U: ; /* 320 and 333 MHz share a divider */
case 333U: ;
return (16);
case 400U: ;
return (20);
default: ;
return (-1);
} } }
/* Baytrail: convert a PCU frequency opcode 'val' to MHz.  The __x/__d pairs
 * are CIL expansions of round-to-nearest division: czclk_freq is
 * mem_freq / 4 (rounded), and the result is (val - 183) * czclk_freq / div,
 * rounded to nearest.  A negative divider (unknown clock) is propagated. */
static int byt_gpu_freq(struct drm_i915_private *dev_priv , int val ) {
int div ;
int czclk_freq ;
unsigned int __x ;
int __d ;
int __x___0 ;
int __d___0 ;
{
__x = dev_priv->mem_freq;
__d = 4;
czclk_freq = (int )(((unsigned int )(__d / 2) + __x) / (unsigned int )__d);
div = vlv_gpu_freq_div((unsigned int )czclk_freq);
if (div < 0) { return (div); } else { } /* unknown CZ clock */
__x___0 = (val + -183) * czclk_freq;
__d___0 = div;
return (__x___0 > 0 ?
(__d___0 / 2 + __x___0) / __d___0 : (__x___0 - __d___0 / 2) / __d___0); } }
/* Inverse of byt_gpu_freq(): MHz back to PCU opcode, same rounded-division
 * expansion, offset by 183. */
static int byt_freq_opcode(struct drm_i915_private *dev_priv , int val ) {
int mul ;
int czclk_freq ;
unsigned int __x ;
int __d ;
int __x___0 ;
int __d___0 ;
{
__x = dev_priv->mem_freq;
__d = 4;
czclk_freq = (int )(((unsigned int )(__d / 2) + __x) / (unsigned int )__d);
mul = vlv_gpu_freq_div((unsigned int )czclk_freq);
if (mul < 0) { return (mul); } else { } /* unknown CZ clock */
__x___0 = mul * val;
__d___0 = czclk_freq;
return ((__x___0 > 0 ? (__d___0 / 2 + __x___0) / __d___0 : (__x___0 - __d___0 / 2) / __d___0) + 183);
} }
/* Cherryview: opcode to MHz.  Reads rps.cz_freq directly; halves the vlv
 * divider and rounds via the same CIL-expanded round-to-nearest pattern. */
static int chv_gpu_freq(struct drm_i915_private *dev_priv , int val ) {
int div ;
int czclk_freq ;
int tmp ;
int __x ;
int __d ;
{
czclk_freq = (int )dev_priv->rps.cz_freq;
tmp = vlv_gpu_freq_div((unsigned int )czclk_freq);
div = tmp / 2;
if (div < 0) { return (div); } else { } /* unknown CZ clock */
__x = czclk_freq * val;
__d = div * 2;
return ((__x > 0 ? (__d / 2 + __x) / __d : (__x - __d / 2) / __d) / 2);
} }
/* Cherryview: MHz back to opcode (inverse of chv_gpu_freq()).  The return
 * expression continues on the next generated line. */
static int chv_freq_opcode(struct drm_i915_private *dev_priv , int val ) {
int mul ;
int czclk_freq ;
int tmp ;
int __x ;
int __d ;
{
czclk_freq = (int )dev_priv->rps.cz_freq;
tmp = vlv_gpu_freq_div((unsigned int )czclk_freq);
mul = tmp / 2;
if (mul < 0) { return (mul); } else { } /* unknown CZ clock */
__x = (val * 2) * mul;
__d = czclk_freq;
return ((__x > 0 ?
(__d / 2 + __x) / __d : (__x - __d / 2) / __d) * 2); } }
/* Public opcode-to-MHz conversion.  CIL-flattened dispatch order:
 * gen 9      -> val * 50 / 3
 * gen 8 + platform flag (byte at offset 45 of dev_priv) -> chv_gpu_freq()
 * same flag, not gen 8                                  -> byt_gpu_freq()
 * everything else                                       -> val * 50
 * NOTE(review): the raw byte-offset tests, e.g.
 * *((unsigned char *)__p + 45UL), are CIL lowerings of bitfield platform
 * checks (IS_VALLEYVIEW/IS_CHERRYVIEW-style) — confirm against the original
 * driver source before changing any offset. */
int intel_gpu_freq(struct drm_i915_private *dev_priv , int val ) {
int tmp ;
int tmp___0 ;
struct drm_i915_private *__p ;
struct drm_i915_private *__p___0 ;
struct drm_i915_private *__p___1 ;
struct drm_i915_private *__p___2 ;
{
__p___2 = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) {
return ((val * 50) / 3);
} else {
__p___0 = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) {
__p___1 = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) {
tmp = chv_gpu_freq(dev_priv, val);
return (tmp);
} else { goto _L; }
} else {
_L: /* CIL Label */
__p = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) {
tmp___0 = byt_gpu_freq(dev_priv, val);
return (tmp___0);
} else {
return (val * 50);
} } } } }
/* Inverse of intel_gpu_freq(): MHz to PCU opcode, mirrored dispatch
 * (gen 9 -> val * 3 / 50; CHV -> chv_freq_opcode(); VLV ->
 * byt_freq_opcode(); default -> val / 50). */
int intel_freq_opcode(struct drm_i915_private *dev_priv , int val ) {
int tmp ;
int tmp___0 ;
struct drm_i915_private *__p ;
struct drm_i915_private *__p___0 ;
struct drm_i915_private *__p___1 ;
struct drm_i915_private *__p___2 ;
{
__p___2 = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) {
return ((val * 3) / 50);
} else {
__p___0 = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) {
__p___1 = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) {
tmp = chv_freq_opcode(dev_priv, val);
return (tmp);
} else { goto _L; }
} else {
_L: /* CIL Label */
__p = to_i915((struct drm_device const *)dev_priv->dev);
if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) {
tmp___0 = byt_freq_opcode(dev_priv, val);
return (tmp___0);
} else {
return (val / 50);
} } } } }
/* Head of __intel_rps_boost_work(); the parameter list continues on the
 * next generated line. */
static void __intel_rps_boost_work(struct
work_struct *work ) { struct request_boost *boost ; struct work_struct const *__mptr ; struct drm_i915_gem_request *req ; struct drm_i915_private *tmp ; bool tmp___0 ; int tmp___1 ; { __mptr = (struct work_struct const *)work; boost = (struct request_boost *)__mptr; req = boost->req; tmp___0 = i915_gem_request_completed(req, 1); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { tmp = to_i915((struct drm_device const *)(req->ring)->dev); gen6_rps_boost(tmp, (struct intel_rps_client *)0, req->emitted_jiffies); } else { } i915_gem_request_unreference__unlocked(req); kfree((void const *)boost); return; } } void intel_queue_rps_boost_for_request(struct drm_device *dev , struct drm_i915_gem_request *req ) { struct request_boost *boost ; struct drm_i915_private *__p ; bool tmp ; void *tmp___0 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; struct drm_i915_private *tmp___1 ; { if ((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0)) { return; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return; } else { } } tmp = i915_gem_request_completed(req, 1); if ((int )tmp) { return; } else { } tmp___0 = kmalloc(88UL, 32U); boost = (struct request_boost *)tmp___0; if ((unsigned long )boost == (unsigned long )((struct request_boost *)0)) { return; } else { } i915_gem_request_reference(req); boost->req = req; __init_work(& boost->work, 0); __constr_expr_0___0.counter = 137438953408L; boost->work.data = __constr_expr_0___0; lockdep_init_map(& boost->work.lockdep_map, "(&boost->work)", & __key, 0); INIT_LIST_HEAD(& boost->work.entry); boost->work.func = & __intel_rps_boost_work; tmp___1 = to_i915((struct drm_device const *)dev); queue_work(tmp___1->wq, & boost->work); return; } } void intel_pm_setup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; 
atomic_long_t __constr_expr_0___0 ; struct lock_class_key __key___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mutex_init(& dev_priv->rps.hw_lock, "&dev_priv->rps.hw_lock", & __key); spinlock_check(& dev_priv->rps.client_lock); __raw_spin_lock_init(& dev_priv->rps.client_lock.__annonCompField18.rlock, "&(&dev_priv->rps.client_lock)->rlock", & __key___0); __init_work(& dev_priv->rps.delayed_resume_work.work, 0); __constr_expr_0___0.counter = 137438953408L; dev_priv->rps.delayed_resume_work.work.data = __constr_expr_0___0; lockdep_init_map(& dev_priv->rps.delayed_resume_work.work.lockdep_map, "(&(&dev_priv->rps.delayed_resume_work)->work)", & __key___1, 0); INIT_LIST_HEAD(& dev_priv->rps.delayed_resume_work.work.entry); dev_priv->rps.delayed_resume_work.work.func = & intel_gen6_powersave_work; init_timer_key(& dev_priv->rps.delayed_resume_work.timer, 2097152U, "(&(&dev_priv->rps.delayed_resume_work)->timer)", & __key___2); dev_priv->rps.delayed_resume_work.timer.function = & delayed_work_timer_fn; dev_priv->rps.delayed_resume_work.timer.data = (unsigned long )(& dev_priv->rps.delayed_resume_work); INIT_LIST_HEAD(& dev_priv->rps.clients); INIT_LIST_HEAD(& dev_priv->rps.semaphores.link); INIT_LIST_HEAD(& dev_priv->rps.mmioflips.link); dev_priv->pm.suspended = 0; return; } } void work_init_1(void) { { ldv_work_1_0 = 0; ldv_work_1_1 = 0; ldv_work_1_2 = 0; ldv_work_1_3 = 0; return; } } void invoke_work_1(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_1_0 == 2 || ldv_work_1_0 == 3) { ldv_work_1_0 = 4; __intel_rps_boost_work(ldv_work_struct_1_0); ldv_work_1_0 = 1; } else { } goto ldv_52730; case 1: ; if (ldv_work_1_1 == 2 || ldv_work_1_1 == 3) { ldv_work_1_1 = 4; __intel_rps_boost_work(ldv_work_struct_1_0); ldv_work_1_1 = 1; } else { } goto ldv_52730; case 2: ; if (ldv_work_1_2 == 2 || ldv_work_1_2 == 3) { ldv_work_1_2 = 4; __intel_rps_boost_work(ldv_work_struct_1_0); ldv_work_1_2 = 1; } else { } goto ldv_52730; 
case 3: ; if (ldv_work_1_3 == 2 || ldv_work_1_3 == 3) { ldv_work_1_3 = 4; __intel_rps_boost_work(ldv_work_struct_1_0); ldv_work_1_3 = 1; } else { } goto ldv_52730; default: ldv_stop(); } ldv_52730: ; return; } } void call_and_disable_work_1(struct work_struct *work ) { { if ((ldv_work_1_0 == 2 || ldv_work_1_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_0) { __intel_rps_boost_work(work); ldv_work_1_0 = 1; return; } else { } if ((ldv_work_1_1 == 2 || ldv_work_1_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_1) { __intel_rps_boost_work(work); ldv_work_1_1 = 1; return; } else { } if ((ldv_work_1_2 == 2 || ldv_work_1_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_2) { __intel_rps_boost_work(work); ldv_work_1_2 = 1; return; } else { } if ((ldv_work_1_3 == 2 || ldv_work_1_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_3) { __intel_rps_boost_work(work); ldv_work_1_3 = 1; return; } else { } return; } } void disable_work_1(struct work_struct *work ) { { if ((ldv_work_1_0 == 3 || ldv_work_1_0 == 2) && (unsigned long )ldv_work_struct_1_0 == (unsigned long )work) { ldv_work_1_0 = 1; } else { } if ((ldv_work_1_1 == 3 || ldv_work_1_1 == 2) && (unsigned long )ldv_work_struct_1_1 == (unsigned long )work) { ldv_work_1_1 = 1; } else { } if ((ldv_work_1_2 == 3 || ldv_work_1_2 == 2) && (unsigned long )ldv_work_struct_1_2 == (unsigned long )work) { ldv_work_1_2 = 1; } else { } if ((ldv_work_1_3 == 3 || ldv_work_1_3 == 2) && (unsigned long )ldv_work_struct_1_3 == (unsigned long )work) { ldv_work_1_3 = 1; } else { } return; } } void disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 3 || ldv_work_2_0 == 2) && (unsigned long )ldv_work_struct_2_0 == (unsigned long )work) { ldv_work_2_0 = 1; } else { } if ((ldv_work_2_1 == 3 || ldv_work_2_1 == 2) && (unsigned long )ldv_work_struct_2_1 == (unsigned long )work) { ldv_work_2_1 = 1; } else { } if ((ldv_work_2_2 == 3 || ldv_work_2_2 == 2) && 
(unsigned long )ldv_work_struct_2_2 == (unsigned long )work) { ldv_work_2_2 = 1; } else { } if ((ldv_work_2_3 == 3 || ldv_work_2_3 == 2) && (unsigned long )ldv_work_struct_2_3 == (unsigned long )work) { ldv_work_2_3 = 1; } else { } return; } } void call_and_disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 2 || ldv_work_2_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_0) { intel_gen6_powersave_work(work); ldv_work_2_0 = 1; return; } else { } if ((ldv_work_2_1 == 2 || ldv_work_2_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_1) { intel_gen6_powersave_work(work); ldv_work_2_1 = 1; return; } else { } if ((ldv_work_2_2 == 2 || ldv_work_2_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_2) { intel_gen6_powersave_work(work); ldv_work_2_2 = 1; return; } else { } if ((ldv_work_2_3 == 2 || ldv_work_2_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_3) { intel_gen6_powersave_work(work); ldv_work_2_3 = 1; return; } else { } return; } } void activate_work_1(struct work_struct *work , int state ) { { if (ldv_work_1_0 == 0) { ldv_work_struct_1_0 = work; ldv_work_1_0 = state; return; } else { } if (ldv_work_1_1 == 0) { ldv_work_struct_1_1 = work; ldv_work_1_1 = state; return; } else { } if (ldv_work_1_2 == 0) { ldv_work_struct_1_2 = work; ldv_work_1_2 = state; return; } else { } if (ldv_work_1_3 == 0) { ldv_work_struct_1_3 = work; ldv_work_1_3 = state; return; } else { } return; } } void activate_work_2(struct work_struct *work , int state ) { { if (ldv_work_2_0 == 0) { ldv_work_struct_2_0 = work; ldv_work_2_0 = state; return; } else { } if (ldv_work_2_1 == 0) { ldv_work_struct_2_1 = work; ldv_work_2_1 = state; return; } else { } if (ldv_work_2_2 == 0) { ldv_work_struct_2_2 = work; ldv_work_2_2 = state; return; } else { } if (ldv_work_2_3 == 0) { ldv_work_struct_2_3 = work; ldv_work_2_3 = state; return; } else { } return; } } void call_and_disable_all_2(int state ) { { if 
(ldv_work_2_0 == state) { call_and_disable_work_2(ldv_work_struct_2_0); } else { } if (ldv_work_2_1 == state) { call_and_disable_work_2(ldv_work_struct_2_1); } else { } if (ldv_work_2_2 == state) { call_and_disable_work_2(ldv_work_struct_2_2); } else { } if (ldv_work_2_3 == state) { call_and_disable_work_2(ldv_work_struct_2_3); } else { } return; } } void invoke_work_2(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_2_0 == 2 || ldv_work_2_0 == 3) { ldv_work_2_0 = 4; intel_gen6_powersave_work(ldv_work_struct_2_0); ldv_work_2_0 = 1; } else { } goto ldv_52768; case 1: ; if (ldv_work_2_1 == 2 || ldv_work_2_1 == 3) { ldv_work_2_1 = 4; intel_gen6_powersave_work(ldv_work_struct_2_0); ldv_work_2_1 = 1; } else { } goto ldv_52768; case 2: ; if (ldv_work_2_2 == 2 || ldv_work_2_2 == 3) { ldv_work_2_2 = 4; intel_gen6_powersave_work(ldv_work_struct_2_0); ldv_work_2_2 = 1; } else { } goto ldv_52768; case 3: ; if (ldv_work_2_3 == 2 || ldv_work_2_3 == 3) { ldv_work_2_3 = 4; intel_gen6_powersave_work(ldv_work_struct_2_0); ldv_work_2_3 = 1; } else { } goto ldv_52768; default: ldv_stop(); } ldv_52768: ; return; } } void work_init_2(void) { { ldv_work_2_0 = 0; ldv_work_2_1 = 0; ldv_work_2_2 = 0; ldv_work_2_3 = 0; return; } } void call_and_disable_all_1(int state ) { { if (ldv_work_1_0 == state) { call_and_disable_work_1(ldv_work_struct_1_0); } else { } if (ldv_work_1_1 == state) { call_and_disable_work_1(ldv_work_struct_1_1); } else { } if (ldv_work_1_2 == state) { call_and_disable_work_1(ldv_work_struct_1_2); } else { } if (ldv_work_1_3 == state) { call_and_disable_work_1(ldv_work_struct_1_3); } else { } return; } } bool ldv_queue_work_on_81(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool 
ldv_queue_delayed_work_on_82(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_83(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_84(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_85(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_flush_delayed_work_86(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___1(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile 
("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } bool ldv_queue_work_on_97(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_99(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_98(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_101(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_100(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void outb(unsigned char value , int port ) { { __asm__ volatile ("outb %b0, %w1": : "a" (value), "Nd" (port)); return; } } __inline static unsigned char inb(int port ) { unsigned char value ; { __asm__ volatile ("inb %w1, %b0": "=a" (value): "Nd" (port)); return (value); } } extern int __pm_runtime_suspend(struct device * , int ) ; extern int __pm_runtime_resume(struct device * , int ) ; extern int __pm_runtime_set_status(struct device * , unsigned int ) ; extern void __pm_runtime_disable(struct device * , bool ) ; extern void 
__pm_runtime_use_autosuspend(struct device * , bool ) ; extern void pm_runtime_set_autosuspend_delay(struct device * , int ) ; __inline static void pm_runtime_get_noresume(struct device *dev ) { { atomic_inc(& dev->power.usage_count); return; } } __inline static int pm_runtime_get_sync(struct device *dev ) { int tmp ; { tmp = __pm_runtime_resume(dev, 4); return (tmp); } } __inline static int pm_runtime_put_autosuspend(struct device *dev ) { int tmp ; { tmp = __pm_runtime_suspend(dev, 13); return (tmp); } } __inline static int pm_runtime_set_active(struct device *dev ) { int tmp ; { tmp = __pm_runtime_set_status(dev, 0U); return (tmp); } } __inline static void pm_runtime_disable(struct device *dev ) { { __pm_runtime_disable(dev, 1); return; } } __inline static void pm_runtime_use_autosuspend(struct device *dev ) { { __pm_runtime_use_autosuspend(dev, 1); return; } } extern int vga_get(struct pci_dev * , unsigned int , int ) ; __inline static int vga_get_uninterruptible(struct pci_dev *pdev , unsigned int rsrc ) { int tmp ; { tmp = vga_get(pdev, rsrc, 0); return (tmp); } } extern void vga_put(struct pci_dev * , unsigned int ) ; __inline static bool drm_can_sleep___1(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_40315; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40315; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40315; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40315; default: __bad_percpu_size(); } ldv_40315: pscr_ret__ = pfo_ret__; goto ldv_40321; case 2UL: ; switch (4UL) { case 1UL: __asm__ 
("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40325; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40325; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40325; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40325; default: __bad_percpu_size(); } ldv_40325: pscr_ret__ = pfo_ret_____0; goto ldv_40321; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40334; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40334; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40334; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40334; default: __bad_percpu_size(); } ldv_40334: pscr_ret__ = pfo_ret_____1; goto ldv_40321; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40343; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40343; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40343; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40343; default: __bad_percpu_size(); } ldv_40343: pscr_ret__ = pfo_ret_____2; goto ldv_40321; default: __bad_size_call_parameter(); goto ldv_40321; } ldv_40321: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___1(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } __inline static bool intel_irqs_enabled(struct drm_i915_private *dev_priv ) { { return (dev_priv->pm.irqs_enabled); } } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv , unsigned int pipe_mask ) ; void assert_pll(struct 
drm_i915_private *dev_priv , enum pipe pipe , bool state ) ; enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv ) ; void assert_csr_loaded(struct drm_i915_private *dev_priv ) ; void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv ) ; int intel_power_domains_init(struct drm_i915_private *dev_priv ) ; void intel_power_domains_fini(struct drm_i915_private *dev_priv ) ; void intel_runtime_pm_enable(struct drm_i915_private *dev_priv ) ; bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) ; bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) ; void intel_display_power_get(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) ; void intel_display_power_put(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) ; void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv ) ; void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv ) ; void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv ) ; void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv ) ; void i915_redisable_vga_power_on(struct drm_device *dev ) ; bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv , int power_well_id ) ; static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { uint32_t tmp ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); return (tmp == 3221225472U); } } bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) { struct i915_power_domains *power_domains ; struct i915_power_well *power_well ; bool is_enabled ; int i ; { if ((int )dev_priv->pm.suspended) { return (0); } else { } power_domains = & dev_priv->power_domains; is_enabled = 1; i = power_domains->power_well_count + -1; goto 
ldv_48345; ldv_48344: ; if ((int )(power_well->domains >> (int )domain) & 1) { if ((int )power_well->always_on) { goto ldv_48342; } else { } if (! power_well->hw_enabled) { is_enabled = 0; goto ldv_48343; } else { } } else { } ldv_48342: i = i - 1; ldv_48345: ; if (i >= 0) { power_well = power_domains->power_wells + (unsigned long )i; if ((unsigned long )power_well != (unsigned long )((struct i915_power_well *)0)) { goto ldv_48344; } else { goto ldv_48343; } } else { } ldv_48343: ; return (is_enabled); } } bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) { struct i915_power_domains *power_domains ; bool ret ; { power_domains = & dev_priv->power_domains; mutex_lock_nested(& power_domains->lock, 0U); ret = __intel_display_power_is_enabled(dev_priv, domain); mutex_unlock(& power_domains->lock); return (ret); } } void intel_display_set_init_power(struct drm_i915_private *dev_priv , bool enable ) { { if ((int )dev_priv->power_domains.init_power_on == (int )enable) { return; } else { } if ((int )enable) { intel_display_power_get(dev_priv, 28); } else { intel_display_power_put(dev_priv, 28); } dev_priv->power_domains.init_power_on = enable; return; } } static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; unsigned char tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = dev_priv->dev; vga_get_uninterruptible(dev->pdev, 1U); tmp = inb(972); outb((int )tmp, 962); vga_put(dev->pdev, 1U); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { gen8_irq_power_well_post_enable(dev_priv, 6U); } else { } } else { } return; } } static void skl_power_well_post_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { struct drm_device *dev ; unsigned char tmp 
; { dev = dev_priv->dev; if (power_well->data == 15UL) { vga_get_uninterruptible(dev->pdev, 1U); tmp = inb(972); outb((int )tmp, 962); vga_put(dev->pdev, 1U); gen8_irq_power_well_post_enable(dev_priv, 6U); } else { } if (power_well->data == 14UL) { intel_prepare_ddi(dev); gen8_irq_power_well_post_enable(dev_priv, 1U); } else { } return; } } static void hsw_set_power_well(struct drm_i915_private *dev_priv , struct i915_power_well *power_well , bool enable ) { bool is_enabled ; bool enable_requested ; uint32_t tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; long tmp___5 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); is_enabled = (tmp & 1073741824U) != 0U; enable_requested = (tmp & 2147483648U) != 0U; if ((int )enable) { if (! enable_requested) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283652L, 2147483648U, 1); } else { } if (! is_enabled) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("hsw_set_power_well", "Enabling power well\n"); } else { } tmp___1 = msecs_to_jiffies(20U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48396; ldv_48395: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); if ((tmp___2 & 1073741824U) == 0U) { ret__ = -110; } else { } goto ldv_48394; } else { } tmp___3 = drm_can_sleep___1(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48396: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); if ((tmp___4 & 1073741824U) == 0U) { goto ldv_48395; } else { } ldv_48394: ; if (ret__ != 0) { drm_err("Timeout enabling power well\n"); } else { } hsw_power_well_post_enable(dev_priv); } else { } } else if ((int )enable_requested) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283652L, 0U, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 0); tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("hsw_set_power_well", "Requesting to disable the power well\n"); } else { } } else { } return; } } static void assert_can_enable_dc9(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; int __ret_warn_on ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; uint32_t tmp___1 ; long tmp___2 ; int __ret_warn_on___1 ; uint32_t tmp___3 ; long tmp___4 ; int __ret_warn_on___2 ; uint32_t tmp___5 ; long tmp___6 ; int __ret_warn_on___3 ; bool tmp___7 ; long tmp___8 ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { tmp = 1; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 9U) { tmp = 1; } else { tmp = 0; } } __ret_warn_on = tmp; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 366, "Platform doesn\'t support DC9.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); __ret_warn_on___0 = (tmp___1 & 8U) != 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 368, "DC9 already programmed to be enabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___3 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); __ret_warn_on___1 = (int )tmp___3 & 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 370, "DC5 still not disabled to enable DC9.\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); __ret_warn_on___2 = tmp___5 != 0U; tmp___6 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 371, "Power well on.\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); tmp___7 = intel_irqs_enabled(dev_priv); __ret_warn_on___3 = (int )tmp___7; tmp___8 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 372, "Interrupts not disabled yet.\n"); } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); return; } } static void assert_can_disable_dc9(struct drm_i915_private *dev_priv ) { int __ret_warn_on ; bool tmp ; long tmp___0 ; int __ret_warn_on___0 ; uint32_t tmp___1 ; long tmp___2 ; int __ret_warn_on___1 ; uint32_t tmp___3 ; long tmp___4 ; { tmp = intel_irqs_enabled(dev_priv); __ret_warn_on = (int )tmp; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 385, "Interrupts not disabled yet.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); __ret_warn_on___0 = (tmp___1 & 8U) == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 387, "DC9 already programmed to be disabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); __ret_warn_on___1 = (int )tmp___3 & 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 389, "DC5 still not disabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); return; } } void bxt_enable_dc9(struct drm_i915_private *dev_priv ) { uint32_t val ; long tmp ; { assert_can_enable_dc9(dev_priv); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("bxt_enable_dc9", "Enabling DC9\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); val = val | 8U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283908L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 0); return; } } void bxt_disable_dc9(struct drm_i915_private *dev_priv ) { uint32_t val ; long tmp ; { 
assert_can_disable_dc9(dev_priv); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("bxt_disable_dc9", "Disabling DC9\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); val = val & 4294967287U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283908L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 0); return; } } static void gen9_set_dc_state_debugmask_memory_up(struct drm_i915_private *dev_priv ) { uint32_t val ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283936L, 1); if ((val & 2U) == 0U) { val = val | 2U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283936L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283936L, 0); } else { } return; } } static void assert_can_enable_dc5(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; bool pg2_enabled ; bool tmp ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp___0 ; int __ret_warn_on___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; int tmp___1 ; long tmp___2 ; int __ret_warn_on___1 ; long tmp___3 ; int __ret_warn_on___2 ; uint32_t tmp___4 ; long tmp___5 ; int __ret_warn_on___3 ; long tmp___6 ; { dev = dev_priv->dev; tmp = intel_display_power_well_is_enabled(dev_priv, 15); pg2_enabled = tmp; __p = to_i915((struct drm_device const *)dev); __ret_warn_on = (unsigned int )*((unsigned char *)__p + 45UL) == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 448, "Platform doesn\'t support DC5.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); 
__p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 6U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { goto _L; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) != 8U) { _L: /* CIL Label */ __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } } else { tmp___1 = 0; } } } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on___0 = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 449, "Runtime PM not enabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __ret_warn_on___1 = (int )pg2_enabled; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 450, "PG2 not disabled to enable DC5.\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); __ret_warn_on___2 = (int )tmp___4 & 1; tmp___5 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___5 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 453, "DC5 already programmed to be enabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); __ret_warn_on___3 = (int )dev_priv->pm.suspended; tmp___6 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 455, "DC5 cannot be enabled, if platform is runtime-suspended.\n"); } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); assert_csr_loaded(dev_priv); return; } } static void assert_can_disable_dc5(struct drm_i915_private *dev_priv ) { bool pg2_enabled ; bool tmp ; int __ret_warn_on ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; { tmp = intel_display_power_well_is_enabled(dev_priv, 15); pg2_enabled = tmp; if ((int )dev_priv->power_domains.initializing) { return; } else { } __ret_warn_on = ! 
pg2_enabled; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 471, "PG2 not enabled to disable DC5.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )dev_priv->pm.suspended; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 473, "Disabling of DC5 while platform is runtime-suspended should never happen.\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return; } } static void gen9_enable_dc5(struct drm_i915_private *dev_priv ) { uint32_t val ; long tmp ; { assert_can_enable_dc5(dev_priv); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen9_enable_dc5", "Enabling DC5\n"); } else { } gen9_set_dc_state_debugmask_memory_up(dev_priv); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); val = val & 4294967292U; val = val | 1U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283908L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 0); return; } } static void gen9_disable_dc5(struct drm_i915_private *dev_priv ) { uint32_t val ; long tmp ; { assert_can_disable_dc5(dev_priv); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen9_disable_dc5", "Disabling DC5\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); val = val & 4294967294U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283908L, val, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 0); return; } } static void assert_can_enable_dc6(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; int __ret_warn_on___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; int tmp___0 ; long tmp___1 ; int __ret_warn_on___1 ; uint32_t tmp___2 ; long tmp___3 ; int __ret_warn_on___2 ; uint32_t tmp___4 ; long tmp___5 ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); __ret_warn_on = (unsigned int )*((unsigned char *)__p + 45UL) == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 511, "Platform doesn\'t support DC6.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 6U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { goto _L; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) != 8U) { _L: /* CIL Label */ __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } } else { tmp___0 = 0; } } } else { tmp___0 = 0; } } else 
{ tmp___0 = 0; } __ret_warn_on___0 = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 512, "Runtime PM not enabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295936L, 1); __ret_warn_on___1 = (int )tmp___2 < 0; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 514, "Backlight is not disabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); __ret_warn_on___2 = (tmp___4 & 2U) != 0U; tmp___5 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 516, "DC6 already programmed to be enabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); assert_csr_loaded(dev_priv); return; } } static void assert_can_disable_dc6(struct drm_i915_private *dev_priv ) { int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; { if ((int )dev_priv->power_domains.initializing) { return; } else { } assert_csr_loaded(dev_priv); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); __ret_warn_on = (tmp & 2U) == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 532, "DC6 already programmed to be disabled.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void skl_enable_dc6(struct drm_i915_private *dev_priv ) { uint32_t val ; long tmp ; { assert_can_enable_dc6(dev_priv); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("skl_enable_dc6", "Enabling DC6\n"); } else { } gen9_set_dc_state_debugmask_memory_up(dev_priv); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); val = val & 4294967292U; val = val | 2U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283908L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 0); return; } } static void skl_disable_dc6(struct drm_i915_private *dev_priv ) { uint32_t val ; long tmp ; { assert_can_disable_dc6(dev_priv); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("skl_disable_dc6", "Disabling DC6\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 1); val = val & 4294967293U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283908L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283908L, 0); return; } } static void skl_set_power_well(struct drm_i915_private *dev_priv , struct i915_power_well *power_well , bool enable ) { struct drm_device *dev ; uint32_t tmp ; uint32_t fuse_status ; uint32_t req_mask ; uint32_t state_mask ; bool is_enabled ; bool enable_requested ; bool check_fuse_status ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; int __ret_warn_on ; long tmp___4 ; int __ret_warn_on___0 ; uint32_t tmp___5 ; int tmp___6 ; long tmp___7 ; struct drm_i915_private *__p ; struct drm_i915_private 
*__p___0 ; long tmp___8 ; unsigned long timeout_____0 ; unsigned long tmp___9 ; int ret_____0 ; uint32_t tmp___10 ; bool tmp___11 ; uint32_t tmp___12 ; long tmp___13 ; enum csr_state state ; unsigned long timeout_____1 ; unsigned long tmp___14 ; int ret_____1 ; bool tmp___15 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; unsigned long timeout_____2 ; unsigned long tmp___16 ; int ret_____2 ; uint32_t tmp___17 ; bool tmp___18 ; uint32_t tmp___19 ; unsigned long timeout_____3 ; unsigned long tmp___20 ; int ret_____3 ; uint32_t tmp___21 ; bool tmp___22 ; uint32_t tmp___23 ; { dev = dev_priv->dev; check_fuse_status = 0; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); fuse_status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); switch (power_well->data) { case 14UL: tmp___0 = msecs_to_jiffies(1U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48615; ldv_48614: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); if ((tmp___1 & 134217728U) == 0U) { ret__ = -110; } else { } goto ldv_48613; } else { } tmp___2 = drm_can_sleep___1(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48615: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); if ((tmp___3 & 134217728U) == 0U) { goto ldv_48614; } else { } ldv_48613: ; if (ret__ != 0) { drm_err("PG0 not enabled\n"); return; } else { } goto ldv_48617; case 15UL: ; if ((fuse_status & 67108864U) == 0U) { drm_err("PG1 in disabled state\n"); return; } else { } goto ldv_48617; case 1UL: ; case 2UL: ; case 3UL: ; case 4UL: ; case 0UL: ; goto ldv_48617; default: __ret_warn_on = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 598, "Unknown power well %lu\n", power_well->data); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } ldv_48617: req_mask = (uint32_t )(1 << (int )((unsigned int )power_well->data * 2U + 1U)); enable_requested = (tmp & req_mask) != 0U; state_mask = (uint32_t )(1 << (int )((unsigned int )power_well->data * 2U)); is_enabled = (tmp & state_mask) != 0U; if ((int )enable) { if (! enable_requested) { if ((tmp & state_mask) != 0U) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283648L, 1); if (tmp___5 == 0U) { tmp___6 = 1; } else { tmp___6 = 0; } } else { tmp___6 = 0; } __ret_warn_on___0 = tmp___6; tmp___7 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 612, "Invalid for power well status to be enabled, unless done by the BIOS, \t\t\t\twhen request is to disable!\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U && power_well->data == 15UL) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { skl_disable_dc6(dev_priv); if (! dev_priv->power_domains.initializing) { intel_prepare_ddi(dev); } else { } } else { gen9_disable_dc5(dev_priv); } } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283652L, tmp | req_mask, 1); } else { } if (! 
is_enabled) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("skl_set_power_well", "Enabling %s\n", power_well->name); } else { } tmp___9 = msecs_to_jiffies(1U); timeout_____0 = (tmp___9 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_48652; ldv_48651: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); if ((tmp___10 & state_mask) == 0U) { ret_____0 = -110; } else { } goto ldv_48650; } else { } tmp___11 = drm_can_sleep___1(); if ((int )tmp___11) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48652: tmp___12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); if ((tmp___12 & state_mask) == 0U) { goto ldv_48651; } else { } ldv_48650: ; if (ret_____0 != 0) { drm_err("%s enable timeout\n", power_well->name); } else { } check_fuse_status = 1; } else { } } else if ((int )enable_requested) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283652L, ~ req_mask & tmp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 0); tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("skl_set_power_well", "Disabling %s\n", power_well->name); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U && power_well->data == 15UL) { tmp___14 = msecs_to_jiffies(1000U); timeout_____1 = (tmp___14 + (unsigned long )jiffies) + 1UL; ret_____1 = 0; goto ldv_48671; ldv_48670: ; if ((long )(timeout_____1 - (unsigned long )jiffies) < 0L) { state = intel_csr_load_status_get(dev_priv); if ((unsigned int )state == 0U) { ret_____1 = -110; } else { } goto ldv_48669; } else { } tmp___15 = drm_can_sleep___1(); if ((int )tmp___15) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48671: state = intel_csr_load_status_get(dev_priv); if ((unsigned int )state == 0U) { goto ldv_48670; } else { } ldv_48669: ; 
if ((unsigned int )state != 1U) { drm_err("CSR firmware not ready (%d)\n", (unsigned int )state); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { skl_enable_dc6(dev_priv); } else { gen9_enable_dc5(dev_priv); } } } else { } } else { } if ((int )check_fuse_status) { if (power_well->data == 14UL) { tmp___16 = msecs_to_jiffies(1U); timeout_____2 = (tmp___16 + (unsigned long )jiffies) + 1UL; ret_____2 = 0; goto ldv_48689; ldv_48688: ; if ((long )(timeout_____2 - (unsigned long )jiffies) < 0L) { tmp___17 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); if ((tmp___17 & 67108864U) == 0U) { ret_____2 = -110; } else { } goto ldv_48687; } else { } tmp___18 = drm_can_sleep___1(); if ((int )tmp___18) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48689: tmp___19 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); if ((tmp___19 & 67108864U) == 0U) { goto ldv_48688; } else { } ldv_48687: ; if (ret_____2 != 0) { drm_err("PG1 distributing status timeout\n"); } else { } } else if (power_well->data == 15UL) { tmp___20 = msecs_to_jiffies(1U); timeout_____3 = (tmp___20 + (unsigned long )jiffies) + 1UL; ret_____3 = 0; goto ldv_48701; ldv_48700: ; if ((long )(timeout_____3 - (unsigned long )jiffies) < 0L) { tmp___21 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); if ((tmp___21 & 33554432U) == 0U) { ret_____3 = -110; } else { } goto ldv_48699; } else { } tmp___22 = drm_can_sleep___1(); if ((int )tmp___22) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48701: tmp___23 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); if ((tmp___23 & 33554432U) == 0U) { goto ldv_48700; } else { } ldv_48699: ; if (ret_____3 != 0) { drm_err("PG2 distributing status timeout\n"); } else { } } else { } } else { } if ((int )enable && ! 
/* Tail of skl_set_power_well(): run the post-enable hook only on an
 * off->on transition (enable requested and well was not already enabled). */
is_enabled) { skl_power_well_post_enable(dev_priv, power_well); } else { } return; } }
/* Sync HSW power-well software state to hardware: (re)apply the state implied
 * by the refcount (count > 0 => enable).  Then read mmio offset 283648
 * (0x45400 -- presumably the HSW BIOS power-well request register; confirm
 * against the i915 register map): "(int )tmp < 0" tests bit 31, and if it is
 * set the register is cleared to 0 so the driver owns the well from here on. */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { uint32_t tmp ; { hsw_set_power_well(dev_priv, power_well, power_well->count > 0); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283648L, 1); if ((int )tmp < 0) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283648L, 0U, 1); } else { } return; } }
/* Thin enable wrapper: request the HSW well on. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { hsw_set_power_well(dev_priv, power_well, 1); return; } }
/* Thin disable wrapper: request the HSW well off. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { hsw_set_power_well(dev_priv, power_well, 0); return; } }
/* SKL: a well is "enabled" only when BOTH of its two bits -- bit (2*data)
 * and bit (2*data + 1), i.e. the request and state bits for well id
 * power_well->data -- are set in mmio offset 283652 (0x45404). */
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { uint32_t mask ; uint32_t tmp ; { mask = (uint32_t )((1 << (int )((unsigned int )power_well->data * 2U + 1U)) | (1 << (int )((unsigned int )power_well->data * 2U))); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); return ((tmp & mask) == mask); } }
/* SKL sync: apply refcount-implied state, then unconditionally zero mmio
 * offset 283648 (0x45400), clearing any BIOS-made request. */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { skl_set_power_well(dev_priv, power_well, power_well->count > 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 283648L, 0U, 1); return; } }
/* Thin enable wrapper: request the SKL well on. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { skl_set_power_well(dev_priv, power_well, 1); return; } }
/* Thin disable wrapper: request the SKL well off. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { skl_set_power_well(dev_priv, power_well, 0); return; } }
/* No-op callback used for always-on wells: nothing to sync/enable/disable. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { return; } }
/* Always-on wells report enabled unconditionally (body completes on the next line). */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv , struct
/* Completion of i9xx_always_on_power_well_enabled(): always-on => always true. */
i915_power_well *power_well ) { { return (1); } }
/* VLV: drive a power well via the Punit.  Each well owns a 2-bit field
 * (3 << id*2) in Punit register 97 (0x61, status) and 96 (0x60, control --
 * presumably PUNIT_REG_PWRGT_STATUS/CTRL; confirm against the i915 register
 * map).  Field value 0 requests/reports "on", 3 requests/reports "gated".
 * The ldv_487xx goto maze is an LDV-expanded wait_for() poll loop: 100 ms
 * jiffies deadline ("(long)(timeout - jiffies) < 0" is the wraparound-safe
 * time_after test), sleeping 1-2 ms per iteration when sleeping is allowed,
 * otherwise cpu_relax(); ret__ = -110 is -ETIMEDOUT.  rps.hw_lock is held
 * for the whole read-modify-write + wait sequence; the early "goto out"
 * skips the write when the status already matches the requested state. */
static void vlv_set_power_well(struct drm_i915_private *dev_priv , struct i915_power_well *power_well , bool enable ) { enum punit_power_well power_well_id ; u32 mask ; u32 state ; u32 ctrl ; u32 tmp ; u32 tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; u32 tmp___2 ; bool tmp___3 ; u32 tmp___4 ; { power_well_id = (enum punit_power_well )power_well->data; mask = (u32 )(3 << (int )((unsigned int )power_well_id * 2U)); state = (int )enable ? 0U : (u32 )(3 << (int )((unsigned int )power_well_id * 2U)); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp = vlv_punit_read(dev_priv, 97U); if ((tmp & mask) == state) { goto out; } else { } ctrl = vlv_punit_read(dev_priv, 96U); ctrl = ~ mask & ctrl; ctrl = ctrl | state; vlv_punit_write(dev_priv, 96U, ctrl); tmp___1 = msecs_to_jiffies(100U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48760; ldv_48759: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = vlv_punit_read(dev_priv, 97U); if ((tmp___2 & mask) != state) { ret__ = -110; } else { } goto ldv_48758; } else { } tmp___3 = drm_can_sleep___1(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48760: tmp___4 = vlv_punit_read(dev_priv, 97U); if ((tmp___4 & mask) != state) { goto ldv_48759; } else { } ldv_48758: ; if (ret__ != 0) { tmp___0 = vlv_punit_read(dev_priv, 96U); drm_err("timeout setting power well state %08x (%08x)\n", state, tmp___0); } else { } out: mutex_unlock(& dev_priv->rps.hw_lock); return; } }
/* VLV sync: apply the refcount-implied state (count > 0 => on). */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { vlv_set_power_well(dev_priv, power_well, power_well->count > 0); return; } }
/* Thin enable wrapper: request the VLV well on. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { vlv_set_power_well(dev_priv, power_well, 1); return; } }
/* Thin disable wrapper (body completes on the next line). */
static void vlv_power_well_disable(struct drm_i915_private
/* Completion of vlv_power_well_disable(): request the well off. */
*dev_priv , struct i915_power_well *power_well ) { { vlv_set_power_well(dev_priv, power_well, 0); return; } }
/* VLV status query.  Under rps.hw_lock, read the well's 2-bit field from
 * Punit register 97 (0x61, status): field value 0 means powered on (note
 * "enabled" is compared against ctrl while ctrl is still 0U here), 3 means
 * gated; the first LDV-expanded WARN_ON fires on any in-between value.
 * ctrl is then reloaded from register 96 (0x60, control) and the second
 * WARN_ON checks the control request agrees with the observed status. */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { int power_well_id ; bool enabled ; u32 mask ; u32 state ; u32 ctrl ; u32 tmp ; int __ret_warn_on ; long tmp___0 ; u32 tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; { power_well_id = (int )power_well->data; enabled = 0; mask = (u32 )(3 << power_well_id * 2); ctrl = 0U; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp = vlv_punit_read(dev_priv, 97U); state = tmp & mask; __ret_warn_on = state != 0U && (u32 )(3 << power_well_id * 2) != state; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 822, "WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) && state != PUNIT_PWRGT_PWR_GATE(power_well_id))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if (state == ctrl) { enabled = 1; } else { } tmp___1 = vlv_punit_read(dev_priv, 96U); ctrl = tmp___1 & mask; __ret_warn_on___0 = ctrl != state; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 831, "WARN_ON(ctrl != state)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); mutex_unlock(& dev_priv->rps.hw_lock); return (enabled); } }
/* VLV display well enable: declarations only here; the WARN_ON_ONCE guard
 * and body (power_well->data must equal 3, i.e. the DISP2D well) complete
 * on the following lines. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; {
__ret_warn_once = power_well->data != 3UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 841, "WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); vlv_set_power_well(dev_priv, power_well, 1); spin_lock_irq(& dev_priv->irq_lock); valleyview_enable_display_irqs(dev_priv); spin_unlock_irq(& dev_priv->irq_lock); if ((int )dev_priv->power_domains.initializing) { return; } else { } intel_hpd_init(dev_priv); i915_redisable_vga_power_on(dev_priv->dev); return; } } static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { __ret_warn_once = power_well->data != 3UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 864, "WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); spin_lock_irq(& dev_priv->irq_lock); valleyview_disable_display_irqs(dev_priv); spin_unlock_irq(& dev_priv->irq_lock); vlv_set_power_well(dev_priv, power_well, 0); vlv_power_sequencer_reset(dev_priv); return; } } static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; { __ret_warn_once = power_well->data != 5UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 878, "WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), tmp___2 | 536887296U, 1); __const_udelay(4295UL); vlv_set_power_well(dev_priv, power_well, 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581328L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581328L, tmp___3 | 1U, 1); return; } } static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { enum pipe pipe ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p ; uint32_t tmp___2 ; { __ret_warn_once = power_well->data != 5UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 910, "WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); pipe = 0; goto ldv_48831; ldv_48830: assert_pll(dev_priv, pipe, 0); pipe = (enum pipe )((int )pipe + 1); ldv_48831: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_48830; } else { } tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581328L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581328L, tmp___2 & 4294967294U, 1); vlv_set_power_well(dev_priv, power_well, 0); return; } } static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { enum dpio_phy phy ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; unsigned long timeout__ ; unsigned long tmp___5 ; int ret__ ; uint32_t tmp___6 ; bool tmp___7 ; uint32_t tmp___8 ; { __ret_warn_once = power_well->data != 5UL && power_well->data != 12UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 927, "WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); if (power_well->data == 5UL) { phy = 0; tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), tmp___2 | 536870912U, 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), tmp___3 | 536887296U, 1); } else { phy = 1; tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24624U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24624U), tmp___4 | 536887296U, 1); } __const_udelay(4295UL); vlv_set_power_well(dev_priv, power_well, 1); tmp___5 = msecs_to_jiffies(1U); timeout__ = (tmp___5 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48853; ldv_48852: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1966340L, 1); if ((tmp___6 & ((unsigned int )phy == 0U ? 
2147483648U : 1073741824U)) == 0U) { ret__ = -110; } else { } goto ldv_48851; } else { } tmp___7 = drm_can_sleep___1(); if ((int )tmp___7) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48853: tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1966340L, 1); if ((tmp___8 & ((unsigned int )phy == 0U ? 2147483648U : 1073741824U)) == 0U) { goto ldv_48852; } else { } ldv_48851: ; if (ret__ != 0) { drm_err("Display PHY %d is not power up\n", (unsigned int )phy); } else { } dev_priv->chv_phy_control = dev_priv->chv_phy_control | (u32 )(1 << (int )phy); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1966336L, dev_priv->chv_phy_control, 1); return; } } static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { enum dpio_phy phy ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { __ret_warn_once = power_well->data != 5UL && power_well->data != 12UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 962, "WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); if (power_well->data == 5UL) { phy = 0; assert_pll(dev_priv, 0, 0); assert_pll(dev_priv, 1, 0); } else { phy = 1; assert_pll(dev_priv, 2, 0); } dev_priv->chv_phy_control = dev_priv->chv_phy_control & (u32 )(~ (1 << (int )phy)); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1966336L, dev_priv->chv_phy_control, 1); vlv_set_power_well(dev_priv, power_well, 0); return; } } static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { enum pipe pipe ; bool enabled ; u32 state ; u32 ctrl ; u32 tmp ; int __ret_warn_on ; long tmp___0 ; u32 tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; { pipe = (enum pipe )power_well->data; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp = vlv_punit_read(dev_priv, 54U); state = tmp & (u32 )(3 << ((int )pipe + 8) * 2); __ret_warn_on = state != 0U && (u32 )(3 << ((int )pipe + 8) * 2) != state; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 993, "WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); enabled = state == 0U; tmp___1 = 
vlv_punit_read(dev_priv, 54U); ctrl = tmp___1 & (u32 )(3 << (int )pipe * 2); __ret_warn_on___0 = ctrl << 16 != state; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 1001, "WARN_ON(ctrl << 16 != state)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); mutex_unlock(& dev_priv->rps.hw_lock); return (enabled); } } static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv , struct i915_power_well *power_well , bool enable ) { enum pipe pipe ; u32 state ; u32 ctrl ; u32 tmp ; u32 tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; u32 tmp___2 ; bool tmp___3 ; u32 tmp___4 ; { pipe = (enum pipe )power_well->data; state = (int )enable ? 0U : (u32 )(3 << ((int )pipe + 8) * 2); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp = vlv_punit_read(dev_priv, 54U); if ((tmp & (u32 )(3 << ((int )pipe + 8) * 2)) == state) { goto out; } else { } ctrl = vlv_punit_read(dev_priv, 54U); ctrl = (u32 )(~ (3 << (int )pipe * 2)) & ctrl; ctrl = ((int )enable ? 
0U : (u32 )(3 << (int )pipe * 2)) | ctrl; vlv_punit_write(dev_priv, 54U, ctrl); tmp___1 = msecs_to_jiffies(100U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48896; ldv_48895: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = vlv_punit_read(dev_priv, 54U); if ((tmp___2 & (u32 )(3 << ((int )pipe + 8) * 2)) != state) { ret__ = -110; } else { } goto ldv_48894; } else { } tmp___3 = drm_can_sleep___1(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48896: tmp___4 = vlv_punit_read(dev_priv, 54U); if ((tmp___4 & (u32 )(3 << ((int )pipe + 8) * 2)) != state) { goto ldv_48895; } else { } ldv_48894: ; if (ret__ != 0) { tmp___0 = vlv_punit_read(dev_priv, 54U); drm_err("timeout setting power well state %08x (%08x)\n", state, tmp___0); } else { } out: mutex_unlock(& dev_priv->rps.hw_lock); return; } } static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { { chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0); return; } } static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { __ret_warn_once = (power_well->data != 0UL && power_well->data != 1UL) && power_well->data != 2UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 1053, "WARN_ON_ONCE(power_well->data != PIPE_A && power_well->data != PIPE_B && power_well->data != PIPE_C)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); chv_set_pipe_power_well(dev_priv, power_well, 1); if (power_well->data == 0UL) { spin_lock_irq(& dev_priv->irq_lock); valleyview_enable_display_irqs(dev_priv); spin_unlock_irq(& dev_priv->irq_lock); if ((int )dev_priv->power_domains.initializing) { return; } else { } intel_hpd_init(dev_priv); i915_redisable_vga_power_on(dev_priv->dev); } else { } return; } } static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv , struct i915_power_well *power_well ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { __ret_warn_once = (power_well->data != 0UL && power_well->data != 1UL) && power_well->data != 2UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 1080, "WARN_ON_ONCE(power_well->data != PIPE_A && power_well->data != PIPE_B && power_well->data != PIPE_C)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); if (power_well->data == 0UL) { spin_lock_irq(& dev_priv->irq_lock); valleyview_disable_display_irqs(dev_priv); spin_unlock_irq(& dev_priv->irq_lock); } else { } chv_set_pipe_power_well(dev_priv, power_well, 0); if (power_well->data == 0UL) { vlv_power_sequencer_reset(dev_priv); } else { } return; } } void intel_display_power_get(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) { struct i915_power_domains *power_domains ; struct i915_power_well *power_well ; int i ; long tmp ; int tmp___0 ; { intel_runtime_pm_get(dev_priv); power_domains = & dev_priv->power_domains; mutex_lock_nested(& power_domains->lock, 0U); i = 0; goto ldv_48929; ldv_48928: ; if ((int )(power_well->domains >> (int )domain) & 1) { tmp___0 = power_well->count; power_well->count = power_well->count + 1; if (tmp___0 == 0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_display_power_get", "enabling %s\n", power_well->name); } else { } (*((power_well->ops)->enable))(dev_priv, power_well); power_well->hw_enabled = 1; } else { } } else { } i = i + 1; ldv_48929: ; if (power_domains->power_well_count > i) { power_well = power_domains->power_wells + (unsigned long )i; if ((unsigned long )power_well != (unsigned long )((struct i915_power_well *)0)) { goto ldv_48928; } else { goto ldv_48930; } } else { } ldv_48930: 
power_domains->domain_use_count[(unsigned int )domain] = power_domains->domain_use_count[(unsigned int )domain] + 1; mutex_unlock(& power_domains->lock); return; } } void intel_display_power_put(struct drm_i915_private *dev_priv , enum intel_display_power_domain domain ) { struct i915_power_domains *power_domains ; struct i915_power_well *power_well ; int i ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; long tmp___0 ; long tmp___1 ; { power_domains = & dev_priv->power_domains; mutex_lock_nested(& power_domains->lock, 0U); __ret_warn_on = power_domains->domain_use_count[(unsigned int )domain] == 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 1152, "WARN_ON(!power_domains->domain_use_count[domain])"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); power_domains->domain_use_count[(unsigned int )domain] = power_domains->domain_use_count[(unsigned int )domain] - 1; i = power_domains->power_well_count + -1; goto ldv_48944; ldv_48943: ; if ((int )(power_well->domains >> (int )domain) & 1) { __ret_warn_on___0 = power_well->count == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 1156, "WARN_ON(!power_well->count)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); power_well->count = power_well->count - 1; if (power_well->count == 0 && i915.disable_power_well != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_display_power_put", "disabling %s\n", 
/* Tail of intel_display_power_put(): walk the wells in REVERSE order
 * (i counts down from power_well_count - 1), drop each matching well's
 * refcount, and call ops->disable when the count hits zero -- but only if
 * the i915.disable_power_well module parameter is nonzero.  Finally drop
 * power_domains->lock and release the runtime-PM reference taken in
 * intel_display_power_get(). */
power_well->name); } else { } power_well->hw_enabled = 0; (*((power_well->ops)->disable))(dev_priv, power_well); } else { } } else { } i = i - 1; ldv_48944: ; if (i >= 0) { power_well = power_domains->power_wells + (unsigned long )i; if ((unsigned long )power_well != (unsigned long )((struct i915_power_well *)0)) { goto ldv_48943; } else { goto ldv_48945; } } else { } ldv_48945: mutex_unlock(& power_domains->lock); intel_runtime_pm_put(dev_priv); return; } }
/* Ops vtables.  Initializer order appears to be {sync_hw, enable, disable,
 * is_enabled} -- it matches the member functions' naming; confirm against
 * struct i915_power_well_ops.  Always-on wells use the noop callback for
 * everything except the (constant-true) enabled query. */
static struct i915_power_well_ops const i9xx_always_on_power_well_ops = {& i9xx_always_on_power_well_noop, & i9xx_always_on_power_well_noop, & i9xx_always_on_power_well_noop, & i9xx_always_on_power_well_enabled}; static struct i915_power_well_ops const chv_pipe_power_well_ops = {& chv_pipe_power_well_sync_hw, & chv_pipe_power_well_enable, & chv_pipe_power_well_disable, & chv_pipe_power_well_enabled};
/* CHV DPIO-common wells reuse the VLV sync/enabled callbacks but have their
 * own enable/disable (extra PHY setup/teardown). */
static struct i915_power_well_ops const chv_dpio_cmn_power_well_ops = {& vlv_power_well_sync_hw, & chv_dpio_cmn_power_well_enable, & chv_dpio_cmn_power_well_disable, & vlv_power_well_enabled};
/* Well descriptor arrays.  Field order appears to be {name, always_on,
 * count, hw_enabled, domains-bitmask, data (well id), ops} -- TODO confirm
 * against struct i915_power_well.  536870911UL = 0x1FFFFFFF, i.e. all 29
 * low domain bits; the other masks (529268225UL, 276038142UL, ...) are the
 * platform-specific domain sets from the original intel_runtime_pm.c. */
static struct i915_power_well i9xx_always_on_power_well[1U] = { {"always-on", 1, 0, (_Bool)0, 536870911UL, 0UL, & i9xx_always_on_power_well_ops}}; static struct i915_power_well_ops const hsw_power_well_ops = {& hsw_power_well_sync_hw, & hsw_power_well_enable, & hsw_power_well_disable, & hsw_power_well_enabled}; static struct i915_power_well_ops const skl_power_well_ops = {& skl_power_well_sync_hw, & skl_power_well_enable, & skl_power_well_disable, & skl_power_well_enabled};
/* HSW vs BDW differ only in the domain masks of their two wells. */
static struct i915_power_well hsw_power_wells[2U] = { {"always-on", 1, 0, (_Bool)0, 529268225UL, 0UL, & i9xx_always_on_power_well_ops}, {"display", (_Bool)0, 0, (_Bool)0, 276038142UL, 0UL, & hsw_power_well_ops}}; static struct i915_power_well bdw_power_wells[2U] = { {"always-on", 1, 0, (_Bool)0, 529268233UL, 0UL, & i9xx_always_on_power_well_ops}, {"display", (_Bool)0, 0, (_Bool)0, 276038134UL, 0UL, & hsw_power_well_ops}}; static struct i915_power_well_ops
const vlv_display_power_well_ops = {& vlv_power_well_sync_hw, & vlv_display_power_well_enable, & vlv_display_power_well_disable, & vlv_power_well_enabled}; static struct i915_power_well_ops const vlv_dpio_cmn_power_well_ops = {& vlv_power_well_sync_hw, & vlv_dpio_cmn_power_well_enable, & vlv_dpio_cmn_power_well_disable, & vlv_power_well_enabled}; static struct i915_power_well_ops const vlv_dpio_power_well_ops = {& vlv_power_well_sync_hw, & vlv_power_well_enable, & vlv_power_well_disable, & vlv_power_well_enabled}; static struct i915_power_well vlv_power_wells[7U] = { {"always-on", 1, 0, (_Bool)0, 268435456UL, 0UL, & i9xx_always_on_power_well_ops}, {"display", (_Bool)0, 0, (_Bool)0, 536870911UL, 3UL, & vlv_display_power_well_ops}, {"dpio-tx-b-01", (_Bool)0, 0, (_Bool)0, 369160192UL, 6UL, & vlv_dpio_power_well_ops}, {"dpio-tx-b-23", (_Bool)0, 0, (_Bool)0, 369160192UL, 7UL, & vlv_dpio_power_well_ops}, {"dpio-tx-c-01", (_Bool)0, 0, (_Bool)0, 369160192UL, 8UL, & vlv_dpio_power_well_ops}, {"dpio-tx-c-23", (_Bool)0, 0, (_Bool)0, 369160192UL, 9UL, & vlv_dpio_power_well_ops}, {"dpio-common", (_Bool)0, 0, (_Bool)0, 369684480UL, 5UL, & vlv_dpio_cmn_power_well_ops}}; static struct i915_power_well chv_power_wells[4U] = { {"always-on", 1, 0, (_Bool)0, 268435456UL, 0UL, & i9xx_always_on_power_well_ops}, {"display", (_Bool)0, 0, (_Bool)0, 536870911UL, 0UL, & chv_pipe_power_well_ops}, {"dpio-common-bc", (_Bool)0, 0, (_Bool)0, 369160192UL, 5UL, & chv_dpio_cmn_power_well_ops}, {"dpio-common-d", (_Bool)0, 0, (_Bool)0, 402849792UL, 12UL, & chv_dpio_cmn_power_well_ops}}; static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv , int power_well_id ) { struct i915_power_domains *power_domains ; struct i915_power_well *power_well ; int i ; { power_domains = & dev_priv->power_domains; i = 0; goto ldv_48967; ldv_48966: ; if ((power_well->domains & 536870911UL) != 0UL) { if (power_well->data == (unsigned long )power_well_id) { return (power_well); } else { } } else { 
} i = i + 1; ldv_48967: ; if (power_domains->power_well_count > i) { power_well = power_domains->power_wells + (unsigned long )i; if ((unsigned long )power_well != (unsigned long )((struct i915_power_well *)0)) { goto ldv_48966; } else { goto ldv_48968; } } else { } ldv_48968: ; return ((struct i915_power_well *)0); } } bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv , int power_well_id ) { struct i915_power_well *power_well ; bool ret ; { power_well = lookup_power_well(dev_priv, power_well_id); ret = (*((power_well->ops)->is_enabled))(dev_priv, power_well); return (ret); } } static struct i915_power_well skl_power_wells[8U] = { {"always-on", 1, 0, (_Bool)0, 270270464UL, 0UL, & i9xx_always_on_power_well_ops}, {"power well 1", (_Bool)0, 0, (_Bool)0, 535035903UL, 14UL, & skl_power_well_ops}, {"MISC IO power well", (_Bool)0, 0, (_Bool)0, 535035903UL, 0UL, & skl_power_well_ops}, {"power well 2", (_Bool)0, 0, (_Bool)0, 509866486UL, 15UL, & skl_power_well_ops}, {"DDI A/E power well", (_Bool)0, 0, (_Bool)0, 268438528UL, 1UL, & skl_power_well_ops}, {"DDI B power well", (_Bool)0, 0, (_Bool)0, 268447744UL, 2UL, & skl_power_well_ops}, {"DDI C power well", (_Bool)0, 0, (_Bool)0, 268484608UL, 3UL, & skl_power_well_ops}, {"DDI D power well", (_Bool)0, 0, (_Bool)0, 268632064UL, 4UL, & skl_power_well_ops}}; static struct i915_power_well bxt_power_wells[3U] = { {"always-on", 1, 0, (_Bool)0, 404684800UL, 0UL, & i9xx_always_on_power_well_ops}, {"power well 1", (_Bool)0, 0, (_Bool)0, 400621567UL, 14UL, & skl_power_well_ops}, {"power well 2", (_Bool)0, 0, (_Bool)0, 375452150UL, 15UL, & skl_power_well_ops}}; int intel_power_domains_init(struct drm_i915_private *dev_priv ) { struct i915_power_domains *power_domains ; struct lock_class_key __key ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct 
drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; { power_domains = & dev_priv->power_domains; __mutex_init(& power_domains->lock, "&power_domains->lock", & __key); __p___7 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { power_domains->power_wells = (struct i915_power_well *)(& hsw_power_wells); power_domains->power_well_count = 2; } else { __p___5 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 8U) { power_domains->power_wells = (struct i915_power_well *)(& bdw_power_wells); power_domains->power_well_count = 2; } else { goto _L___1; } } else { _L___1: /* CIL Label */ __p___4 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { power_domains->power_wells = (struct i915_power_well *)(& skl_power_wells); power_domains->power_well_count = 8; } else { __p___2 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { power_domains->power_wells = (struct i915_power_well *)(& bxt_power_wells); power_domains->power_well_count = 3; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { power_domains->power_wells = (struct i915_power_well *)(& chv_power_wells); power_domains->power_well_count = 4; } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const 
*)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { power_domains->power_wells = (struct i915_power_well *)(& vlv_power_wells); power_domains->power_well_count = 7; } else { power_domains->power_wells = (struct i915_power_well *)(& i9xx_always_on_power_well); power_domains->power_well_count = 1; } } } } } } return (0); } } static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct device *device ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int tmp ; { dev = dev_priv->dev; device = & (dev->pdev)->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { return; } else { } } else { } } else { } } } else { } } else { } tmp = intel_enable_rc6((struct drm_device const *)dev); if (tmp == 0) { return; } else { } pm_runtime_get_sync(device); pm_runtime_disable(device); return; } } void intel_power_domains_fini(struct drm_i915_private *dev_priv ) { { intel_runtime_pm_disable(dev_priv); intel_display_set_init_power(dev_priv, 1); return; } } static void intel_power_domains_resume(struct drm_i915_private *dev_priv ) { struct i915_power_domains *power_domains ; struct i915_power_well *power_well 
; int i ; { power_domains = & dev_priv->power_domains; mutex_lock_nested(& power_domains->lock, 0U); i = 0; goto ldv_49108; ldv_49107: ; if ((power_well->domains & 536870911UL) != 0UL) { (*((power_well->ops)->sync_hw))(dev_priv, power_well); power_well->hw_enabled = (*((power_well->ops)->is_enabled))(dev_priv, power_well); } else { } i = i + 1; ldv_49108: ; if (power_domains->power_well_count > i) { power_well = power_domains->power_wells + (unsigned long )i; if ((unsigned long )power_well != (unsigned long )((struct i915_power_well *)0)) { goto ldv_49107; } else { goto ldv_49109; } } else { } ldv_49109: mutex_unlock(& power_domains->lock); return; } } static void chv_phy_control_init(struct drm_i915_private *dev_priv ) { struct i915_power_well *cmn_bc ; struct i915_power_well *tmp ; struct i915_power_well *cmn_d ; struct i915_power_well *tmp___0 ; bool tmp___1 ; bool tmp___2 ; { tmp = lookup_power_well(dev_priv, 5); cmn_bc = tmp; tmp___0 = lookup_power_well(dev_priv, 12); cmn_d = tmp___0; dev_priv->chv_phy_control = 83886372U; tmp___1 = (*((cmn_bc->ops)->is_enabled))(dev_priv, cmn_bc); if ((int )tmp___1) { dev_priv->chv_phy_control = dev_priv->chv_phy_control | 1U; } else { } tmp___2 = (*((cmn_d->ops)->is_enabled))(dev_priv, cmn_d); if ((int )tmp___2) { dev_priv->chv_phy_control = dev_priv->chv_phy_control | 2U; } else { } return; } } static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv ) { struct i915_power_well *cmn ; struct i915_power_well *tmp ; struct i915_power_well *disp2d ; struct i915_power_well *tmp___0 ; bool tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; long tmp___4 ; { tmp = lookup_power_well(dev_priv, 5); cmn = tmp; tmp___0 = lookup_power_well(dev_priv, 3); disp2d = tmp___0; tmp___1 = (*((cmn->ops)->is_enabled))(dev_priv, cmn); if ((int )tmp___1) { tmp___2 = (*((disp2d->ops)->is_enabled))(dev_priv, disp2d); if ((int )tmp___2) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581328L, 1); if ((int )tmp___3 & 1) { return; } else { } } 
else { } } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("vlv_cmnlane_wa", "toggling display PHY side reset\n"); } else { } (*((disp2d->ops)->enable))(dev_priv, disp2d); (*((cmn->ops)->disable))(dev_priv, cmn); return; } } void intel_power_domains_init_hw(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct i915_power_domains *power_domains ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = dev_priv->dev; power_domains = & dev_priv->power_domains; power_domains->initializing = 1; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { chv_phy_control_init(dev_priv); } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { mutex_lock_nested(& power_domains->lock, 0U); vlv_cmnlane_wa(dev_priv); mutex_unlock(& power_domains->lock); } else { } } intel_display_set_init_power(dev_priv, 1); intel_power_domains_resume(dev_priv); power_domains->initializing = 0; return; } } void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv ) { { intel_runtime_pm_get(dev_priv); return; } } void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv ) { { intel_runtime_pm_put(dev_priv); return; } } void intel_runtime_pm_get(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct device *device ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int __ret_warn_on ; long tmp ; { dev = dev_priv->dev; device = & (dev->pdev)->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int 
)((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { return; } else { } } else { } } else { } } } else { } } else { } pm_runtime_get_sync(device); __ret_warn_on = (int )dev_priv->pm.suspended; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 1753, "Device still suspended.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct device *device ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int __ret_warn_on ; long tmp ; { dev = dev_priv->dev; device = & (dev->pdev)->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct 
drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { return; } else { } } else { } } else { } } } else { } } else { } __ret_warn_on = (int )dev_priv->pm.suspended; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_runtime_pm.c", 1781, "Getting nosync-ref while suspended.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); pm_runtime_get_noresume(device); return; } } void intel_runtime_pm_put(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct device *device ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev = dev_priv->dev; device = & (dev->pdev)->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { return; 
} else { } } else { } } else { } } } else { } } else { } pm_runtime_mark_last_busy(device); pm_runtime_put_autosuspend(device); return; } } void intel_runtime_pm_enable(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct device *device ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int tmp ; { dev = dev_priv->dev; device = & (dev->pdev)->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { return; } else { } } else { } } else { } } } else { } } else { } pm_runtime_set_active(device); tmp = intel_enable_rc6((struct drm_device const *)dev); if (tmp == 0) { printk("\016[drm] RC6 disabled, disabling runtime PM support\n"); return; } else { } pm_runtime_set_autosuspend_delay(device, 10000); pm_runtime_mark_last_busy(device); pm_runtime_use_autosuspend(device); pm_runtime_put_autosuspend(device); return; } } void ldv_initialize_i915_power_well_ops_165(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); vlv_display_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); vlv_display_power_well_ops_group1 = (struct drm_i915_private *)tmp___0; return; } } void 
ldv_initialize_i915_power_well_ops_163(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); vlv_dpio_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); vlv_dpio_power_well_ops_group1 = (struct drm_i915_private *)tmp___0; return; } } void ldv_initialize_i915_power_well_ops_170(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); i9xx_always_on_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); i9xx_always_on_power_well_ops_group1 = (struct drm_i915_private *)tmp___0; return; } } void ldv_initialize_i915_power_well_ops_168(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); chv_dpio_cmn_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); chv_dpio_cmn_power_well_ops_group1 = (struct drm_i915_private *)tmp___0; return; } } void ldv_initialize_i915_power_well_ops_169(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); chv_pipe_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); chv_pipe_power_well_ops_group1 = (struct drm_i915_private *)tmp___0; return; } } void ldv_initialize_i915_power_well_ops_166(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); skl_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); skl_power_well_ops_group1 = (struct drm_i915_private *)tmp___0; return; } } void ldv_initialize_i915_power_well_ops_164(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); vlv_dpio_cmn_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); vlv_dpio_cmn_power_well_ops_group1 = (struct drm_i915_private *)tmp___0; return; } } void ldv_initialize_i915_power_well_ops_167(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); hsw_power_well_ops_group0 = (struct i915_power_well *)tmp; tmp___0 = ldv_init_zalloc(51720UL); hsw_power_well_ops_group1 = 
(struct drm_i915_private *)tmp___0; return; } } void ldv_main_exported_170(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_170 == 1) { i9xx_always_on_power_well_noop(i9xx_always_on_power_well_ops_group1, i9xx_always_on_power_well_ops_group0); ldv_state_variable_170 = 1; } else { } goto ldv_49346; case 1: ; if (ldv_state_variable_170 == 1) { i9xx_always_on_power_well_enabled(i9xx_always_on_power_well_ops_group1, i9xx_always_on_power_well_ops_group0); ldv_state_variable_170 = 1; } else { } goto ldv_49346; case 2: ; if (ldv_state_variable_170 == 1) { i9xx_always_on_power_well_noop(i9xx_always_on_power_well_ops_group1, i9xx_always_on_power_well_ops_group0); ldv_state_variable_170 = 1; } else { } goto ldv_49346; case 3: ; if (ldv_state_variable_170 == 1) { i9xx_always_on_power_well_noop(i9xx_always_on_power_well_ops_group1, i9xx_always_on_power_well_ops_group0); ldv_state_variable_170 = 1; } else { } goto ldv_49346; default: ldv_stop(); } ldv_49346: ; return; } } void ldv_main_exported_165(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_165 == 1) { vlv_display_power_well_enable(vlv_display_power_well_ops_group1, vlv_display_power_well_ops_group0); ldv_state_variable_165 = 1; } else { } goto ldv_49355; case 1: ; if (ldv_state_variable_165 == 1) { vlv_power_well_enabled(vlv_display_power_well_ops_group1, vlv_display_power_well_ops_group0); ldv_state_variable_165 = 1; } else { } goto ldv_49355; case 2: ; if (ldv_state_variable_165 == 1) { vlv_power_well_sync_hw(vlv_display_power_well_ops_group1, vlv_display_power_well_ops_group0); ldv_state_variable_165 = 1; } else { } goto ldv_49355; case 3: ; if (ldv_state_variable_165 == 1) { vlv_display_power_well_disable(vlv_display_power_well_ops_group1, vlv_display_power_well_ops_group0); ldv_state_variable_165 = 1; } else { } goto ldv_49355; default: ldv_stop(); } ldv_49355: ; return; } } void ldv_main_exported_168(void) { int tmp 
; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_168 == 1) { chv_dpio_cmn_power_well_enable(chv_dpio_cmn_power_well_ops_group1, chv_dpio_cmn_power_well_ops_group0); ldv_state_variable_168 = 1; } else { } goto ldv_49364; case 1: ; if (ldv_state_variable_168 == 1) { vlv_power_well_enabled(chv_dpio_cmn_power_well_ops_group1, chv_dpio_cmn_power_well_ops_group0); ldv_state_variable_168 = 1; } else { } goto ldv_49364; case 2: ; if (ldv_state_variable_168 == 1) { vlv_power_well_sync_hw(chv_dpio_cmn_power_well_ops_group1, chv_dpio_cmn_power_well_ops_group0); ldv_state_variable_168 = 1; } else { } goto ldv_49364; case 3: ; if (ldv_state_variable_168 == 1) { chv_dpio_cmn_power_well_disable(chv_dpio_cmn_power_well_ops_group1, chv_dpio_cmn_power_well_ops_group0); ldv_state_variable_168 = 1; } else { } goto ldv_49364; default: ldv_stop(); } ldv_49364: ; return; } } void ldv_main_exported_167(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_167 == 1) { hsw_power_well_enable(hsw_power_well_ops_group1, hsw_power_well_ops_group0); ldv_state_variable_167 = 1; } else { } goto ldv_49373; case 1: ; if (ldv_state_variable_167 == 1) { hsw_power_well_enabled(hsw_power_well_ops_group1, hsw_power_well_ops_group0); ldv_state_variable_167 = 1; } else { } goto ldv_49373; case 2: ; if (ldv_state_variable_167 == 1) { hsw_power_well_sync_hw(hsw_power_well_ops_group1, hsw_power_well_ops_group0); ldv_state_variable_167 = 1; } else { } goto ldv_49373; case 3: ; if (ldv_state_variable_167 == 1) { hsw_power_well_disable(hsw_power_well_ops_group1, hsw_power_well_ops_group0); ldv_state_variable_167 = 1; } else { } goto ldv_49373; default: ldv_stop(); } ldv_49373: ; return; } } void ldv_main_exported_163(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_163 == 1) { vlv_power_well_enable(vlv_dpio_power_well_ops_group1, vlv_dpio_power_well_ops_group0); ldv_state_variable_163 
= 1; } else { } goto ldv_49382; case 1: ; if (ldv_state_variable_163 == 1) { vlv_power_well_enabled(vlv_dpio_power_well_ops_group1, vlv_dpio_power_well_ops_group0); ldv_state_variable_163 = 1; } else { } goto ldv_49382; case 2: ; if (ldv_state_variable_163 == 1) { vlv_power_well_sync_hw(vlv_dpio_power_well_ops_group1, vlv_dpio_power_well_ops_group0); ldv_state_variable_163 = 1; } else { } goto ldv_49382; case 3: ; if (ldv_state_variable_163 == 1) { vlv_power_well_disable(vlv_dpio_power_well_ops_group1, vlv_dpio_power_well_ops_group0); ldv_state_variable_163 = 1; } else { } goto ldv_49382; default: ldv_stop(); } ldv_49382: ; return; } } void ldv_main_exported_166(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_166 == 1) { skl_power_well_enable(skl_power_well_ops_group1, skl_power_well_ops_group0); ldv_state_variable_166 = 1; } else { } goto ldv_49391; case 1: ; if (ldv_state_variable_166 == 1) { skl_power_well_enabled(skl_power_well_ops_group1, skl_power_well_ops_group0); ldv_state_variable_166 = 1; } else { } goto ldv_49391; case 2: ; if (ldv_state_variable_166 == 1) { skl_power_well_sync_hw(skl_power_well_ops_group1, skl_power_well_ops_group0); ldv_state_variable_166 = 1; } else { } goto ldv_49391; case 3: ; if (ldv_state_variable_166 == 1) { skl_power_well_disable(skl_power_well_ops_group1, skl_power_well_ops_group0); ldv_state_variable_166 = 1; } else { } goto ldv_49391; default: ldv_stop(); } ldv_49391: ; return; } } void ldv_main_exported_164(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_164 == 1) { vlv_dpio_cmn_power_well_enable(vlv_dpio_cmn_power_well_ops_group1, vlv_dpio_cmn_power_well_ops_group0); ldv_state_variable_164 = 1; } else { } goto ldv_49400; case 1: ; if (ldv_state_variable_164 == 1) { vlv_power_well_enabled(vlv_dpio_cmn_power_well_ops_group1, vlv_dpio_cmn_power_well_ops_group0); ldv_state_variable_164 = 1; } else { } goto ldv_49400; case 2: ; 
if (ldv_state_variable_164 == 1) { vlv_power_well_sync_hw(vlv_dpio_cmn_power_well_ops_group1, vlv_dpio_cmn_power_well_ops_group0); ldv_state_variable_164 = 1; } else { } goto ldv_49400; case 3: ; if (ldv_state_variable_164 == 1) { vlv_dpio_cmn_power_well_disable(vlv_dpio_cmn_power_well_ops_group1, vlv_dpio_cmn_power_well_ops_group0); ldv_state_variable_164 = 1; } else { } goto ldv_49400; default: ldv_stop(); } ldv_49400: ; return; } } void ldv_main_exported_169(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_169 == 1) { chv_pipe_power_well_enable(chv_pipe_power_well_ops_group1, chv_pipe_power_well_ops_group0); ldv_state_variable_169 = 1; } else { } goto ldv_49409; case 1: ; if (ldv_state_variable_169 == 1) { chv_pipe_power_well_enabled(chv_pipe_power_well_ops_group1, chv_pipe_power_well_ops_group0); ldv_state_variable_169 = 1; } else { } goto ldv_49409; case 2: ; if (ldv_state_variable_169 == 1) { chv_pipe_power_well_sync_hw(chv_pipe_power_well_ops_group1, chv_pipe_power_well_ops_group0); ldv_state_variable_169 = 1; } else { } goto ldv_49409; case 3: ; if (ldv_state_variable_169 == 1) { chv_pipe_power_well_disable(chv_pipe_power_well_ops_group1, chv_pipe_power_well_ops_group0); ldv_state_variable_169 = 1; } else { } goto ldv_49409; default: ldv_stop(); } ldv_49409: ; return; } } bool ldv_queue_work_on_97(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_98(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; 
activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_99(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_100(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_101(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static __u32 __arch_swab32(__u32 val ) { { __asm__ ("bswapl %0": "=r" (val): "0" (val)); return (val); } } __inline static __u32 __fswab32(__u32 val ) { __u32 tmp ; { tmp = __arch_swab32(val); return (tmp); } } bool ldv_queue_work_on_111(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_113(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_112(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_115(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_114(struct workqueue_struct *ldv_func_arg1 ) ; extern int request_firmware_nowait(struct module * , bool , char const * , struct device * , gfp_t , void * , void (*)(struct 
firmware const * , void * ) ) ; extern void release_firmware(struct firmware const * ) ; void intel_csr_ucode_init(struct drm_device *dev ) ; void intel_csr_ucode_fini(struct drm_device *dev ) ; static struct stepping_info const skl_stepping_info[9U] = { {65, 48}, {66, 48}, {67, 48}, {68, 48}, {69, 48}, {70, 48}, {71, 48}, {72, 48}, {73, 48}}; static char intel_get_stepping(struct drm_device *dev ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (unsigned int )(dev->pdev)->revision <= 8U) { return ((char )skl_stepping_info[(int )(dev->pdev)->revision].stepping); } else { return (-61); } } } static char intel_get_substepping(struct drm_device *dev ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (unsigned int )(dev->pdev)->revision <= 8U) { return ((char )skl_stepping_info[(int )(dev->pdev)->revision].substepping); } else { return (-61); } } } enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv ) { enum csr_state state ; { mutex_lock_nested(& dev_priv->csr_lock, 0U); state = dev_priv->csr.state; mutex_unlock(& dev_priv->csr_lock); return (state); } } void intel_csr_load_status_set(struct drm_i915_private *dev_priv , enum csr_state state ) { { mutex_lock_nested(& dev_priv->csr_lock, 0U); dev_priv->csr.state = state; mutex_unlock(& dev_priv->csr_lock); return; } } void intel_csr_load_program(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; __be32 *payload ; uint32_t i ; uint32_t fw_size ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; payload = dev_priv->csr.dmc_payload; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 9U) { drm_err("No CSR support available for this platform\n"); return; } else { } mutex_lock_nested(& dev_priv->csr_lock, 0U); fw_size = 
/* Tail of intel_csr_load_program() (function header is above this chunk).
 * First ldv-goto loop: copy the DMC payload into the CSR program region.
 * Note (i + 131072U) * 4U == 0x80000 + 4*i, i.e. consecutive dwords starting
 * at MMIO offset 0x80000 (the CSR program storage base used again below in
 * assert_csr_loaded via the literal 524288L). Second loop replays the saved
 * mmioaddr/mmiodata pairs. CIL lowered the original for-loops into these
 * ldv_* label/goto pairs; control flow is equivalent. */
dev_priv->csr.dmc_fw_size; i = 0U; goto ldv_48095; ldv_48094: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 131072U) * 4U), *(payload + (unsigned long )i), 1); i = i + 1U; ldv_48095: ; if (i < fw_size) { goto ldv_48094; } else { } i = 0U; goto ldv_48098; ldv_48097: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dev_priv->csr.mmioaddr[i], dev_priv->csr.mmiodata[i], 1); i = i + 1U; ldv_48098: ; if (dev_priv->csr.mmio_count > i) { goto ldv_48097; } else { }
/* Mark firmware as loaded (state 1) and release the CSR lock taken earlier. */
dev_priv->csr.state = 1; mutex_unlock(& dev_priv->csr_lock); return; } }
/* finish_csr_load - request_firmware_nowait() completion callback.
 * Parses the DMC firmware image (CSS header -> package header -> per-stepping
 * fw_info table -> DMC header -> payload), validates each section's size,
 * byte-swaps the payload into a kmalloc'd buffer, and programs it via
 * intel_csr_load_program(). On any failure it jumps to `out`, where the
 * "not fw_loaded" path reports CSR load state 2 (failure). context is the
 * drm_i915_private pointer passed at request time. */
static void finish_csr_load(struct firmware const *fw , void *context ) { struct drm_i915_private *dev_priv ; struct drm_device *dev ; struct intel_css_header *css_header ; struct intel_package_header *package_header ; struct intel_dmc_header *dmc_header ; struct intel_csr *csr ; char stepping ; char tmp ; char substepping ; char tmp___0 ; uint32_t dmc_offset ; uint32_t readcount ; uint32_t nbytes ; uint32_t i ; __be32 *dmc_payload ; bool fw_loaded ; void *tmp___1 ; uint32_t *tmp___2 ; __u32 tmp___3 ; { dev_priv = (struct drm_i915_private *)context; dev = dev_priv->dev; csr = & dev_priv->csr; tmp = intel_get_stepping(dev); stepping = tmp; tmp___0 = intel_get_substepping(dev); substepping = tmp___0;
/* dmc_offset starts at 0xFFFFFFFF ("no matching entry found" sentinel). */
dmc_offset = 4294967295U; readcount = 0U; fw_loaded = 0;
/* fw == NULL means the async firmware request itself failed. */
if ((unsigned long )fw == (unsigned long )((struct firmware const *)0)) { i915_firmware_load_error_print(csr->fw_path, 0); goto out; } else { }
/* -61 is the signed view of the "unknown stepping" marker returned by the
 * intel_get_(sub)stepping helpers (0xC3); bail out if either is unknown. */
if ((int )((signed char )stepping) == -61 || (int )((signed char )substepping) == -61) { drm_err("Unknown stepping info, firmware loading failed\n"); goto out; } else { }
/* CSS header: header_len counts dwords, so *4 must equal 128 bytes. */
css_header = (struct intel_css_header *)fw->data; if (css_header->header_len * 4U != 128U) { drm_err("Firmware has wrong CSS header length %u bytes\n", css_header->header_len * 4U); goto out; } else { } readcount = readcount + 128U;
/* Package header: expected to be 256 bytes (header_len in dwords again). */
package_header = (struct intel_package_header *)fw->data + (unsigned long )readcount; if ((int )package_header->header_len * 4 != 256) { drm_err("Firmware has wrong package header length %u bytes\n", (int )package_header->header_len * 4); goto out; } else { } readcount = readcount + 256U;
/* Scan fw_info[] for the best stepping match. 42 is ASCII '*' (wildcard):
 * exact-stepping + wildcard-substepping and exact/exact matches break out of
 * the loop immediately; the wildcard/wildcard entry is remembered but the
 * scan continues in case a better match follows. */
i = 0U; goto ldv_48121; ldv_48120: ; if ((int )((signed char )package_header->fw_info[i].substepping) == 42 && (int )((signed char )package_header->fw_info[i].stepping) == (int )((signed char )stepping)) { dmc_offset = package_header->fw_info[i].offset; goto ldv_48119; } else if ((int )((signed char )package_header->fw_info[i].stepping) == (int )((signed char )stepping) && (int )((signed char )package_header->fw_info[i].substepping) == (int )((signed char )substepping)) { dmc_offset = package_header->fw_info[i].offset; goto ldv_48119; } else if ((int )((signed char )package_header->fw_info[i].stepping) == 42 && (int )((signed char )package_header->fw_info[i].substepping) == 42) { dmc_offset = package_header->fw_info[i].offset; } else { } i = i + 1U; ldv_48121: ; if (package_header->num_entries > i) { goto ldv_48120; } else { } ldv_48119: ; if (dmc_offset == 4294967295U) { drm_err("Firmware not supported for %c stepping\n", (int )stepping); goto out; } else { } readcount = readcount + dmc_offset;
/* DMC header: fixed 128-byte header, at most 8 mmio save/restore slots. */
dmc_header = (struct intel_dmc_header *)fw->data + (unsigned long )readcount; if ((unsigned int )dmc_header->header_len != 128U) { drm_err("Firmware has wrong dmc header length %u bytes\n", (int )dmc_header->header_len); goto out; } else { } readcount = readcount + 128U; if (dmc_header->mmio_count > 8U) { drm_err("Firmware has wrong mmio count %u\n", dmc_header->mmio_count); goto out; } else { } csr->mmio_count = dmc_header->mmio_count;
/* NOTE(review): this range check looks like a translation artifact. The
 * condition (addr <= 0x7FFFF && addr > 0x8FFFF) can never be true, so the
 * mmioaddr validation is a no-op here; the upstream driver rejects addresses
 * OUTSIDE [CSR_MMIO_START_RANGE, CSR_MMIO_END_RANGE], i.e. it uses `||` with
 * the bounds inverted — confirm against intel_csr.c before relying on this. */
i = 0U; goto ldv_48125; ldv_48124: ; if (dmc_header->mmioaddr[i] <= 524287U && dmc_header->mmioaddr[i] > 589823U) { drm_err(" Firmware has wrong mmio address 0x%x\n", dmc_header->mmioaddr[i]); goto out; } else { } csr->mmioaddr[i] = dmc_header->mmioaddr[i]; csr->mmiodata[i] = dmc_header->mmiodata[i]; i = i + 1U; ldv_48125: ; if (dmc_header->mmio_count > i) { goto ldv_48124; } else { }
/* Payload: fw_size is in dwords; cap the byte size at 12287 (0x2FFF). */
nbytes = dmc_header->fw_size * 4U; if (nbytes > 12287U) { drm_err("CSR firmware too big (%u) bytes\n", nbytes); goto out; } else { } csr->dmc_fw_size = dmc_header->fw_size;
/* 208U is the numeric GFP flag mask CIL substituted for the allocation
 * flags (presumably GFP_KERNEL — verify against the original source). */
tmp___1 = kmalloc((size_t )nbytes, 208U); csr->dmc_payload = (__be32 *)tmp___1; if ((unsigned long )csr->dmc_payload == (unsigned long )((__be32 *)0U)) { drm_err("Memory allocation failed for dmc payload\n"); goto out; } else { }
/* Copy the payload dword-by-dword, byte-swapping each (__fswab32) so the
 * stored buffer is big-endian (__be32) as the hardware loader expects. */
dmc_payload = csr->dmc_payload; i = 0U; goto ldv_48129; ldv_48128: tmp___2 = (uint32_t *)fw->data + (unsigned long )(i * 4U + readcount); tmp___3 = __fswab32(*tmp___2); *(dmc_payload + (unsigned long )i) = tmp___3; i = i + 1U; ldv_48129: ; if (dmc_header->fw_size > i) { goto ldv_48128; } else { } intel_csr_load_program(dev); fw_loaded = 1;
/* Common exit: drop the runtime-pm reference taken in intel_csr_ucode_init
 * on success, otherwise record load-failure state (2). The firmware blob is
 * released either way. */
out: ; if ((int )fw_loaded) { intel_runtime_pm_put(dev_priv); } else { intel_csr_load_status_set(dev_priv, 2); } release_firmware(fw); return; } }
/* intel_csr_ucode_init - kick off the asynchronous CSR/DMC firmware load.
 * Reads two feature bytes at offset 45 of drm_i915_private (CIL flattened the
 * original bitfield tests): the first gates whether this platform has CSR at
 * all (early return if zero), the second selects the SKL firmware path.
 * Takes a runtime-pm reference that finish_csr_load() releases on success.
 * 208U is again a raw GFP constant passed to request_firmware_nowait(). */
void intel_csr_ucode_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_csr *csr ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; csr = & dev_priv->csr; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { csr->fw_path = "i915/skl_dmc_ver1.bin"; } else { drm_err("Unexpected: no known CSR firmware for platform\n"); intel_csr_load_status_set(dev_priv, 2); return; } intel_runtime_pm_get(dev_priv); ret = request_firmware_nowait(& __this_module, 1, csr->fw_path, & ((dev_priv->dev)->pdev)->dev, 208U, (void *)dev_priv, & finish_csr_load); if (ret != 0) { i915_firmware_load_error_print(csr->fw_path, ret); intel_csr_load_status_set(dev_priv, 2); } else { } return; } }
/* intel_csr_ucode_fini - teardown: skip platforms without CSR support (same
 * byte-45 test as above), then mark the load state failed/uninitialized (2)
 * and free the payload buffer allocated in finish_csr_load(). */
void intel_csr_ucode_fini(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return; } else { } intel_csr_load_status_set(dev_priv, 2); kfree((void const *)dev_priv->csr.dmc_payload); return; } }
/* assert_csr_loaded - four WARN-style checks that the CSR firmware is live:
 * (1) load state must be 1 (loaded); (2) the first program-storage dword at
 * MMIO 0x80000 (524288L) is non-zero; (3) the SSP base register at 0x8F074
 * (585844L) is non-zero; (4) the HTP register at 0x8F004 (585732L) is
 * non-zero. Each check is the CIL expansion of WARN(cond, msg): compute the
 * condition, branch through ldv__builtin_expect, and call warn_slowpath_fmt
 * with the original file/line. The trailing bare ldv__builtin_expect calls
 * are leftovers of the WARN macro's return-value expression. */
void assert_csr_loaded(struct drm_i915_private *dev_priv ) { int __ret_warn_on ; enum csr_state tmp ; long tmp___0 ; int __ret_warn_on___0 ; uint32_t tmp___1 ; long tmp___2 ; int __ret_warn_on___1 ; uint32_t tmp___3 ; long tmp___4 ; int __ret_warn_on___2 ; uint32_t tmp___5 ; long tmp___6 ; { tmp = intel_csr_load_status_get(dev_priv); __ret_warn_on = (unsigned int )tmp != 1U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_csr.c", 462, "CSR is not loaded.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 524288L, 1); __ret_warn_on___0 = tmp___1 == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_csr.c", 464, "CSR program storage start is NULL\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 585844L, 1); __ret_warn_on___1 = tmp___3 == 0U; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_csr.c", 465, "CSR SSP Base Not fine\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 585732L, 1); __ret_warn_on___2 = tmp___5 == 0U; tmp___6 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_csr.c", 466, "CSR HTP Not fine\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); return; } }
/* LDV environment-model stubs: each wraps the real workqueue call and then
 * notifies the verifier's work model (activate_work_18 /
 * call_and_disable_all_18) so queued work can be tracked and replayed. The
 * numeric suffixes distinguish call sites; behavior is identical per kind. */
bool ldv_queue_work_on_111(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
bool ldv_queue_delayed_work_on_112(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_113(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
void ldv_flush_workqueue_114(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
bool ldv_queue_delayed_work_on_115(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Forward declarations and externs for the following translation-unit
 * section (the compat ioctl code). */
__inline static long ldv__builtin_expect(long exp , long c ) ; extern void __might_fault(char const * , int ) ; extern struct tss_struct cpu_tss ;
/* current_top_of_stack - per-cpu read of cpu_tss.x86_tss.sp0. The switch on
 * the constant 8UL is the CIL expansion of the kernel's percpu_from_op()
 * size dispatch; only the 8-byte (movq) case is reachable on x86-64. */
__inline static unsigned long current_top_of_stack(void) { u64 pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& cpu_tss.x86_tss.sp0)); goto ldv_5437; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& cpu_tss.x86_tss.sp0)); goto ldv_5437; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& cpu_tss.x86_tss.sp0)); goto ldv_5437; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& cpu_tss.x86_tss.sp0)); goto ldv_5437; default: __bad_percpu_size(); } ldv_5437: ; return ((unsigned long )pfo_ret__); } }
/* current_thread_info - thread_info lives 32768 (THREAD_SIZE) bytes below
 * the top of the kernel stack. */
__inline static struct thread_info *current_thread_info(void) { unsigned long tmp ; { tmp = current_top_of_stack(); return ((struct thread_info *)(tmp - 32768UL)); } }
bool ldv_queue_work_on_125(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_127(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_126(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_128(struct workqueue_struct *ldv_func_arg1 ) ; extern void *compat_alloc_user_space(unsigned long ) ;
/* __chk_range_not_ok - returns true (range NOT ok) if addr+size wraps
 * (the post-add "addr < size" overflow test) or exceeds the access limit. */
__inline static bool __chk_range_not_ok(unsigned long addr , unsigned long size , unsigned long limit ) { { addr = addr + size; if (addr < size) { return (1); } else { } return (addr > limit); } }
extern void __put_user_bad(void) ; extern unsigned long _copy_from_user(void * , void const * , unsigned int ) ; extern void __copy_from_user_overflow(void) ; __inline static unsigned long copy_from_user(void *to , void const *from , unsigned long n ) { int sz ; unsigned long tmp ; long tmp___0 ; { tmp = __builtin_object_size((void const *)to, 0); sz = (int )tmp; __might_fault("./arch/x86/include/asm/uaccess.h", 697); tmp___0 = ldv__builtin_expect((long )(sz < 0 || (unsigned long )sz >= n), 1L); if (tmp___0 != 0L) { n = _copy_from_user(to, from, (unsigned int )n); } else { __copy_from_user_overflow(); } return (n); } } extern long drm_compat_ioctl(struct file * , unsigned int , unsigned long ) ; static int compat_i915_batchbuffer(struct file *file , unsigned int cmd , unsigned long arg ) { drm_i915_batchbuffer32_t batchbuffer32 ; drm_i915_batchbuffer_t *batchbuffer ; unsigned long tmp ; void *tmp___0 ; struct thread_info *tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; int __pu_err ; int __pu_err___0 ; int __pu_err___1 ; int __pu_err___2 ; int __pu_err___3 ; int __pu_err___4 ; long tmp___5 ; { tmp = copy_from_user((void *)(& batchbuffer32), (void const *)arg, 24UL); if (tmp != 0UL) { return (-14); } else { } tmp___0 = compat_alloc_user_space(32UL); batchbuffer = (drm_i915_batchbuffer_t *)tmp___0; tmp___1 = current_thread_info(); tmp___2 = __chk_range_not_ok((unsigned long )batchbuffer, 32UL, tmp___1->addr_limit.seg); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 1L); if (tmp___4 == 0L) { return (-14); } else { __pu_err = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 
0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "iq" (batchbuffer32.start), "m" (*((struct __large_struct *)(& batchbuffer->start))), "i" (-14), "0" (__pu_err)); goto ldv_49424; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (batchbuffer32.start), "m" (*((struct __large_struct *)(& batchbuffer->start))), "i" (-14), "0" (__pu_err)); goto ldv_49424; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 
9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (batchbuffer32.start), "m" (*((struct __large_struct *)(& batchbuffer->start))), "i" (-14), "0" (__pu_err)); goto ldv_49424; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "er" (batchbuffer32.start), "m" (*((struct __large_struct *)(& batchbuffer->start))), "i" (-14), "0" (__pu_err)); goto ldv_49424; default: __put_user_bad(); } ldv_49424: ; if (__pu_err != 0) { return 
(-14); } else { __pu_err___0 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "iq" (batchbuffer32.used), "m" (*((struct __large_struct *)(& batchbuffer->used))), "i" (-14), "0" (__pu_err___0)); goto ldv_49432; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long 
(1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (batchbuffer32.used), "m" (*((struct __large_struct *)(& batchbuffer->used))), "i" (-14), "0" (__pu_err___0)); goto ldv_49432; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (batchbuffer32.used), "m" (*((struct __large_struct *)(& batchbuffer->used))), "i" (-14), "0" (__pu_err___0)); goto ldv_49432; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 
0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "er" (batchbuffer32.used), "m" (*((struct __large_struct *)(& batchbuffer->used))), "i" (-14), "0" (__pu_err___0)); goto ldv_49432; default: __put_user_bad(); } ldv_49432: ; if (__pu_err___0 != 0) { return (-14); } else { __pu_err___1 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "iq" (batchbuffer32.DR1), "m" (*((struct __large_struct *)(& batchbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49440; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip 
-(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "ir" (batchbuffer32.DR1), "m" (*((struct __large_struct *)(& batchbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49440; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "ir" (batchbuffer32.DR1), "m" (*((struct __large_struct *)(& batchbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49440; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 
663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "er" (batchbuffer32.DR1), "m" (*((struct __large_struct *)(& batchbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49440; default: __put_user_bad(); } ldv_49440: ; if (__pu_err___1 != 0) { return (-14); } else { __pu_err___2 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "iq" (batchbuffer32.DR4), "m" (*((struct __large_struct *)(& batchbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49448; 
case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "ir" (batchbuffer32.DR4), "m" (*((struct __large_struct *)(& batchbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49448; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" 
(__pu_err___2): "ir" (batchbuffer32.DR4), "m" (*((struct __large_struct *)(& batchbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49448; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "er" (batchbuffer32.DR4), "m" (*((struct __large_struct *)(& batchbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49448; default: __put_user_bad(); } ldv_49448: ; if (__pu_err___2 != 0) { return (-14); } else { __pu_err___3 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 
663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "iq" (batchbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49456; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "ir" (batchbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49456; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) 
> 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "ir" (batchbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49456; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "er" (batchbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49456; default: __put_user_bad(); } ldv_49456: ; if (__pu_err___3 != 0) { return (-14); } else { __pu_err___4 = 0; switch (8UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection 
.altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "iq" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)), "m" (*((struct __large_struct *)(& batchbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49464; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)), 
"m" (*((struct __large_struct *)(& batchbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49464; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)), "m" (*((struct __large_struct *)(& batchbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49464; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 
0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "er" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)), "m" (*((struct __large_struct *)(& batchbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49464; default: __put_user_bad(); } ldv_49464: ; if (__pu_err___4 != 0) { return (-14); } else { } } } } } } }
/* Tail of the preceding 32-bit batchbuffer compat handler: every field of the
   native-layout struct has been stored, so hand it to the real ioctl with the
   hard-coded 64-bit ioctl number 1075864643U. */
tmp___5 = drm_ioctl(file, 1075864643U, (unsigned long )batchbuffer); return ((int )tmp___5); } }
/*
 * compat_i915_cmdbuffer() - entry point used when a 32-bit process issues the
 * i915 CMDBUFFER ioctl on a 64-bit kernel.
 *
 * NOTE: this is CIL-preprocessed generated source; the __asm__ blocks below
 * are the fully expanded x86-64 __put_user() macros — do not hand-edit them.
 *
 * Steps visible in this block:
 *  1. copy_from_user() the 24-byte 32-bit layout (drm_i915_cmdbuffer32_t)
 *     from the ioctl argument; a short copy returns -14 (-EFAULT).
 *  2. compat_alloc_user_space(32) a native drm_i915_cmdbuffer_t and verify
 *     via __chk_range_not_ok() that it lies below the thread's addr_limit.
 *  3. __put_user() each field into the native struct: buf and cliprects are
 *     32-bit user pointers widened through (unsigned long); sz, DR1, DR4 and
 *     num_cliprects are 4-byte values. Each switch is on sizeof(field), so
 *     exactly one case is live and the rest are dead code the generator kept.
 *  4. Forward to drm_ioctl() with the hard-coded 64-bit ioctl number
 *     1075864651U; the incoming cmd parameter is not used.
 *
 * Inside each asm: the .altinstructions records patch in 0f 01 cb / 0f 01 ca
 * (STAC/CLAC) when CPU feature bit 9*32+20 — SMAP — is present, and the
 * __ex_table entry redirects a faulting store to the fixup code that loads
 * -14 into the error register.
 */
static int compat_i915_cmdbuffer(struct file *file , unsigned int cmd , unsigned long arg ) { drm_i915_cmdbuffer32_t cmdbuffer32 ; drm_i915_cmdbuffer_t *cmdbuffer ; unsigned long tmp ; void *tmp___0 ; struct thread_info *tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; int __pu_err ; int __pu_err___0 ; int __pu_err___1 ; int __pu_err___2 ; int __pu_err___3 ; int __pu_err___4 ; long tmp___5 ; {
/* Step 1: pull the 32-bit request (24 bytes) in from userspace. */
tmp = copy_from_user((void *)(& cmdbuffer32), (void const *)arg, 24UL); if (tmp != 0UL) { return (-14); } else { }
/* Step 2: carve out the native-layout struct (32 bytes) in compat user space
   and range-check it against the current thread's addr_limit. */
tmp___0 = compat_alloc_user_space(32UL); cmdbuffer = (drm_i915_cmdbuffer_t *)tmp___0; tmp___1 = current_thread_info(); tmp___2 = __chk_range_not_ok((unsigned long )cmdbuffer, 32UL, tmp___1->addr_limit.seg); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 1L); if (tmp___4 == 0L) { return (-14); } else {
/* __put_user(widened buf pointer, &cmdbuffer->buf): 8-byte field, so the
   8UL/movq case is the live one. */
__pu_err = 0; switch (8UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "iq" ((char *)((unsigned long )cmdbuffer32.buf)), "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14), "0" (__pu_err)); goto ldv_49488; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" ((char *)((unsigned long )cmdbuffer32.buf)), "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14), "0" (__pu_err)); goto ldv_49488; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" ((char *)((unsigned long )cmdbuffer32.buf)), "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14), "0" (__pu_err)); goto ldv_49488; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "er" ((char *)((unsigned long )cmdbuffer32.buf)), "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14), "0" (__pu_err)); goto ldv_49488; default: __put_user_bad(); } ldv_49488: ; if (__pu_err != 0) { return (-14); } else {
/* __put_user(cmdbuffer32.sz, &cmdbuffer->sz): 4-byte field, 4UL/movl case. */
__pu_err___0 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "iq" (cmdbuffer32.sz), "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14), "0" (__pu_err___0)); goto ldv_49496; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (cmdbuffer32.sz), "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14), "0" (__pu_err___0)); goto ldv_49496; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (cmdbuffer32.sz), "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14), "0" (__pu_err___0)); goto ldv_49496; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "er" (cmdbuffer32.sz), "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14), "0" (__pu_err___0)); goto ldv_49496; default: __put_user_bad(); } ldv_49496: ; if (__pu_err___0 != 0) { return (-14); } else {
/* __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1): 4-byte field. */
__pu_err___1 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "iq" (cmdbuffer32.DR1), "m" (*((struct __large_struct *)(& cmdbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49504; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "ir" (cmdbuffer32.DR1), "m" (*((struct __large_struct *)(& cmdbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49504; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "ir" (cmdbuffer32.DR1), "m" (*((struct __large_struct *)(& cmdbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49504; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "er" (cmdbuffer32.DR1), "m" (*((struct __large_struct *)(& cmdbuffer->DR1))), "i" (-14), "0" (__pu_err___1)); goto ldv_49504; default: __put_user_bad(); } ldv_49504: ; if (__pu_err___1 != 0) { return (-14); } else {
/* __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4): 4-byte field. */
__pu_err___2 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "iq" (cmdbuffer32.DR4), "m" (*((struct __large_struct *)(& cmdbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49512; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "ir" (cmdbuffer32.DR4), "m" (*((struct __large_struct *)(& cmdbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49512; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "ir" (cmdbuffer32.DR4), "m" (*((struct __large_struct *)(& cmdbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49512; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "er" (cmdbuffer32.DR4), "m" (*((struct __large_struct *)(& cmdbuffer->DR4))), "i" (-14), "0" (__pu_err___2)); goto ldv_49512; default: __put_user_bad(); } ldv_49512: ; if (__pu_err___2 != 0) { return (-14); } else {
/* __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects): 4-byte field. */
__pu_err___3 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "iq" (cmdbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49520; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "ir" (cmdbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49520; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "ir" (cmdbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49520; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___3): "er" (cmdbuffer32.num_cliprects), "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))), "i" (-14), "0" (__pu_err___3)); goto ldv_49520; default: __put_user_bad(); } ldv_49520: ; if (__pu_err___3 != 0) { return (-14); } else {
/* __put_user(widened cliprects pointer, &cmdbuffer->cliprects): 8-byte field, 8UL/movq case is live. */
__pu_err___4 = 0; switch (8UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "iq" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)), "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49528; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)), "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49528; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)), "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49528; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___4): "er" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)), "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))), "i" (-14), "0" (__pu_err___4)); goto ldv_49528; default: __put_user_bad(); } ldv_49528: ; if (__pu_err___4 != 0) { return (-14); } else { } } } } } } }
/* Step 4: conversion complete — forward the native-layout struct to the real
   ioctl (hard-coded 64-bit number 1075864651U; the cmd argument is unused). */
tmp___5 = drm_ioctl(file, 1075864651U, (unsigned long )cmdbuffer); return ((int )tmp___5); } }
/*
 * compat_i915_irq_emit() - 32-bit compat handler for the i915 IRQ-emit ioctl.
 * Same conversion pattern as compat_i915_cmdbuffer() above: copy the 4-byte
 * 32-bit request in, compat_alloc_user_space(8) a native drm_i915_irq_emit_t,
 * range-check it against the thread's addr_limit, then __put_user() the
 * widened irq_seq pointer into it; -14 is -EFAULT.
 */
static int compat_i915_irq_emit(struct file *file , unsigned int cmd , unsigned long arg ) { drm_i915_irq_emit32_t req32 ; drm_i915_irq_emit_t *request ; unsigned long tmp ; void *tmp___0 ; struct thread_info *tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; int __pu_err ; long tmp___5 ; { tmp = copy_from_user((void *)(& req32), (void const *)arg, 4UL); if (tmp != 0UL) { return (-14); } else { } tmp___0 = compat_alloc_user_space(8UL); request = (drm_i915_irq_emit_t *)tmp___0; tmp___1 = current_thread_info(); tmp___2 = __chk_range_not_ok((unsigned long )request, 8UL, tmp___1->addr_limit.seg); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 1L); if (tmp___4 == 0L) { return (-14); } else { __pu_err = 0; switch (8UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section 
.fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "iq" ((int *)((unsigned long )req32.irq_seq)), "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14), "0" (__pu_err)); goto ldv_49547; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" ((int *)((unsigned long )req32.irq_seq)), "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14), "0" (__pu_err)); goto ldv_49547; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 
663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" ((int *)((unsigned long )req32.irq_seq)), "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14), "0" (__pu_err)); goto ldv_49547; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "er" ((int *)((unsigned long )req32.irq_seq)), "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14), "0" (__pu_err)); goto ldv_49547; default: __put_user_bad(); } ldv_49547: ; if (__pu_err != 0) { return (-14); } else { } } tmp___5 = drm_ioctl(file, 3221775428U, (unsigned long )request); return ((int )tmp___5); } } static int compat_i915_getparam(struct file *file , unsigned int cmd , unsigned long arg ) { drm_i915_getparam32_t req32 ; drm_i915_getparam_t *request ; unsigned long tmp ; void *tmp___0 ; struct thread_info *tmp___1 ; bool tmp___2 ; int tmp___3 ; 
long tmp___4 ; int __pu_err ; int __pu_err___0 ; long tmp___5 ; { tmp = copy_from_user((void *)(& req32), (void const *)arg, 8UL); if (tmp != 0UL) { return (-14); } else { } tmp___0 = compat_alloc_user_space(16UL); request = (drm_i915_getparam_t *)tmp___0; tmp___1 = current_thread_info(); tmp___2 = __chk_range_not_ok((unsigned long )request, 16UL, tmp___1->addr_limit.seg); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 1L); if (tmp___4 == 0L) { return (-14); } else { __pu_err = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "iq" (req32.param), "m" (*((struct __large_struct *)(& request->param))), "i" (-14), "0" (__pu_err)); goto ldv_49567; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 
0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (req32.param), "m" (*((struct __large_struct *)(& request->param))), "i" (-14), "0" (__pu_err)); goto ldv_49567; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (req32.param), "m" (*((struct __large_struct *)(& request->param))), "i" (-14), "0" (__pu_err)); goto ldv_49567; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 
663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "er" (req32.param), "m" (*((struct __large_struct *)(& request->param))), "i" (-14), "0" (__pu_err)); goto ldv_49567; default: __put_user_bad(); } ldv_49567: ; if (__pu_err != 0) { return (-14); } else { __pu_err___0 = 0; switch (8UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "iq" ((int *)((unsigned long )req32.value)), "m" (*((struct __large_struct *)(& request->value))), "i" (-14), 
"0" (__pu_err___0)); goto ldv_49575; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" ((int *)((unsigned long )req32.value)), "m" (*((struct __large_struct *)(& request->value))), "i" (-14), "0" (__pu_err___0)); goto ldv_49575; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n 
.long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" ((int *)((unsigned long )req32.value)), "m" (*((struct __large_struct *)(& request->value))), "i" (-14), "0" (__pu_err___0)); goto ldv_49575; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "er" ((int *)((unsigned long )req32.value)), "m" (*((struct __large_struct *)(& request->value))), "i" (-14), "0" (__pu_err___0)); goto ldv_49575; default: __put_user_bad(); } ldv_49575: ; if (__pu_err___0 != 0) { return (-14); } else { } } } tmp___5 = drm_ioctl(file, 3222299718U, (unsigned long )request); return ((int )tmp___5); } } static int compat_i915_alloc(struct file *file , unsigned int cmd , unsigned long arg ) { drm_i915_mem_alloc32_t req32 ; drm_i915_mem_alloc_t *request ; unsigned long tmp ; void *tmp___0 ; struct thread_info *tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; int __pu_err ; int __pu_err___0 ; int __pu_err___1 ; int __pu_err___2 ; long tmp___5 ; { tmp = copy_from_user((void *)(& req32), (void const *)arg, 16UL); if (tmp != 0UL) { return (-14); } else { } tmp___0 = compat_alloc_user_space(24UL); 
request = (drm_i915_mem_alloc_t *)tmp___0; tmp___1 = current_thread_info(); tmp___2 = __chk_range_not_ok((unsigned long )request, 24UL, tmp___1->addr_limit.seg); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 1L); if (tmp___4 == 0L) { return (-14); } else { __pu_err = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "iq" (req32.region), "m" (*((struct __large_struct *)(& request->region))), "i" (-14), "0" (__pu_err)); goto ldv_49597; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 
663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (req32.region), "m" (*((struct __large_struct *)(& request->region))), "i" (-14), "0" (__pu_err)); goto ldv_49597; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (req32.region), "m" (*((struct __large_struct *)(& request->region))), "i" (-14), "0" (__pu_err)); goto ldv_49597; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * 
((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "er" (req32.region), "m" (*((struct __large_struct *)(& request->region))), "i" (-14), "0" (__pu_err)); goto ldv_49597; default: __put_user_bad(); } ldv_49597: ; if (__pu_err != 0) { return (-14); } else { __pu_err___0 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "iq" (req32.alignment), "m" (*((struct __large_struct *)(& request->alignment))), "i" (-14), "0" (__pu_err___0)); goto ldv_49605; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 
9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (req32.alignment), "m" (*((struct __large_struct *)(& request->alignment))), "i" (-14), "0" (__pu_err___0)); goto ldv_49605; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (req32.alignment), "m" (*((struct __large_struct *)(& request->alignment))), "i" (-14), "0" (__pu_err___0)); goto ldv_49605; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip 
-(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "er" (req32.alignment), "m" (*((struct __large_struct *)(& request->alignment))), "i" (-14), "0" (__pu_err___0)); goto ldv_49605; default: __put_user_bad(); } ldv_49605: ; if (__pu_err___0 != 0) { return (-14); } else { __pu_err___1 = 0; switch (4UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n 
.balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "iq" (req32.size), "m" (*((struct __large_struct *)(& request->size))), "i" (-14), "0" (__pu_err___1)); goto ldv_49613; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "ir" (req32.size), "m" (*((struct __large_struct *)(& request->size))), "i" (-14), "0" (__pu_err___1)); goto ldv_49613; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 
0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "ir" (req32.size), "m" (*((struct __large_struct *)(& request->size))), "i" (-14), "0" (__pu_err___1)); goto ldv_49613; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___1): "er" (req32.size), "m" (*((struct __large_struct *)(& request->size))), "i" (-14), "0" (__pu_err___1)); goto ldv_49613; default: __put_user_bad(); } ldv_49613: ; if (__pu_err___1 != 0) { return (-14); } else { __pu_err___2 = 0; switch (8UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * 
((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "iq" ((int *)((unsigned long )req32.region_offset)), "m" (*((struct __large_struct *)(& request->region_offset))), "i" (-14), "0" (__pu_err___2)); goto ldv_49621; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "ir" ((int *)((unsigned long )req32.region_offset)), "m" (*((struct __large_struct *)(& request->region_offset))), "i" (-14), "0" (__pu_err___2)); goto ldv_49621; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 
6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "ir" ((int *)((unsigned long )req32.region_offset)), "m" (*((struct __large_struct *)(& request->region_offset))), "i" (-14), "0" (__pu_err___2)); goto ldv_49621; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___2): "er" ((int *)((unsigned long )req32.region_offset)), "m" (*((struct __large_struct *)(& request->region_offset))), "i" (-14), "0" (__pu_err___2)); goto ldv_49621; default: __put_user_bad(); } 
/* Tail of compat_i915_alloc(): after the last __put_user() expansion, bail
 * out with -14 (-EFAULT) on any put_user error, otherwise forward the
 * converted 64-bit request to the native handler via drm_ioctl(). */
ldv_49621: ; if (__pu_err___2 != 0) { return (-14); } else { } } } } } tmp___5 = drm_ioctl(file, 3222824008U, (unsigned long )request); return ((int )tmp___5); } }
/* Table of 32-bit compat handlers for the i915 driver-private ioctl range,
 * indexed by (ioctl nr - 64). A zero (NULL) slot means the ioctl needs no
 * argument translation and is handed to drm_ioctl() unchanged. */
static drm_ioctl_compat_t *i915_compat_ioctls[12U] = { 0, 0, 0, & compat_i915_batchbuffer, & compat_i915_irq_emit, 0, & compat_i915_getparam, 0, & compat_i915_alloc, 0, 0, & compat_i915_cmdbuffer};
/* i915_compat_ioctl - entry point for ioctls issued by 32-bit userspace.
 * The low byte of cmd is the ioctl number: numbers <= 63 belong to the DRM
 * core and go to drm_compat_ioctl(); numbers 64..75 are driver-private and
 * are dispatched through i915_compat_ioctls[]; everything else (or a NULL
 * table slot) falls back to plain drm_ioctl(). */
long i915_compat_ioctl(struct file *filp , unsigned int cmd , unsigned long arg ) { unsigned int nr ; drm_ioctl_compat_t *fn ; int ret ; long tmp ; long tmp___0 ; { nr = cmd & 255U; /* ioctl number = low byte of cmd */ fn = (drm_ioctl_compat_t *)0; if (nr <= 63U) { /* DRM core ioctl range */ tmp = drm_compat_ioctl(filp, cmd, arg); return (tmp); } else { } if (nr <= 75U) { fn = i915_compat_ioctls[nr - 64U]; } else { } if ((unsigned long )fn != (unsigned long )((drm_ioctl_compat_t *)0)) { ret = (*fn)(filp, cmd, arg); } else { tmp___0 = drm_ioctl(filp, cmd, arg); ret = (int )tmp___0; } return ((long )ret); } }
/* LDV verification-model wrapper around queue_work_on(): performs the real
 * call, then registers the work item with the verifier's model state
 * (activate_work_18) so the tool can track pending work. */
bool ldv_queue_work_on_125(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper around queue_delayed_work_on(): same pattern, registering the
 * delayed_work's embedded work_struct with the model. */
bool ldv_queue_delayed_work_on_126(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Another LDV wrapper around queue_work_on() (the verifier generates one
 * numbered instance per call site). */
bool ldv_queue_work_on_127(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper around flush_workqueue(); the parameter list and body
 * continue on the next original source line. */
void ldv_flush_workqueue_128(struct
workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *__builtin_alloca(unsigned long ) ; __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct pv_cpu_ops pv_cpu_ops ; __inline static int constant_test_bit(long nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr >> 6)) >> ((int )nr & 63)) & 1); } } __inline static unsigned int __arch_hweight32(unsigned int w ) { unsigned int res ; { res = 0U; __asm__ ("661:\n\tcall __sw_hweight32\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 4*32+23)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0xf3,0x40,0x0f,0xb8,0xc7\n6651:\n\t.popsection": "=a" (res): "D" (w)); return (res); } } extern int kstrtoint(char const * , unsigned int , int * ) ; extern int sscanf(char const * , char const * , ...) 
; extern struct task_struct *current_task ; __inline static struct task_struct *get_current(void) { struct task_struct *pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task)); goto ldv_3129; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3129; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3129; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3129; default: __bad_percpu_size(); } ldv_3129: ; return (pfo_ret__); } } extern int strcmp(char const * , char const * ) ; extern char *skip_spaces(char const * ) ; __inline static u64 paravirt_read_msr(unsigned int msr , int *err ) { u64 __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_cpu_ops.read_msr == (unsigned long )((u64 (*)(unsigned int , int * ))0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (128), "i" (12UL)); ldv_4042: ; goto ldv_4042; } else { } __asm__ volatile ("771:\n\tcall *%c6;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c5\n .byte 772b-771b\n .short %c7\n.popsection\n": "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax): [paravirt_typenum] "i" (32UL), [paravirt_opptr] "i" (& pv_cpu_ops.read_msr), [paravirt_clobber] "i" (511), "D" ((unsigned long )msr), "S" ((unsigned long )err): "memory", "cc", "r8", "r9", "r10", "r11"); __ret = (unsigned long long )__eax; return (__ret); } } __inline static unsigned long arch_local_save_flags___2(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long 
__edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } extern void lock_acquire(struct lockdep_map * , unsigned int , int , int , int , struct lockdep_map * , unsigned long ) ; extern void lock_release(struct lockdep_map * , int , unsigned long ) ; __inline static bool static_key_false___0(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } extern unsigned long _raw_spin_lock_irqsave(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->__annonCompField18.rlock, flags); return; } } extern void __init_waitqueue_head(wait_queue_head_t * , char const * , struct lock_class_key * ) ; extern long prepare_to_wait_event(wait_queue_head_t * , wait_queue_t * , int ) ; extern void finish_wait(wait_queue_head_t * , wait_queue_t * ) ; __inline static void rcu_lock_acquire(struct lockdep_map *map ) { { lock_acquire(map, 0U, 0, 2, 0, (struct lockdep_map *)0, 0UL); return; } } __inline static void 
rcu_lock_release(struct lockdep_map *map ) { { lock_release(map, 1, 0UL); return; } } extern struct lockdep_map rcu_lock_map ; __inline static int rcu_read_lock_sched_held___0(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___2(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } __inline static void rcu_read_lock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { rcu_read_lock_sched_notrace(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 849, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 900, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_read_unlock_sched_notrace(); rcu_lock_release(& rcu_lock_map); return; } } bool ldv_queue_work_on_139(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_141(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_140(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_143(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_142(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_144(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_145(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_146(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_147(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_148(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_149(struct delayed_work *ldv_func_arg1 ) ; extern unsigned int work_busy(struct work_struct * ) ; __inline static unsigned int readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr)): "memory"); return (ret); } } __inline static void memcpy_fromio(void *dst , void const volatile *src , size_t count ) { { memcpy(dst, (void const *)src, count); return; } } extern int idr_for_each(struct idr * , int (*)(int , void * , void * ) , void * ) ; extern struct task_struct *pid_task(struct pid * , enum pid_type ) ; extern struct task_struct 
*get_pid_task(struct pid * , enum pid_type ) ; extern void schedule(void) ;
/* kmalloc_array(): allocate an array of n elements of 'size' bytes each.
   Returns NULL when n * size would overflow SIZE_MAX (see guard below);
   otherwise defers to __kmalloc() with the caller's gfp flags. */
__inline static void *kmalloc_array(size_t n , size_t size , gfp_t flags ) { void *tmp ; { if (size != 0UL && 0xffffffffffffffffUL / size < n) { /* overflow guard: reject any n for which n * size would wrap */ return ((void *)0); } else { } tmp = __kmalloc(n * size, flags); return (tmp); } }
/* kcalloc(): kmalloc_array() with 0x8000 OR-ed into the flags — presumably
   __GFP_ZERO (zero-initialized memory); confirm against this kernel's gfp.h. */
__inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc_array(n, size, flags | 32768U); return (tmp); } } extern ssize_t seq_read(struct file * , char * , size_t , loff_t * ) ; extern loff_t seq_lseek(struct file * , loff_t , int ) ; extern int seq_putc(struct seq_file * , char ) ; extern int seq_puts(struct seq_file * , char const * ) ; extern int seq_write(struct seq_file * , void const * , size_t ) ; extern int seq_printf(struct seq_file * , char const * , ...) ; extern int single_open(struct file * , int (*)(struct seq_file * , void * ) , void * ) ; extern int single_release(struct inode * , struct file * ) ; extern unsigned char const _ctype[] ; extern loff_t default_llseek(struct file * , loff_t , int ) ; extern loff_t generic_file_llseek(struct file * , loff_t , int ) ; extern ssize_t simple_read_from_buffer(void * , size_t , loff_t * , void const * , size_t ) ; __inline static void __simple_attr_check_format(char const *fmt , ...)
{ { return; } } extern int simple_attr_open(struct inode * , struct file * , int (*)(void * , u64 * ) , int (*)(void * , u64 ) , char const * ) ; extern int simple_attr_release(struct inode * , struct file * ) ; extern ssize_t simple_attr_read(struct file * , char * , size_t , loff_t * ) ; extern ssize_t simple_attr_write(struct file * , char const * , size_t , loff_t * ) ; extern struct dentry *debugfs_create_file(char const * , umode_t , struct dentry * , void * , struct file_operations const * ) ; extern void debugfs_remove(struct dentry * ) ; extern void list_sort(void * , struct list_head * , int (*)(void * , struct list_head * , struct list_head * ) ) ; __inline static void *lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) + 0xffff880000000000UL)); } } __inline static struct page *sg_page(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_29262: ; goto ldv_29262; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_29263: ; goto ldv_29263; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } extern unsigned long _copy_to_user(void * , void const * , unsigned int ) ; extern void __copy_to_user_overflow(void) ; __inline static unsigned long copy_to_user(void *to , void const *from , unsigned long n ) { int sz ; unsigned long tmp ; long tmp___0 ; { tmp = __builtin_object_size(from, 0); sz = (int 
)tmp; __might_fault("./arch/x86/include/asm/uaccess.h", 732); tmp___0 = ldv__builtin_expect((long )(sz < 0 || (unsigned long )sz >= n), 1L); if (tmp___0 != 0L) { n = _copy_to_user(to, from, (unsigned int )n); } else { __copy_to_user_overflow(); } return (n); } } __inline static void pagefault_disabled_inc(void) { struct task_struct *tmp ; { tmp = get_current(); tmp->pagefault_disabled = tmp->pagefault_disabled + 1; return; } } __inline static void pagefault_disabled_dec(void) { struct task_struct *tmp ; int __ret_warn_on ; struct task_struct *tmp___0 ; long tmp___1 ; { tmp = get_current(); tmp->pagefault_disabled = tmp->pagefault_disabled - 1; tmp___0 = get_current(); __ret_warn_on = tmp___0->pagefault_disabled < 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_null("include/linux/uaccess.h", 15); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } __inline static void pagefault_disable(void) { { pagefault_disabled_inc(); __asm__ volatile ("": : : "memory"); return; } } __inline static void pagefault_enable(void) { { __asm__ volatile ("": : : "memory"); pagefault_disabled_dec(); return; } } __inline static void *kmap_atomic(struct page *page ) { void *tmp ; { __preempt_count_add(1); __asm__ volatile ("": : : "memory"); pagefault_disable(); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void __kunmap_atomic(void *addr ) { { pagefault_enable(); __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } extern int drm_modeset_lock(struct drm_modeset_lock * , struct drm_modeset_acquire_ctx * ) ; extern void drm_modeset_unlock(struct drm_modeset_lock * ) ; extern char const *drm_get_connector_status_name(enum drm_connector_status ) ; extern char const *drm_get_subpixel_order_name(enum subpixel_order ) ; __inline static bool drm_mm_node_allocated(struct drm_mm_node *node ) { { return ((int )node->allocated != 0); } } extern u32 drm_crtc_vblank_count(struct 
drm_crtc * ) ; extern int drm_debugfs_create_files(struct drm_info_list const * , int , struct dentry * , struct drm_minor * ) ; extern int drm_debugfs_remove_files(struct drm_info_list const * , int , struct drm_minor * ) ; u64 intel_ring_get_active_head(struct intel_engine_cs *ring ) ; u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj ) ; struct i915_ggtt_view const i915_ggtt_view_normal ; __inline static uint32_t i915_gem_request_get_seqno(struct drm_i915_gem_request *req ) { { return ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0) ? req->seqno : 0U); } } __inline static struct intel_engine_cs *i915_gem_request_get_ring(struct drm_i915_gem_request *req ) { { return ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0) ? req->ring : (struct intel_engine_cs *)0); } } struct tracepoint __tracepoint_i915_reg_rw ; __inline static void trace_i915_reg_rw(bool write , u32 reg , u64 val , int len , bool trace ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_407 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_409 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___0(& __tracepoint_i915_reg_rw.key); if ((int )tmp___1) { if (! trace) { return; } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_reg_rw.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___0(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 656, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_44823: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , bool , u32 , u64 , int , bool ))it_func))(__data, (int )write, reg, val, len, (int )trace); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_44823; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } if ((int )trace) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_reg_rw.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___0(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 656, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } return; } } void i915_handle_error(struct drm_device *dev , bool wedged , char const *fmt , ...) 
; char const *intel_uncore_forcewake_domain_to_str(enum forcewake_domain_id const id ) ; int i915_gem_object_get_pages(struct drm_i915_gem_object *obj ) ; __inline static int __sg_page_count(struct scatterlist *sg ) { { return ((int )(sg->length >> 12)); } } __inline static struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj , int n ) { int __ret_warn_on ; long tmp ; long tmp___0 ; struct scatterlist *tmp___1 ; int tmp___2 ; long tmp___3 ; int tmp___4 ; struct page *tmp___5 ; { __ret_warn_on = (size_t )n >= obj->base.size >> 12; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2754, "WARN_ON(n >= obj->base.size >> PAGE_SHIFT)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return ((struct page *)0); } else { } if (obj->get_page.last > n) { obj->get_page.sg = (obj->pages)->sgl; obj->get_page.last = 0; } else { } goto ldv_45479; ldv_45478: tmp___1 = obj->get_page.sg; obj->get_page.sg = obj->get_page.sg + 1; tmp___2 = __sg_page_count(tmp___1); obj->get_page.last = obj->get_page.last + tmp___2; tmp___3 = ldv__builtin_expect((long )((int )(obj->get_page.sg)->page_link) & 1L, 0L); if (tmp___3 != 0L) { obj->get_page.sg = (struct scatterlist *)((obj->get_page.sg)->page_link & 0xfffffffffffffffcUL); } else { } ldv_45479: tmp___4 = __sg_page_count(obj->get_page.sg); if (obj->get_page.last + tmp___4 <= n) { goto ldv_45478; } else { } tmp___5 = sg_page(obj->get_page.sg); return ((struct page *)-24189255811072L + ((unsigned long )(((long )tmp___5 + 24189255811072L) / 64L) + (unsigned long )(n - obj->get_page.last))); } } __inline static bool i915_gem_request_completed___0(struct drm_i915_gem_request *req , bool lazy_coherency ) { u32 seqno ; long tmp ; bool tmp___0 ; { tmp = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0), 
0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2806), "i" (12UL)); ldv_45515: ; goto ldv_45515; } else { } seqno = (*((req->ring)->get_seqno))(req->ring, (int )lazy_coherency); tmp___0 = i915_seqno_passed(seqno, req->seqno); return (tmp___0); } } int i915_gem_set_seqno(struct drm_device *dev , u32 seqno ) ; bool i915_gem_retire_requests(struct drm_device *dev ) ; __inline static bool i915_reset_in_progress(struct i915_gpu_error *error ) { int tmp ; long tmp___0 ; { tmp = atomic_read((atomic_t const *)(& error->reset_counter)); tmp___0 = ldv__builtin_expect((tmp & -2147483647) != 0, 0L); return (tmp___0 != 0L); } } unsigned long i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o , struct i915_ggtt_view const *view ) ; __inline static unsigned long i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o ) { unsigned long tmp ; { tmp = i915_gem_obj_ggtt_offset_view(o, & i915_ggtt_view_normal); return (tmp); } } bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o , struct i915_ggtt_view const *view ) ; unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o , struct i915_address_space *vm ) ; struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) ; __inline static struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj ) { struct i915_vma *tmp ; { tmp = i915_gem_obj_to_ggtt_view(obj, & i915_ggtt_view_normal); return (tmp); } } bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj ) ; __inline static bool i915_is_ggtt(struct i915_address_space *vm ) { struct i915_address_space *ggtt ; { ggtt = & ((struct drm_i915_private *)(vm->dev)->dev_private)->gtt.base; return ((unsigned long )vm == (unsigned long )ggtt); } } __inline 
/* (the "__inline" keyword for this definition is on the previous line)
   i915_gem_obj_ggtt_bound(): true when the object has a binding in the
   global GTT under the normal view. */
static bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj ) { bool tmp ; { tmp = i915_gem_obj_ggtt_bound_view(obj, & i915_ggtt_view_normal); return (tmp); } }
/* i915_gem_obj_ggtt_size(): bytes the object occupies in the global GTT
   address space (dev_priv->gtt.base). */
__inline static unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj ) { unsigned long tmp ; { tmp = i915_gem_obj_size(obj, & ((struct drm_i915_private *)(obj->base.dev)->dev_private)->gtt.base); return (tmp); } }
/* i915_gem_context_is_default(): the default context is identified by a
   user handle of 0. */
__inline static bool i915_gem_context_is_default(struct intel_context const *c ) { { return ((int )c->user_handle == 0); } } unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv , long target , unsigned int flags ) ; int i915_debugfs_connector_add(struct drm_connector *connector ) ; void intel_display_crc_init(struct drm_device *dev ) ; char const *i915_cache_level_str(struct drm_i915_private *i915___0 , int type ) ; extern ssize_t drm_dp_dpcd_read(struct drm_dp_aux * , unsigned int , void * , size_t ) ; extern void drm_dp_mst_dump_topology(struct seq_file * , struct drm_dp_mst_topology_mgr * ) ;
/* intel_get_crtc_for_pipe(): map a hardware pipe index to its drm_crtc via
   dev_priv->pipe_to_crtc_mapping[]. */
__inline static struct drm_crtc *intel_get_crtc_for_pipe(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; return (dev_priv->pipe_to_crtc_mapping[pipe]); } }
/* intel_attached_encoder(): container_of-style downcast — the drm_connector
   sits at offset 0 inside intel_connector here (plain cast, no offset
   adjustment), then return its encoder. */
__inline static struct intel_encoder *intel_attached_encoder(struct drm_connector *connector ) { struct drm_connector const *__mptr ; { __mptr = (struct drm_connector const *)connector; return (((struct intel_connector *)__mptr)->encoder); } }
/* enc_to_dig_port(): container_of-style downcast — the drm_encoder sits at
   offset 0 inside intel_digital_port here. */
__inline static struct intel_digital_port *enc_to_dig_port(struct drm_encoder *encoder ) { struct drm_encoder const *__mptr ; { __mptr = (struct drm_encoder const *)encoder; return ((struct intel_digital_port *)__mptr); } }
/* enc_to_intel_dp(): DP payload of the digital port owning this encoder. */
__inline static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder ) { struct intel_digital_port *tmp ; { tmp = enc_to_dig_port(encoder); return (& tmp->dp); } } void intel_crtc_reset(struct intel_crtc *crtc ) ; void hsw_enable_ips(struct intel_crtc *crtc ) ; void
hsw_disable_ips(struct intel_crtc *crtc ) ; int intel_dp_sink_crc(struct intel_dp *intel_dp , u8 *crc ) ; struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder ) ;
/* yesno(): render a boolean-ish int as "yes"/"no" for debugfs output. */
static char const *yesno(int v ) { { return (v != 0 ? "yes" : "no"); } }
/* drm_add_fake_info_node(): register a heap-allocated drm_info_node for a
   debugfs entry created outside the drm_info_list tables, so it can be
   tracked/removed alongside the regular ones. kmalloc(40UL, 208U): 40 bytes =
   sizeof(struct drm_info_node); 208U is presumably GFP_KERNEL — confirm
   against this kernel's gfp.h. Returns -12 (-ENOMEM) and removes 'ent' on
   allocation failure; 0 on success. The node is linked into
   minor->debugfs_list under minor->debugfs_lock. */
static int drm_add_fake_info_node(struct drm_minor *minor , struct dentry *ent , void const *key ) { struct drm_info_node *node ; void *tmp ; { tmp = kmalloc(40UL, 208U); node = (struct drm_info_node *)tmp; if ((unsigned long )node == (unsigned long )((struct drm_info_node *)0)) { debugfs_remove(ent); return (-12); } else { } node->minor = minor; node->dent = ent; node->info_ent = (struct drm_info_list const *)key; mutex_lock_nested(& minor->debugfs_lock, 0U); list_add(& node->list, & minor->debugfs_list); mutex_unlock(& minor->debugfs_lock); return (0); } }
/* i915_capabilities(): debugfs show routine — prints the device generation,
   PCH type, and the yes/no value of each intel_device_info feature flag.
   (Continues onto the following lines.) */
static int i915_capabilities(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct intel_device_info const *info ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; char const *tmp ; char const *tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; char const *tmp___5 ; char const *tmp___6 ; char const *tmp___7 ; char const *tmp___8 ; char const *tmp___9 ; char const *tmp___10 ; char const *tmp___11 ; char const *tmp___12 ; char const *tmp___13 ; char const *tmp___14 ; char const *tmp___15 ; char const *tmp___16 ; char const *tmp___17 ; char const *tmp___18 ; char const *tmp___19 ; char const *tmp___20 ; char const *tmp___21 ; char const *tmp___22 ; char const *tmp___23 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; __p = to_i915((struct drm_device const *)dev); info = & __p->info; seq_printf(m, "gen: %d\n", (int )info->gen); __p___0 = to_i915((struct drm_device const *)dev); seq_printf(m, "pch: %d\n", (unsigned int )__p___0->pch_type); tmp = yesno((int )info->is_mobile); seq_printf(m, "is_mobile: %s\n", tmp); tmp___0 = yesno((int
)info->is_i85x); seq_printf(m, "is_i85x: %s\n", tmp___0); tmp___1 = yesno((int )info->is_i915g); seq_printf(m, "is_i915g: %s\n", tmp___1); tmp___2 = yesno((int )info->is_i945gm); seq_printf(m, "is_i945gm: %s\n", tmp___2); tmp___3 = yesno((int )info->is_g33); seq_printf(m, "is_g33: %s\n", tmp___3); tmp___4 = yesno((int )info->need_gfx_hws); seq_printf(m, "need_gfx_hws: %s\n", tmp___4); tmp___5 = yesno((int )info->is_g4x); seq_printf(m, "is_g4x: %s\n", tmp___5); tmp___6 = yesno((int )info->is_pineview); seq_printf(m, "is_pineview: %s\n", tmp___6); tmp___7 = yesno((int )info->is_broadwater); seq_printf(m, "is_broadwater: %s\n", tmp___7); tmp___8 = yesno((int )info->is_crestline); seq_printf(m, "is_crestline: %s\n", tmp___8); tmp___9 = yesno((int )info->is_ivybridge); seq_printf(m, "is_ivybridge: %s\n", tmp___9); tmp___10 = yesno((int )info->is_valleyview); seq_printf(m, "is_valleyview: %s\n", tmp___10); tmp___11 = yesno((int )info->is_haswell); seq_printf(m, "is_haswell: %s\n", tmp___11); tmp___12 = yesno((int )info->is_skylake); seq_printf(m, "is_skylake: %s\n", tmp___12); tmp___13 = yesno((int )info->is_preliminary); seq_printf(m, "is_preliminary: %s\n", tmp___13); tmp___14 = yesno((int )info->has_fbc); seq_printf(m, "has_fbc: %s\n", tmp___14); tmp___15 = yesno((int )info->has_pipe_cxsr); seq_printf(m, "has_pipe_cxsr: %s\n", tmp___15); tmp___16 = yesno((int )info->has_hotplug); seq_printf(m, "has_hotplug: %s\n", tmp___16); tmp___17 = yesno((int )info->cursor_needs_physical); seq_printf(m, "cursor_needs_physical: %s\n", tmp___17); tmp___18 = yesno((int )info->has_overlay); seq_printf(m, "has_overlay: %s\n", tmp___18); tmp___19 = yesno((int )info->overlay_needs_physical); seq_printf(m, "overlay_needs_physical: %s\n", tmp___19); tmp___20 = yesno((int )info->supports_tv); seq_printf(m, "supports_tv: %s\n", tmp___20); tmp___21 = yesno((int )info->has_llc); seq_printf(m, "has_llc: %s\n", tmp___21); tmp___22 = yesno((int )info->has_ddi); seq_printf(m, "has_ddi: %s\n", 
tmp___22); tmp___23 = yesno((int )info->has_fpga_dbg); seq_printf(m, "has_fpga_dbg: %s\n", tmp___23); return (0); } }
/* get_pin_flag(): "p" when the object is pinned for display, else " ". */
static char const *get_pin_flag(struct drm_i915_gem_object *obj ) { { if (obj->pin_display != 0U) { return ("p"); } else { return (" "); } } }
/* get_tiling_flag(): one-character tiling tag — " " untiled (mode 0),
   "X" for mode 1, "Y" for mode 2; unknown modes fall through to " ". */
static char const *get_tiling_flag(struct drm_i915_gem_object *obj ) { { switch ((int )obj->tiling_mode) { default: ; case 0: ; return (" "); case 1: ; return ("X"); case 2: ; return ("Y"); } } }
/* get_global_flag(): "g" when the object has a VMA in the global GTT
   (normal view), else " ". */
__inline static char const *get_global_flag(struct drm_i915_gem_object *obj ) { struct i915_vma *tmp ; { tmp = i915_gem_obj_to_ggtt(obj); return ((unsigned long )tmp != (unsigned long )((struct i915_vma *)0) ? "g" : " "); } }
/* describe_obj(): print a one-line debugfs summary of a GEM object — flag
   characters from the helpers above, size, read/write domains, per-ring
   last-read seqnos, fence/pin/display/stolen state and vma offsets.
   (Body continues onto the following lines.) */
static void describe_obj(struct seq_file *m , struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct intel_engine_cs *ring ; struct i915_vma *vma ; int pin_count ; int i ; char const *tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; uint32_t tmp___3 ; bool tmp___4 ; struct drm_i915_private *tmp___5 ; char const *tmp___6 ; uint32_t tmp___7 ; uint32_t tmp___8 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; bool tmp___9 ; int tmp___10 ; struct list_head const *__mptr___2 ; char s[3U] ; char *t ; char *tmp___11 ; char *tmp___12 ; struct intel_engine_cs *tmp___13 ; { tmp = to_i915((struct drm_device const *)obj->base.dev); dev_priv = tmp; pin_count = 0; tmp___0 = get_global_flag(obj); tmp___1 = get_tiling_flag(obj); tmp___2 = get_pin_flag(obj); seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", & obj->base, (unsigned int )*((unsigned char *)obj + 408UL) != 0U ?
(char *)"*" : (char *)" ", tmp___2, tmp___1, tmp___0, obj->base.size / 1024UL, obj->base.read_domains, obj->base.write_domain); i = 0; goto ldv_48213; ldv_48212: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___4 = intel_ring_initialized(ring); if ((int )tmp___4) { tmp___3 = i915_gem_request_get_seqno(obj->last_read_req[i]); seq_printf(m, "%x ", tmp___3); } else { } i = i + 1; ldv_48213: ; if (i <= 4) { goto ldv_48212; } else { } tmp___5 = to_i915((struct drm_device const *)obj->base.dev); tmp___6 = i915_cache_level_str(tmp___5, (int )obj->cache_level); tmp___7 = i915_gem_request_get_seqno(obj->last_fenced_req); tmp___8 = i915_gem_request_get_seqno(obj->last_write_req); seq_printf(m, "] %x %x%s%s%s", tmp___8, tmp___7, tmp___6, (unsigned int )*((unsigned char *)obj + 408UL) != 0U ? (char *)" dirty" : (char *)"", (unsigned int )*((unsigned char *)obj + 409UL) == 16U ? (char *)" purgeable" : (char *)""); if (obj->base.name != 0) { seq_printf(m, " (name: %d)", obj->base.name); } else { } __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_48220; ldv_48219: ; if ((int )vma->pin_count > 0) { pin_count = pin_count + 1; } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_48220: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_48219; } else { } seq_printf(m, " (pinned x %d)", pin_count); if (obj->pin_display != 0U) { seq_printf(m, " (display)"); } else { } if ((unsigned int )*((unsigned short *)obj + 204UL) != 4032U) { seq_printf(m, " (fence: %d)", (int )obj->fence_reg); } else { } __mptr___1 = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; goto ldv_48227; ldv_48226: tmp___9 = i915_is_ggtt(vma->vm); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { seq_puts(m, " (pp"); } else { 
/* NOTE(review): CIL/LDV-flattened C (apparently derived from the i915 debugfs
 * implementation).  The ldv_* labels plus gotos are flattened list-iteration
 * loops, and the large hex constants are container_of() pointer arithmetic.
 * Code is left byte-identical; comments only. */
/* Tail of describe_obj(): finishes the per-VMA "gtt offset/size/type" line,
 * prints the stolen-memory node start (if any), builds a short flag string in
 * the caller-scope buffer s ('p' = pin_display set, 'f' = the flag at byte
 * offset 410 of obj -- a CIL-flattened bitfield, presumably fault/mappable;
 * confirm against the original struct), names the ring owning last_write_req,
 * and prints frontbuffer_bits when the u16 at byte offset 206 is non-zero. */
seq_puts(m, " (g"); } seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)", vma->node.start, vma->node.size, (unsigned int )vma->ggtt_view.type); __mptr___2 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___2 + 0xffffffffffffff58UL; ldv_48227: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_48226; } else { } if ((unsigned long )obj->stolen != (unsigned long )((struct drm_mm_node *)0)) { seq_printf(m, " (stolen: %08llx)", (obj->stolen)->start); } else { } if (obj->pin_display != 0U || (unsigned int )*((unsigned char *)obj + 410UL) != 0U) { t = (char *)(& s); if (obj->pin_display != 0U) { tmp___11 = t; t = t + 1; *tmp___11 = 112; } else { } if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { tmp___12 = t; t = t + 1; *tmp___12 = 102; } else { } *t = 0; seq_printf(m, " (%s mappable)", (char *)(& s)); } else { } if ((unsigned long )obj->last_write_req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp___13 = i915_gem_request_get_ring(obj->last_write_req); seq_printf(m, " (%s)", tmp___13->name); } else { } if ((unsigned int )*((unsigned short *)obj + 206UL) != 0U) { seq_printf(m, " (frontbuffer: 0x%03x)", (int )obj->frontbuffer_bits); } else { } return; } }
/* describe_ctx(): emit a three-character context summary.  'I' (73) or 'i'
 * (105) for legacy_hw_ctx.initialized, 'R' (82) or 'r' (114) for a non-zero
 * remap_slice mask, then a space (32). */
static void describe_ctx(struct seq_file *m , struct intel_context *ctx ) { { seq_putc(m, (int )ctx->legacy_hw_ctx.initialized ? 73 : 105); seq_putc(m, (unsigned int )ctx->remap_slice != 0U ?
82 : 114); seq_putc(m, 32); return; } }
/* i915_gem_object_list_info(): debugfs dump of one VMA list of the global GTT
 * address space (dev_priv->gtt.base), selected by the info_ent data word.
 * Takes dev->struct_mutex interruptibly; for each VMA on the chosen list it
 * prints describe_obj() output and accumulates object size and VMA node size,
 * ending with a totals line. */
static int i915_gem_object_list_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; uintptr_t list ; struct list_head *head ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct i915_address_space *vm ; struct i915_vma *vma ; size_t total_obj_size ; size_t total_gtt_size ; int count ; int ret ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; list = (unsigned long )(node->info_ent)->data; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; vm = & dev_priv->gtt.base; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { }
/* list id from the debugfs info_ent: 0 = active list, 1 = inactive list,
 * anything else unlocks and returns -22 (-EINVAL). */
switch (list) { case 0UL: seq_puts(m, "Active:\n"); head = & vm->active_list; goto ldv_48251; case 1UL: seq_puts(m, "Inactive:\n"); head = & vm->inactive_list; goto ldv_48251; default: mutex_unlock(& dev->struct_mutex); return (-22); } ldv_48251: count = 0; total_gtt_size = 0UL; total_obj_size = total_gtt_size; __mptr = (struct list_head const *)head->next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff68UL; goto ldv_48259; ldv_48258: seq_printf(m, "   "); describe_obj(m, vma->obj); seq_printf(m, "\n"); total_obj_size = (vma->obj)->base.size + total_obj_size; total_gtt_size = (size_t )(vma->node.size + (unsigned long long )total_gtt_size); count = count + 1; __mptr___0 = (struct list_head const *)vma->mm_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff68UL; ldv_48259: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )head) { goto ldv_48258; } else { } mutex_unlock(& dev->struct_mutex); seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", count, total_obj_size, total_gtt_size); return (0); } }
/* obj_rank_by_stolen(): list_sort() comparator; orders gem objects (linked
 * through obj_exec_link) by the start offset of their stolen-memory node,
 * ascending.  Body continues on the following source line. */
static int obj_rank_by_stolen(void *priv , struct list_head *A , struct list_head *B ) { struct drm_i915_gem_object *a ; struct list_head const *__mptr ; struct drm_i915_gem_object *b ; struct list_head const
/* Continuation of obj_rank_by_stolen(): recover both objects from their
 * obj_exec_link members (container_of offset 0xfffffffffffffe88UL == -376)
 * and return the difference of the stolen-node start offsets, truncated to
 * unsigned int then int. */
*__mptr___0 ; { __mptr = (struct list_head const *)A; a = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffe88UL; __mptr___0 = (struct list_head const *)B; b = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffe88UL; return ((int )((unsigned int )(a->stolen)->start - (unsigned int )(b->stolen)->start)); } }
/* i915_gem_stolen_list_info(): debugfs dump of every gem object backed by
 * stolen memory.  Under dev->struct_mutex it collects matches from both
 * mm.bound_list and mm.unbound_list onto the local "stolen" list via
 * obj_exec_link, sorts them by stolen start offset, prints each with
 * describe_obj(), and unlinks them again.  Note total_gtt_size only grows in
 * the bound-list pass (i915_gem_obj_ggtt_size). */
static int i915_gem_stolen_list_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; size_t total_obj_size ; size_t total_gtt_size ; struct list_head stolen ; int count ; int ret ; struct list_head const *__mptr ; unsigned long tmp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; int tmp___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private;
/* Open-coded INIT_LIST_HEAD(&stolen). */
stolen.next = & stolen; stolen.prev = & stolen; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } count = 0; total_gtt_size = 0UL; total_obj_size = total_gtt_size; __mptr = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_48291; ldv_48290: ; if ((unsigned long )obj->stolen == (unsigned long )((struct drm_mm_node *)0)) { goto ldv_48289; } else { } list_add(& obj->obj_exec_link, & stolen); total_obj_size = obj->base.size + total_obj_size; tmp = i915_gem_obj_ggtt_size(obj); total_gtt_size = tmp + total_gtt_size; count = count + 1; ldv_48289: __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_48291: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_48290; } else { } __mptr___1 = (struct list_head const *)dev_priv->mm.unbound_list.next; obj = (struct
drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffee8UL; goto ldv_48299; ldv_48298: ; if ((unsigned long )obj->stolen == (unsigned long )((struct drm_mm_node *)0)) { goto ldv_48297; } else { } list_add(& obj->obj_exec_link, & stolen); total_obj_size = obj->base.size + total_obj_size; count = count + 1; ldv_48297: __mptr___2 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___2 + 0xfffffffffffffee8UL; ldv_48299: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.unbound_list)) { goto ldv_48298; } else { }
/* Sort ascending by stolen start, then drain the local list, printing and
 * unlinking each entry until it is empty. */
list_sort((void *)0, & stolen, & obj_rank_by_stolen); seq_puts(m, "Stolen:\n"); goto ldv_48304; ldv_48303: __mptr___3 = (struct list_head const *)stolen.next; obj = (struct drm_i915_gem_object *)__mptr___3 + 0xfffffffffffffe88UL; seq_puts(m, "   "); describe_obj(m, obj); seq_putc(m, 10); list_del_init(& obj->obj_exec_link); ldv_48304: tmp___0 = list_empty((struct list_head const *)(& stolen)); if (tmp___0 == 0) { goto ldv_48303; } else { } mutex_unlock(& dev->struct_mutex); seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", count, total_obj_size, total_gtt_size); return (0); } }
/* per_file_stats(): idr_for_each() callback accumulating one object's sizes
 * into the struct file_stats passed via data.  count/total always grow;
 * shared grows when the object has a flink name or a dma_buf export.  With
 * full PPGTT enabled (i915.enable_ppgtt == 2) it walks obj->vma_list (body
 * continues on the following lines); otherwise it classifies by the GGTT
 * binding alone. */
static int per_file_stats(int id , void *ptr , void *data ) { struct drm_i915_gem_object *obj ; struct file_stats *stats ; struct i915_vma *vma ; struct list_head const *__mptr ; struct i915_hw_ppgtt *ppgtt ; bool tmp ; int tmp___0 ; bool tmp___1 ; struct i915_address_space const *__mptr___0 ; struct list_head const *__mptr___1 ; bool tmp___2 ; int tmp___3 ; { obj = (struct drm_i915_gem_object *)ptr; stats = (struct file_stats *)data; stats->count = stats->count + 1; stats->total = stats->total + obj->base.size; if (obj->base.name != 0 || (unsigned long )obj->base.dma_buf != (unsigned long )((struct dma_buf *)0)) { stats->shared = stats->shared + obj->base.size; } else { } if (i915.enable_ppgtt == 2) { __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL;
/* Continuation of per_file_stats(): walk obj->vma_list.  Unallocated VMA
 * nodes are skipped; a GGTT VMA adds the object to stats->global; a PPGTT VMA
 * counts only when its address space belongs to stats->file_priv, and then
 * the object lands in active or inactive depending on the flag at byte
 * offset 408 of obj (CIL-flattened bitfield, presumably obj->active --
 * confirm).  Objects still linked on global_list add to stats->unbound. */
goto ldv_48332; ldv_48331: tmp = drm_mm_node_allocated(& vma->node); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_48328; } else { } tmp___1 = i915_is_ggtt(vma->vm); if ((int )tmp___1) { stats->global = stats->global + obj->base.size; goto ldv_48328; } else { } __mptr___0 = (struct i915_address_space const *)vma->vm; ppgtt = (struct i915_hw_ppgtt *)__mptr___0; if ((unsigned long )ppgtt->file_priv != (unsigned long )stats->file_priv) { goto ldv_48328; } else { } if ((unsigned int )*((unsigned char *)obj + 408UL) != 0U) { stats->active = stats->active + obj->base.size; } else { stats->inactive = stats->inactive + obj->base.size; } return (0); ldv_48328: __mptr___1 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; ldv_48332: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_48331; } else { } } else { tmp___2 = i915_gem_obj_ggtt_bound(obj); if ((int )tmp___2) { stats->global = stats->global + obj->base.size; if ((unsigned int )*((unsigned char *)obj + 408UL) != 0U) { stats->active = stats->active + obj->base.size; } else { stats->inactive = stats->inactive + obj->base.size; } return (0); } else { } } tmp___3 = list_empty((struct list_head const *)(& obj->global_list)); if (tmp___3 == 0) { stats->unbound = stats->unbound + obj->base.size; } else { } return (0); } }
/* print_batch_pool_stats(): zero a local file_stats (64 bytes), feed every
 * object on each ring's batch-pool cache lists through per_file_stats()
 * (rings i = 0..4, buckets j = 0..3, only for initialized rings), and print a
 * single "[k]batch pool" summary line if anything was counted. */
static void print_batch_pool_stats(struct seq_file *m , struct drm_i915_private *dev_priv ) { struct drm_i915_gem_object *obj ; struct file_stats stats ; struct intel_engine_cs *ring ; int i ; int j ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; bool tmp ; { memset((void *)(& stats), 0, 64UL); i = 0; goto ldv_48356; ldv_48355: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { j = 0; goto ldv_48353; ldv_48352: __mptr = (struct list_head const *)((struct list_head *)(&
ring->batch_pool.cache_list) + (unsigned long )j)->next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffe78UL; goto ldv_48350; ldv_48349: per_file_stats(0, (void *)obj, (void *)(& stats)); __mptr___0 = (struct list_head const *)obj->batch_pool_link.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffe78UL; ldv_48350: ; if ((unsigned long )(& obj->batch_pool_link) != (unsigned long )((struct list_head *)(& ring->batch_pool.cache_list) + (unsigned long )j)) { goto ldv_48349; } else { } j = j + 1; ldv_48353: ; if ((unsigned int )j <= 3U) { goto ldv_48352; } else { } } else { } i = i + 1; ldv_48356: ; if (i <= 4) { goto ldv_48355; } else { } if (stats.count != 0) { seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", (char *)"[k]batch pool", stats.count, stats.total, stats.active, stats.inactive, stats.global, stats.shared, stats.unbound); } else { } return; } }
/* i915_gem_object_info(): top-level debugfs object/memory summary.  Under
 * dev->struct_mutex it tallies bound, active, inactive, unbound, purgeable
 * and mappable object counts/sizes, prints GTT totals and batch-pool stats,
 * then walks dev->filelist printing a per-client file_stats line (body
 * continues on the following source lines). */
static int i915_gem_object_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 count ; u32 mappable_count ; u32 purgeable_count ; size_t size ; size_t mappable_size ; size_t purgeable_size ; struct drm_i915_gem_object *obj ; struct i915_address_space *vm ; struct drm_file *file ; struct i915_vma *vma ; int ret ; struct list_head const *__mptr ; unsigned long tmp ; unsigned long tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; unsigned long tmp___1 ; unsigned long tmp___2 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; unsigned long tmp___3 ; unsigned long tmp___4 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; struct list_head const *__mptr___7 ; unsigned long tmp___5 ; unsigned long tmp___6 ; struct list_head const *__mptr___8 ; struct list_head const *__mptr___9 ; struct file_stats stats ; struct task_struct *task ;
struct list_head const *__mptr___10 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; vm = & dev_priv->gtt.base; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } seq_printf(m, "%u objects, %zu bytes\n", dev_priv->mm.object_count, dev_priv->mm.object_memory); mappable_count = 0U; mappable_size = 0UL; count = (u32 )mappable_size; size = (size_t )count; __mptr = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_48381; ldv_48380: tmp = i915_gem_obj_ggtt_size(obj); size = tmp + size; count = count + 1U; if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { tmp___0 = i915_gem_obj_ggtt_size(obj); mappable_size = tmp___0 + mappable_size; mappable_count = mappable_count + 1U; } else { } __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_48381: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_48380; } else { } seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", count, mappable_count, size, mappable_size); mappable_count = 0U; mappable_size = 0UL; count = (u32 )mappable_size; size = (size_t )count; __mptr___1 = (struct list_head const *)vm->active_list.next; vma = (struct i915_vma *)__mptr___1 + 0xffffffffffffff68UL; goto ldv_48388; ldv_48387: tmp___1 = i915_gem_obj_ggtt_size(vma->obj); size = tmp___1 + size; count = count + 1U; if ((unsigned int )*((unsigned char *)vma->obj + 410UL) != 0U) { tmp___2 = i915_gem_obj_ggtt_size(vma->obj); mappable_size = tmp___2 + mappable_size; mappable_count = mappable_count + 1U; } else { } __mptr___2 = (struct list_head const *)vma->mm_list.next; vma = (struct i915_vma *)__mptr___2 + 0xffffffffffffff68UL; ldv_48388: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )(& 
vm->active_list)) { goto ldv_48387; } else { } seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); mappable_count = 0U; mappable_size = 0UL; count = (u32 )mappable_size; size = (size_t )count; __mptr___3 = (struct list_head const *)vm->inactive_list.next; vma = (struct i915_vma *)__mptr___3 + 0xffffffffffffff68UL; goto ldv_48395; ldv_48394: tmp___3 = i915_gem_obj_ggtt_size(vma->obj); size = tmp___3 + size; count = count + 1U; if ((unsigned int )*((unsigned char *)vma->obj + 410UL) != 0U) { tmp___4 = i915_gem_obj_ggtt_size(vma->obj); mappable_size = tmp___4 + mappable_size; mappable_count = mappable_count + 1U; } else { } __mptr___4 = (struct list_head const *)vma->mm_list.next; vma = (struct i915_vma *)__mptr___4 + 0xffffffffffffff68UL; ldv_48395: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )(& vm->inactive_list)) { goto ldv_48394; } else { } seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); purgeable_count = 0U; purgeable_size = 0UL; count = (u32 )purgeable_size; size = (size_t )count; __mptr___5 = (struct list_head const *)dev_priv->mm.unbound_list.next; obj = (struct drm_i915_gem_object *)__mptr___5 + 0xfffffffffffffee8UL; goto ldv_48402; ldv_48401: size = obj->base.size + size; count = count + 1U; if ((unsigned int )*((unsigned char *)obj + 409UL) == 16U) { purgeable_size = obj->base.size + purgeable_size; purgeable_count = purgeable_count + 1U; } else { } __mptr___6 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___6 + 0xfffffffffffffee8UL; ldv_48402: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.unbound_list)) { goto ldv_48401; } else { } seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); mappable_count = 0U; mappable_size = 0UL; count = (u32 )mappable_size; size = (size_t )count; __mptr___7 = (struct list_head const *)dev_priv->mm.bound_list.next; 
obj = (struct drm_i915_gem_object *)__mptr___7 + 0xfffffffffffffee8UL; goto ldv_48409; ldv_48408: ; if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { tmp___5 = i915_gem_obj_ggtt_size(obj); size = tmp___5 + size; count = count + 1U; } else { } if (obj->pin_display != 0U) { tmp___6 = i915_gem_obj_ggtt_size(obj); mappable_size = tmp___6 + mappable_size; mappable_count = mappable_count + 1U; } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) == 16U) { purgeable_size = obj->base.size + purgeable_size; purgeable_count = purgeable_count + 1U; } else { } __mptr___8 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___8 + 0xfffffffffffffee8UL; ldv_48409: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_48408; } else { } seq_printf(m, "%u purgeable objects, %zu bytes\n", purgeable_count, purgeable_size); seq_printf(m, "%u pinned mappable objects, %zu bytes\n", mappable_count, mappable_size); seq_printf(m, "%u fault mappable objects, %zu bytes\n", count, size); seq_printf(m, "%zu [%lu] gtt total\n", dev_priv->gtt.base.total, dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); seq_putc(m, 10); print_batch_pool_stats(m, dev_priv); __mptr___9 = (struct list_head const *)dev->filelist.prev; file = (struct drm_file *)__mptr___9 + 0xffffffffffffffe8UL; goto ldv_48418; ldv_48417: memset((void *)(& stats), 0, 64UL); stats.file_priv = (struct drm_i915_file_private *)file->driver_priv; spin_lock(& file->table_lock); idr_for_each(& file->object_idr, & per_file_stats, (void *)(& stats)); spin_unlock(& file->table_lock); rcu_read_lock(); task = pid_task(file->pid, 0); if (stats.count != 0) { seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", (unsigned long )task != (unsigned long )((struct task_struct *)0) ? 
/* Tail of i915_gem_object_info(): finish the per-client line (task comm
 * resolved under rcu_read_lock, empty string when pid_task() found none),
 * iterate dev->filelist backwards via lhead.prev, then unlock and return. */
(char *)(& task->comm) : (char *)"", stats.count, stats.total, stats.active, stats.inactive, stats.global, stats.shared, stats.unbound); } else { } rcu_read_unlock(); __mptr___10 = (struct list_head const *)file->lhead.prev; file = (struct drm_file *)__mptr___10 + 0xffffffffffffffe8UL; ldv_48418: ; if ((unsigned long )(& file->lhead) != (unsigned long )(& dev->filelist)) { goto ldv_48417; } else { } mutex_unlock(& dev->struct_mutex); return (0); } }
/* i915_gem_gtt_info(): debugfs dump of mm.bound_list.  When the info_ent
 * data word is 2, only objects for which i915_gem_obj_is_pinned() holds are
 * printed; every printed object adds its base size and GGTT size to the
 * totals reported on the final line. */
static int i915_gem_gtt_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; uintptr_t list ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; size_t total_obj_size ; size_t total_gtt_size ; int count ; int ret ; struct list_head const *__mptr ; bool tmp ; int tmp___0 ; unsigned long tmp___1 ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; list = (unsigned long )(node->info_ent)->data; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } count = 0; total_gtt_size = 0UL; total_obj_size = total_gtt_size; __mptr = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_48439; ldv_48438: ; if (list == 2UL) { tmp = i915_gem_obj_is_pinned(obj); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_48437; } else { } } else { } seq_puts(m, "   "); describe_obj(m, obj); seq_putc(m, 10); total_obj_size = obj->base.size + total_obj_size; tmp___1 = i915_gem_obj_ggtt_size(obj); total_gtt_size = tmp___1 + total_gtt_size; count = count + 1; ldv_48437: __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_48439: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_48438; } else { }
mutex_unlock(& dev->struct_mutex); seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", count, total_obj_size, total_gtt_size); return (0); } }
/* i915_gem_pageflip_info(): debugfs dump of pending page-flip state, one
 * entry per CRTC ('A' + pipe / 'A' + plane), taken under dev->event_lock.
 * For a queued flip it reports the owning ring, queued/current seqnos and
 * completion state (body continues on the following source lines). */
static int i915_gem_pageflip_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; int ret ; struct list_head const *__mptr ; char pipe ; char plane ; struct intel_unpin_work *work ; u32 addr ; int tmp ; struct intel_engine_cs *ring ; struct intel_engine_cs *tmp___0 ; bool tmp___1 ; u32 tmp___2 ; uint32_t tmp___3 ; u32 tmp___4 ; int tmp___5 ; uint32_t tmp___6 ; struct drm_i915_private *__p ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_48466; ldv_48465: pipe = (char const )((unsigned int )((unsigned char )crtc->pipe) + 65U); plane = (char const )((unsigned int )((unsigned char )crtc->plane) + 65U); spin_lock_irq(& dev->event_lock); work = crtc->unpin_work; if ((unsigned long )work == (unsigned long )((struct intel_unpin_work *)0)) { seq_printf(m, "No flip due on pipe %c (plane %c)\n", (int )pipe, (int )plane); } else { tmp = atomic_read((atomic_t const *)(& work->pending)); if (tmp <= 1) { seq_printf(m, "Flip queued on pipe %c (plane %c)\n", (int )pipe, (int )plane); } else { seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", (int )pipe, (int )plane); } if ((unsigned long )work->flip_queued_req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp___0 = i915_gem_request_get_ring(work->flip_queued_req); ring = tmp___0; tmp___1 = i915_gem_request_completed___0(work->flip_queued_req, 1); tmp___2 = (*(ring->get_seqno))(ring, 1); tmp___3
= i915_gem_request_get_seqno(work->flip_queued_req); seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n", ring->name, tmp___3, dev_priv->next_seqno, tmp___2, (int )tmp___1); } else { seq_printf(m, "Flip not associated with any ring\n"); } tmp___4 = drm_crtc_vblank_count(& crtc->base); seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", work->flip_queued_vblank, work->flip_ready_vblank, tmp___4); if ((int )work->enable_stall_check) { seq_puts(m, "Stall check enabled, "); } else { seq_puts(m, "Stall check waiting for page flip ioctl, "); } tmp___5 = atomic_read((atomic_t const *)(& work->pending)); seq_printf(m, "%d prepares\n", tmp___5); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 1); addr = tmp___6 & 4294963200U; } else { addr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), 1); } seq_printf(m, "Current scanout address 0x%08x\n", addr); if ((unsigned long )work->pending_flip_obj != (unsigned long )((struct drm_i915_gem_object *)0)) { seq_printf(m, "New framebuffer address 0x%08lx\n", (long )work->gtt_offset); seq_printf(m, "MMIO update completed? 
%d\n", work->gtt_offset == addr); } else { } } spin_unlock_irq(& dev->event_lock); __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_48466: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_48465; } else { } mutex_unlock(& dev->struct_mutex); return (0); } } static int i915_gem_batch_pool_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct intel_engine_cs *ring ; int total ; int ret ; int i ; int j ; int count ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; bool tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; total = 0; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_48502; ldv_48501: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { j = 0; goto ldv_48499; ldv_48498: count = 0; __mptr = (struct list_head const *)((struct list_head *)(& ring->batch_pool.cache_list) + (unsigned long )j)->next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffe78UL; goto ldv_48489; ldv_48488: count = count + 1; __mptr___0 = (struct list_head const *)obj->batch_pool_link.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffe78UL; ldv_48489: ; if ((unsigned long )(& obj->batch_pool_link) != (unsigned long )((struct list_head *)(& ring->batch_pool.cache_list) + (unsigned long )j)) { goto ldv_48488; } else { } seq_printf(m, "%s cache[%d]: %d objects\n", ring->name, j, count); __mptr___1 = (struct list_head const *)((struct list_head *)(& ring->batch_pool.cache_list) + (unsigned long 
/* Tail of i915_gem_batch_pool_info(): second pass over cache bucket j prints
 * describe_obj() for each pooled object, accumulates per-bucket counts into
 * total (buckets j = 0..3, rings i = 0..4), then prints the grand total and
 * unlocks. */
)j)->next; obj = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffe78UL; goto ldv_48496; ldv_48495: seq_puts(m, "   "); describe_obj(m, obj); seq_putc(m, 10); __mptr___2 = (struct list_head const *)obj->batch_pool_link.next; obj = (struct drm_i915_gem_object *)__mptr___2 + 0xfffffffffffffe78UL; ldv_48496: ; if ((unsigned long )(& obj->batch_pool_link) != (unsigned long )((struct list_head *)(& ring->batch_pool.cache_list) + (unsigned long )j)) { goto ldv_48495; } else { } total = total + count; j = j + 1; ldv_48499: ; if ((unsigned int )j <= 3U) { goto ldv_48498; } else { } } else { } i = i + 1; ldv_48502: ; if (i <= 4) { goto ldv_48501; } else { } seq_printf(m, "total: %d\n", total); mutex_unlock(& dev->struct_mutex); return (0); } }
/* i915_gem_request_info(): debugfs dump of outstanding GEM requests.  Under
 * dev->struct_mutex, for each initialized ring (i = 0..4) with a non-empty
 * request_list: print the request count, then one line per request with
 * seqno, age in jiffies since emitted_jiffies, and the submitting task's
 * comm/pid (resolved via pid_task() under rcu_read_lock; empty name and pid
 * -1 when unresolvable).  Prints "No requests\n" when every ring was idle. */
static int i915_gem_request_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct drm_i915_gem_request *req ; int ret ; int any ; int i ; int count ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct task_struct *task ; struct list_head const *__mptr___2 ; bool tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } any = 0; i = 0; goto ldv_48534; ldv_48533: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { count = 0; __mptr = (struct list_head const *)ring->request_list.next; req = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; goto ldv_48522; ldv_48521: count = count + 1; __mptr___0 = (struct list_head const *)req->list.next; req = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffffb8UL; ldv_48522: ; if ((unsigned long )(& req->list) != (unsigned long )(&
ring->request_list)) { goto ldv_48521; } else { } if (count == 0) { goto ldv_48524; } else { } seq_printf(m, "%s requests: %d\n", ring->name, count); __mptr___1 = (struct list_head const *)ring->request_list.next; req = (struct drm_i915_gem_request *)__mptr___1 + 0xffffffffffffffb8UL; goto ldv_48531; ldv_48530: rcu_read_lock(); task = (struct task_struct *)0; if ((unsigned long )req->pid != (unsigned long )((struct pid *)0)) { task = pid_task(req->pid, 0); } else { } seq_printf(m, "    %x @ %d: %s [%d]\n", req->seqno, (int )((unsigned int )jiffies - (unsigned int )req->emitted_jiffies), (unsigned long )task != (unsigned long )((struct task_struct *)0) ? (char *)(& task->comm) : (char *)"", (unsigned long )task != (unsigned long )((struct task_struct *)0) ? task->pid : -1); rcu_read_unlock(); __mptr___2 = (struct list_head const *)req->list.next; req = (struct drm_i915_gem_request *)__mptr___2 + 0xffffffffffffffb8UL; ldv_48531: ; if ((unsigned long )(& req->list) != (unsigned long )(& ring->request_list)) { goto ldv_48530; } else { } any = any + 1; } else { } ldv_48524: i = i + 1; ldv_48534: ; if (i <= 4) { goto ldv_48533; } else { } mutex_unlock(& dev->struct_mutex); if (any == 0) { seq_puts(m, "No requests\n"); } else { } return (0); } }
/* i915_ring_seqno_info(): print the ring's current hardware sequence number
 * via ring->get_seqno (second argument 0 -- presumably the lazy_coherency
 * flag, confirm against the ring definition), if the hook is set. */
static void i915_ring_seqno_info(struct seq_file *m , struct intel_engine_cs *ring ) { u32 tmp ; { if ((unsigned long )ring->get_seqno != (unsigned long )((u32 (*)(struct intel_engine_cs * , bool ))0)) { tmp = (*(ring->get_seqno))(ring, 0); seq_printf(m, "Current sequence (%s): %x\n", ring->name, tmp); } else { } return; } }
/* i915_gem_seqno_info(): debugfs wrapper that, under dev->struct_mutex and a
 * runtime-PM reference, prints the current seqno of every initialized ring
 * (body continues on the following source line). */
static int i915_gem_seqno_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int i ; bool tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret
!= 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); i = 0; goto ldv_48551; ldv_48550: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { i915_ring_seqno_info(m, ring); } else { } i = i + 1; ldv_48551: ; if (i <= 4) { goto ldv_48550; } else { } intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex); return (0); } } static int i915_interrupt_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int i ; int pipe ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; struct drm_i915_private *__p ; uint32_t tmp___5 ; uint32_t tmp___6 ; uint32_t tmp___7 ; uint32_t tmp___8 ; uint32_t tmp___9 ; uint32_t tmp___10 ; uint32_t tmp___11 ; uint32_t tmp___12 ; uint32_t tmp___13 ; uint32_t tmp___14 ; uint32_t tmp___15 ; uint32_t tmp___16 ; uint32_t tmp___17 ; bool tmp___18 ; int tmp___19 ; uint32_t tmp___20 ; uint32_t tmp___21 ; uint32_t tmp___22 ; struct drm_i915_private *__p___0 ; uint32_t tmp___23 ; uint32_t tmp___24 ; uint32_t tmp___25 ; uint32_t tmp___26 ; uint32_t tmp___27 ; uint32_t tmp___28 ; uint32_t tmp___29 ; uint32_t tmp___30 ; uint32_t tmp___31 ; uint32_t tmp___32 ; uint32_t tmp___33 ; uint32_t tmp___34 ; uint32_t tmp___35 ; uint32_t tmp___36 ; struct drm_i915_private *__p___1 ; uint32_t tmp___37 ; uint32_t tmp___38 ; uint32_t tmp___39 ; uint32_t tmp___40 ; uint32_t tmp___41 ; uint32_t tmp___42 ; uint32_t tmp___43 ; uint32_t tmp___44 ; uint32_t tmp___45 ; uint32_t tmp___46 ; uint32_t tmp___47 ; uint32_t tmp___48 ; uint32_t tmp___49 ; uint32_t tmp___50 ; struct drm_i915_private *__p___2 ; uint32_t tmp___51 ; uint32_t tmp___52 ; uint32_t tmp___53 ; uint32_t tmp___54 ; uint32_t tmp___55 ; uint32_t tmp___56 ; uint32_t tmp___57 ; uint32_t tmp___58 ; uint32_t tmp___59 ; struct drm_i915_private *__p___3 ; 
struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; uint32_t tmp___60 ; struct drm_i915_private *__p___8 ; bool tmp___61 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) == 8U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 1); seq_printf(m, "Master Interrupt Control:\t%08x\n", tmp); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581216L, 1); seq_printf(m, "Display IER:\t%08x\n", tmp___0); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581220L, 1); seq_printf(m, "Display IIR:\t%08x\n", tmp___1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581188L, 1); seq_printf(m, "Display IIR_RW:\t%08x\n", tmp___2); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581224L, 1); seq_printf(m, "Display IMR:\t%08x\n", tmp___3); pipe = 0; goto ldv_48583; ldv_48582: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); seq_printf(m, "Pipe %c stat:\t%08x\n", pipe + 65, tmp___4); pipe = pipe + 1; ldv_48583: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_48582; } else { } tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 1); seq_printf(m, "Port hotplug:\t%08x\n", tmp___5); tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031656L, 
1); seq_printf(m, "DPFLIPSTAT:\t%08x\n", tmp___6); tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031660L, 1); seq_printf(m, "DPINVGTT:\t%08x\n", tmp___7); i = 0; goto ldv_48586; ldv_48585: tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(i * 16 + 279300), 1); seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", i, tmp___8); tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(i * 16 + 279304), 1); seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", i, tmp___9); tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(i * 16 + 279308), 1); seq_printf(m, "GT Interrupt IER %d:\t%08x\n", i, tmp___10); i = i + 1; ldv_48586: ; if (i <= 3) { goto ldv_48585; } else { } tmp___11 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279780L, 1); seq_printf(m, "PCU interrupt mask:\t%08x\n", tmp___11); tmp___12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 1); seq_printf(m, "PCU interrupt identity:\t%08x\n", tmp___12); tmp___13 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279788L, 1); seq_printf(m, "PCU interrupt enable:\t%08x\n", tmp___13); } else { goto _L; } } else { _L: /* CIL Label */ __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 7U) { tmp___14 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 1); seq_printf(m, "Master Interrupt Control:\t%08x\n", tmp___14); i = 0; goto ldv_48595; ldv_48594: tmp___15 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(i * 16 + 279300), 1); seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", i, tmp___15); tmp___16 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(i * 16 + 279304), 1); seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", i, tmp___16); tmp___17 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(i * 16 + 279308), 1); seq_printf(m, "GT Interrupt IER %d:\t%08x\n", i, tmp___17); i = i + 1; ldv_48595: ; if (i <= 3) { goto ldv_48594; } else { } pipe = 0; goto ldv_48605; ldv_48604: 
tmp___18 = intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )pipe); if (tmp___18) { tmp___19 = 0; } else { tmp___19 = 1; } if (tmp___19) { seq_printf(m, "Pipe %c power disabled\n", pipe + 65); goto ldv_48603; } else { } tmp___20 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279556), 1); seq_printf(m, "Pipe %c IMR:\t%08x\n", pipe + 65, tmp___20); tmp___21 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279560), 1); seq_printf(m, "Pipe %c IIR:\t%08x\n", pipe + 65, tmp___21); tmp___22 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279564), 1); seq_printf(m, "Pipe %c IER:\t%08x\n", pipe + 65, tmp___22); ldv_48603: pipe = pipe + 1; ldv_48605: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > pipe) { goto ldv_48604; } else { } tmp___23 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279620L, 1); seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", tmp___23); tmp___24 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279624L, 1); seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", tmp___24); tmp___25 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279628L, 1); seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", tmp___25); tmp___26 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279652L, 1); seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", tmp___26); tmp___27 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279656L, 1); seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", tmp___27); tmp___28 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279660L, 1); seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", tmp___28); tmp___29 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279780L, 1); seq_printf(m, "PCU interrupt mask:\t%08x\n", tmp___29); tmp___30 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 1); seq_printf(m, "PCU interrupt identity:\t%08x\n", tmp___30); tmp___31 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279788L, 1); seq_printf(m, "PCU interrupt enable:\t%08x\n", tmp___31); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { tmp___32 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581216L, 1); seq_printf(m, "Display IER:\t%08x\n", tmp___32); tmp___33 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581220L, 1); seq_printf(m, "Display IIR:\t%08x\n", tmp___33); tmp___34 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581188L, 1); seq_printf(m, "Display IIR_RW:\t%08x\n", tmp___34); tmp___35 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581224L, 1); seq_printf(m, "Display IMR:\t%08x\n", tmp___35); pipe = 0; goto ldv_48620; ldv_48619: tmp___36 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); seq_printf(m, "Pipe %c stat:\t%08x\n", pipe + 65, tmp___36); pipe = pipe + 1; ldv_48620: __p___1 = dev_priv; if ((int )__p___1->info.num_pipes > pipe) { goto ldv_48619; } else { } tmp___37 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278540L, 1); seq_printf(m, "Master IER:\t%08x\n", tmp___37); tmp___38 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278556L, 1); seq_printf(m, "Render IER:\t%08x\n", tmp___38); tmp___39 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 1); seq_printf(m, "Render IIR:\t%08x\n", tmp___39); tmp___40 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278548L, 1); seq_printf(m, "Render IMR:\t%08x\n", tmp___40); tmp___41 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278572L, 1); seq_printf(m, "PM IER:\t\t%08x\n", tmp___41); tmp___42 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 1); seq_printf(m, "PM IIR:\t\t%08x\n", tmp___42); tmp___43 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278564L, 1); seq_printf(m, "PM 
IMR:\t\t%08x\n", tmp___43); tmp___44 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 1); seq_printf(m, "Port hotplug:\t%08x\n", tmp___44); tmp___45 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031656L, 1); seq_printf(m, "DPFLIPSTAT:\t%08x\n", tmp___45); tmp___46 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2031660L, 1); seq_printf(m, "DPINVGTT:\t%08x\n", tmp___46); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type == 0U) { tmp___47 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 1); seq_printf(m, "Interrupt enable: %08x\n", tmp___47); tmp___48 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8356L, 1); seq_printf(m, "Interrupt identity: %08x\n", tmp___48); tmp___49 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8360L, 1); seq_printf(m, "Interrupt mask: %08x\n", tmp___49); pipe = 0; goto ldv_48635; ldv_48634: tmp___50 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); seq_printf(m, "Pipe %c stat: %08x\n", pipe + 65, tmp___50); pipe = pipe + 1; ldv_48635: __p___2 = dev_priv; if ((int )__p___2->info.num_pipes > pipe) { goto ldv_48634; } else { } } else { tmp___51 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278540L, 1); seq_printf(m, "North Display Interrupt enable:\t\t%08x\n", tmp___51); tmp___52 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278536L, 1); seq_printf(m, "North Display Interrupt identity:\t%08x\n", tmp___52); tmp___53 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278532L, 1); seq_printf(m, "North Display Interrupt mask:\t\t%08x\n", tmp___53); tmp___54 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802828L, 1); seq_printf(m, "South Display Interrupt enable:\t\t%08x\n", tmp___54); tmp___55 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 1); seq_printf(m, "South Display Interrupt identity:\t%08x\n", tmp___55); tmp___56 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802820L, 1); seq_printf(m, "South Display Interrupt mask:\t\t%08x\n", tmp___56); tmp___57 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278556L, 1); seq_printf(m, "Graphics Interrupt enable:\t\t%08x\n", tmp___57); tmp___58 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 1); seq_printf(m, "Graphics Interrupt identity:\t\t%08x\n", tmp___58); tmp___59 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278548L, 1); seq_printf(m, "Graphics Interrupt mask:\t\t%08x\n", tmp___59); } } } } i = 0; goto ldv_48644; ldv_48643: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___61 = intel_ring_initialized(ring); if ((int )tmp___61) { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) > 5U) { tmp___60 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 168U), 1); seq_printf(m, "Graphics Interrupt mask (%s):\t%08x\n", ring->name, tmp___60); } else { } i915_ring_seqno_info(m, ring); } else { } i = i + 1; ldv_48644: ; if (i <= 4) { goto ldv_48643; } else { } intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex); return (0); } } static int i915_gem_fence_regs_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int i ; int ret ; struct drm_i915_gem_object *obj ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); i = 0; goto ldv_48657; ldv_48656: obj = 
dev_priv->fence_regs[i].obj;
/* Print one fence-register slot: index, pin count, then the bound object
 * (or "unused" when no object is attached to this fence). */
seq_printf(m, "Fence %d, pin count = %d, object = ", i, dev_priv->fence_regs[i].pin_count); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { seq_puts(m, "unused"); } else { describe_obj(m, obj); }
/* seq_putc(m, 10) emits a '\n'. */
seq_putc(m, 10); i = i + 1;
/* CIL-lowered loop back-edge: continue while i < num_fence_regs. */
ldv_48657: ; if (dev_priv->num_fence_regs > i) { goto ldv_48656; } else { } mutex_unlock(& dev->struct_mutex); return (0); } }
/* i915_hws_info(): debugfs dump of one ring's hardware status page.
 * Prints the 256 status-page dwords, four per output line; bails out
 * early (returning 0) when the ring has no status page mapped. */
static int i915_hws_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u32 const *hws ; int i ;
{ node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private;
/* (node->info_ent)->data selects which engine's ring to dump. */
ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )(node->info_ent)->data; hws = (u32 const *)ring->status_page.page_addr; if ((unsigned long )hws == (unsigned long )((u32 const *)0U)) { return (0); } else { }
/* CIL-lowered loop: i advances by 4 dwords per printed line, up to 255. */
i = 0; goto ldv_48670; ldv_48669: seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4, *(hws + (unsigned long )i), *(hws + ((unsigned long )i + 1UL)), *(hws + ((unsigned long )i + 2UL)), *(hws + ((unsigned long )i + 3UL))); i = i + 4; ldv_48670: ; if ((unsigned int )i <= 255U) { goto ldv_48669; } else { } return (0); } }
/* i915_error_state_write(): debugfs write handler; any write clears the
 * captured GPU error state. Returns cnt on success, negative errno if the
 * struct_mutex lock is interrupted. */
static ssize_t i915_error_state_write(struct file *filp , char const *ubuf , size_t cnt , loff_t *ppos ) { struct i915_error_state_file_priv *error_priv ; struct drm_device *dev ; int ret ; long tmp ;
{ error_priv = (struct i915_error_state_file_priv *)filp->private_data; dev = error_priv->dev;
/* Expanded DRM_DEBUG-style logging: print only when drm_debug bit 1 set. */
tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_error_state_write", "Resetting error state\n"); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return ((ssize_t )ret); } else { } i915_destroy_error_state(dev); mutex_unlock(& dev->struct_mutex); return ((ssize_t )cnt); } }
static int
/* i915_error_state_open(): debugfs open; allocates the per-file private
 * struct (kzalloc(16UL, 208U) -- 208U is presumably GFP_KERNEL after CIL
 * constant-folding, TODO confirm) and takes a reference on the current
 * error state. Returns 0, or -12 (-ENOMEM) on allocation failure. */
i915_error_state_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; struct i915_error_state_file_priv *error_priv ; void *tmp ;
{ dev = (struct drm_device *)inode->i_private; tmp = kzalloc(16UL, 208U); error_priv = (struct i915_error_state_file_priv *)tmp; if ((unsigned long )error_priv == (unsigned long )((struct i915_error_state_file_priv *)0)) { return (-12); } else { } error_priv->dev = dev; i915_error_state_get(dev, error_priv); file->private_data = (void *)error_priv; return (0); } }
/* i915_error_state_release(): debugfs release; drops the error-state
 * reference taken in open and frees the per-file private data. */
static int i915_error_state_release(struct inode *inode , struct file *file ) { struct i915_error_state_file_priv *error_priv ;
{ error_priv = (struct i915_error_state_file_priv *)file->private_data; i915_error_state_put(error_priv); kfree((void const *)error_priv); return (0); } }
/* i915_error_state_read(): debugfs read; renders the captured error state
 * into a string buffer and copies the requested slice to userspace.
 * Returns bytes copied, or a negative errno from init/formatting/copy. */
static ssize_t i915_error_state_read(struct file *file , char *userbuf , size_t count , loff_t *pos ) { struct i915_error_state_file_priv *error_priv ; struct drm_i915_error_state_buf error_str ; loff_t tmp_pos ; ssize_t ret_count ; int ret ; struct drm_i915_private *tmp ;
{ error_priv = (struct i915_error_state_file_priv *)file->private_data; tmp_pos = 0LL; ret_count = 0L; tmp = to_i915((struct drm_device const *)error_priv->dev); ret = i915_error_state_buf_init(& error_str, tmp, count, *pos); if (ret != 0) { return ((ssize_t )ret); } else { } ret = i915_error_state_to_str(& error_str, (struct i915_error_state_file_priv const *)error_priv); if (ret != 0) { goto out; } else { }
/* Copy the formatted text out; on success advance the file position. */
ret_count = simple_read_from_buffer((void *)userbuf, count, & tmp_pos, (void const *)error_str.buf, (size_t )error_str.bytes); if (ret_count < 0L) { ret = (int )ret_count; } else { *pos = error_str.start + (long long )ret_count; }
/* goto-cleanup: the buffer is released on every exit path. */
out: i915_error_state_buf_release(& error_str); return (ret != 0 ?
(ssize_t )ret : ret_count); } }
/* file_operations for the error-state debugfs node: positional
 * initializer (CIL lost the designated field names); wires llseek,
 * read, write, open and release. */
static struct file_operations const i915_error_state_fops = {& __this_module, & default_llseek, & i915_error_state_read, & i915_error_state_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_error_state_open, 0, & i915_error_state_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* i915_next_seqno_get(): simple_attr getter; reports the next GEM seqno
 * under struct_mutex. Returns 0 or the errno from the interruptible lock. */
static int i915_next_seqno_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ;
{ dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } *val = (u64 )dev_priv->next_seqno; mutex_unlock(& dev->struct_mutex); return (0); } }
/* i915_next_seqno_set(): simple_attr setter; forces the next GEM seqno
 * via i915_gem_set_seqno() under struct_mutex. */
static int i915_next_seqno_set(void *data , u64 val ) { struct drm_device *dev ; int ret ;
{ dev = (struct drm_device *)data; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } ret = i915_gem_set_seqno(dev, (u32 )val); mutex_unlock(& dev->struct_mutex); return (ret); } }
/* i915_next_seqno_fops_open(): expansion of a DEFINE_SIMPLE_ATTRIBUTE
 * open hook; binds the getter/setter pair with the "0x%llx\n" format. */
static int i915_next_seqno_fops_open(struct inode *inode , struct file *file ) { int tmp ;
{ __simple_attr_check_format("0x%llx\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_next_seqno_get, & i915_next_seqno_set, "0x%llx\n"); return (tmp); } }
static struct file_operations const i915_next_seqno_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_next_seqno_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* i915_frequency_info(): debugfs dump of GPU frequency / RPS state. The
 * body continues on the following source lines; the locals below are
 * CIL-hoisted temporaries for the whole function. */
static int i915_frequency_info(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; u16 rgvswctl ; uint16_t tmp ; u16 rgvstat ; uint16_t tmp___0 ; u32 gt_perf_status ; uint32_t tmp___1 ; u32 rp_state_limits ; uint32_t tmp___2 ; u32 rp_state_cap ; uint32_t tmp___3 ; u32 rpmodectl ; u32 rpinclimit ; u32 rpdeclimit ; u32 rpstat ; u32 cagf ; u32 reqf ; u32 rpupei ; u32
rpcurup ; u32 rpprevup ; u32 rpdownei ; u32 rpcurdown ; u32 rpprevdown ; u32 pm_ier ; u32 pm_imr ; u32 pm_isr ; u32 pm_iir ; u32 pm_mask ; int max_freq ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp___4 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; int tmp___5 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; int tmp___6 ; struct drm_i915_private *__p___11 ; int tmp___7 ; struct drm_i915_private *__p___12 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; u32 freq_sts ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; struct drm_i915_private *__p___13 ; struct drm_i915_private *__p___14 ; struct drm_i915_private *__p___15 ; struct drm_i915_private *__p___16 ; struct drm_i915_private *__p___17 ; struct drm_i915_private *__p___18 ; struct drm_i915_private *__p___19 ; struct drm_i915_private *__p___20 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; intel_runtime_pm_get(dev_priv); ldv_flush_delayed_work_144(& dev_priv->rps.delayed_resume_work); __p___20 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___20->info.gen) == 5U) { tmp = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 70000L, 1); rgvswctl = tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 70136L, 1); rgvstat = tmp___0; seq_printf(m, "Requested P-state: %d\n", ((int )rgvswctl >> 8) & 15); seq_printf(m, "Requested VID: %d\n", (int )rgvswctl & 63); seq_printf(m, "Current VID: %d\n", ((int )rgvstat & 32512) >> 8); seq_printf(m, "Current P-state: %d\n", ((int )rgvstat & 248) >> 3); } else { __p___14 
= to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___14->info.gen) == 6U) { goto _L; } else { __p___15 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___15->info.gen) == 7U) { __p___16 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___16 + 45UL) == 0U) { goto _L; } else { goto _L___1; } } else { _L___1: /* CIL Label */ __p___17 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___17 + 45UL) == 0U) { __p___18 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___18->info.gen) == 8U) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___19 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___19->info.gen) == 9U) { _L: /* CIL Label */ tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1333576L, 1); gt_perf_status = tmp___1; tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1333652L, 1); rp_state_limits = tmp___2; tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1333656L, 1); rp_state_cap = tmp___3; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { goto out; } else { } intel_uncore_forcewake_get(dev_priv, 7); reqf = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40968L, 1); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { reqf = reqf >> 23; } else { reqf = reqf & 2147483647U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { reqf = reqf >> 24; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { reqf = reqf >> 24; } else { reqf = reqf >> 25; } } else { reqf = reqf >> 25; } } } tmp___4 = 
intel_gpu_freq(dev_priv, (int )reqf); reqf = (u32 )tmp___4; rpmodectl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40996L, 1); rpinclimit = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41004L, 1); rpdeclimit = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41008L, 1); rpstat = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40988L, 1); rpupei = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41040L, 1); rpcurup = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41044L, 1); rpprevup = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41048L, 1); rpdownei = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41052L, 1); rpcurdown = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41056L, 1); rpprevdown = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41060L, 1); __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 9U) { cagf = rpstat >> 23; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { cagf = (rpstat & 16256U) >> 7; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 8U) { cagf = (rpstat & 16256U) >> 7; } else { cagf = (rpstat & 32512U) >> 8; } } else { cagf = (rpstat & 32512U) >> 8; } } } tmp___5 = intel_gpu_freq(dev_priv, (int )cagf); cagf = (u32 )tmp___5; intel_uncore_forcewake_put(dev_priv, 7); mutex_unlock(& dev->struct_mutex); __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) == 6U) { pm_ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278572L, 1); pm_imr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278564L, 1); pm_isr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278560L, 1); pm_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 1); pm_mask = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41320L, 1); } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) == 7U) { pm_ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278572L, 1); pm_imr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278564L, 1); pm_isr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278560L, 1); pm_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 1); pm_mask = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41320L, 1); } else { pm_ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279340L, 1); pm_imr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279332L, 1); pm_isr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279328L, 1); pm_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279336L, 1); pm_mask = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41320L, 1); } } seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); __p___9 = to_i915((struct drm_device const *)dev); seq_printf(m, "Render p-state ratio: %d\n", (((unsigned int )((unsigned char )__p___9->info.gen) == 9U ? 
130816U : 65280U) & gt_perf_status) >> 8); seq_printf(m, "Render p-state VID: %d\n", gt_perf_status & 255U); seq_printf(m, "Render p-state limit: %d\n", rp_state_limits & 255U); seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); seq_printf(m, "CAGF: %dMHz\n", cagf); seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 16777215U); seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 16777215U); seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 16777215U); seq_printf(m, "Up threshold: %d%%\n", (int )dev_priv->rps.up_threshold); seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 16777215U); seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 16777215U); seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 16777215U); seq_printf(m, "Down threshold: %d%%\n", (int )dev_priv->rps.down_threshold); max_freq = (int )((rp_state_cap & 16711680U) >> 16); __p___10 = to_i915((struct drm_device const *)dev); max_freq = ((unsigned int )*((unsigned char *)__p___10 + 45UL) != 0U ? 3 : 1) * max_freq; tmp___6 = intel_gpu_freq(dev_priv, max_freq); seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", tmp___6); max_freq = (int )((rp_state_cap & 65280U) >> 8); __p___11 = to_i915((struct drm_device const *)dev); max_freq = ((unsigned int )*((unsigned char *)__p___11 + 45UL) != 0U ? 3 : 1) * max_freq; tmp___7 = intel_gpu_freq(dev_priv, max_freq); seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", tmp___7); max_freq = (int )rp_state_cap & 255; __p___12 = to_i915((struct drm_device const *)dev); max_freq = ((unsigned int )*((unsigned char *)__p___12 + 45UL) != 0U ? 
3 : 1) * max_freq; tmp___8 = intel_gpu_freq(dev_priv, max_freq); seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", tmp___8); tmp___9 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq); seq_printf(m, "Max overclocked frequency: %dMHz\n", tmp___9); tmp___10 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.cur_freq); seq_printf(m, "Current freq: %d MHz\n", tmp___10); seq_printf(m, "Actual freq: %d MHz\n", cagf); tmp___11 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.idle_freq); seq_printf(m, "Idle freq: %d MHz\n", tmp___11); tmp___12 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq); seq_printf(m, "Min freq: %d MHz\n", tmp___12); tmp___13 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq); seq_printf(m, "Max freq: %d MHz\n", tmp___13); tmp___14 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.efficient_freq); seq_printf(m, "efficient (RPe) frequency: %d MHz\n", tmp___14); } else { __p___13 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___13 + 45UL) != 0U) { mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); freq_sts = vlv_punit_read(dev_priv, 216U); seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); tmp___15 = intel_gpu_freq(dev_priv, (int )(freq_sts >> 8) & 255); seq_printf(m, "actual GPU freq: %d MHz\n", tmp___15); tmp___16 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.cur_freq); seq_printf(m, "current GPU freq: %d MHz\n", tmp___16); tmp___17 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq); seq_printf(m, "max GPU freq: %d MHz\n", tmp___17); tmp___18 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq); seq_printf(m, "min GPU freq: %d MHz\n", tmp___18); tmp___19 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.idle_freq); seq_printf(m, "idle GPU freq: %d MHz\n", tmp___19); tmp___20 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.efficient_freq); seq_printf(m, "efficient (RPe) frequency: %d MHz\n", tmp___20); mutex_unlock(& 
dev_priv->rps.hw_lock); } else { seq_puts(m, "no P-state info available\n"); } } } } }
/* Common exit for i915_frequency_info(): drop the runtime-PM reference
 * taken at entry, on every path. */
out: intel_runtime_pm_put(dev_priv); return (ret); } }
/* i915_hangcheck_info(): debugfs dump of hangcheck state. Samples the
 * current seqno/ACTHD of up to five rings, reports whether the hangcheck
 * work is pending, then prints per-ring hangcheck bookkeeping. */
static int i915_hangcheck_info(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u64 acthd[5U] ; u32 seqno[5U] ; int i ; bool tmp ; unsigned int tmp___0 ; int tmp___1 ; bool tmp___2 ;
{ node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (! i915.enable_hangcheck) { seq_printf(m, "Hangcheck disabled\n"); return (0); } else { } intel_runtime_pm_get(dev_priv);
/* First pass: snapshot seqno and active head for each initialized ring.
 * Slots for uninitialized rings stay unwritten and are never read back. */
i = 0; goto ldv_48901; ldv_48900: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { seqno[i] = (*(ring->get_seqno))(ring, 0); acthd[i] = intel_ring_get_active_head(ring); } else { } i = i + 1; ldv_48901: ; if (i <= 4) { goto ldv_48900; } else { } intel_runtime_pm_put(dev_priv);
/* Tests bit 0 of the delayed work's data word -- presumably the
 * work-pending flag; if pending, report time until the timer fires. */
tmp___1 = constant_test_bit(0L, (unsigned long const volatile *)(& dev_priv->gpu_error.hangcheck_work.work.data)); if (tmp___1 != 0) { tmp___0 = jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - (unsigned long )jiffies); seq_printf(m, "Hangcheck active, fires in %dms\n", tmp___0); } else { seq_printf(m, "Hangcheck inactive\n"); }
/* Second pass: print hangcheck score/action plus the sampled values. */
i = 0; goto ldv_48904; ldv_48903: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___2 = intel_ring_initialized(ring); if ((int )tmp___2) { seq_printf(m, "%s:\n", ring->name); seq_printf(m, "\tseqno = %x [current %x]\n", ring->hangcheck.seqno, seqno[i]); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long )ring->hangcheck.acthd, (long long )acthd[i]); seq_printf(m, "\tmax ACTHD = 0x%08llx\n", (long long )ring->hangcheck.max_acthd); seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); seq_printf(m, "\taction = %d\n", (unsigned int
)ring->hangcheck.action); } else { } i = i + 1; ldv_48904: ; if (i <= 4) { goto ldv_48903; } else { } return (0); } }
/* ironlake_drpc_info(): decode Ironlake render-power registers. Reads
 * three MMIO registers (numeric offsets; the symbolic register names were
 * lost by the CIL pass) under runtime PM + struct_mutex, then
 * pretty-prints the bitfields. Always returns 0 once past the lock. */
static int ironlake_drpc_info(struct seq_file *m ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 rgvmodectl ; u32 rstdbyctl ; u16 crstandvid ; int ret ;
{ node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); rgvmodectl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70032L, 1); rstdbyctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70072L, 1); crstandvid = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 69888L, 1); intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex);
/* (int )rgvmodectl < 0 tests bit 31 of the register ("HD boost"). */
seq_printf(m, "HD boost: %s\n", (int )rgvmodectl < 0 ? (char *)"yes" : (char *)"no"); seq_printf(m, "Boost freq: %d\n", (rgvmodectl & 251658240U) >> 24); seq_printf(m, "HW control enabled: %s\n", (rgvmodectl & 32768U) != 0U ? (char *)"yes" : (char *)"no"); seq_printf(m, "SW control enabled: %s\n", (rgvmodectl & 16384U) != 0U ? (char *)"yes" : (char *)"no"); seq_printf(m, "Gated voltage change: %s\n", (rgvmodectl & 8192U) != 0U ? (char *)"yes" : (char *)"no"); seq_printf(m, "Starting frequency: P%d\n", (rgvmodectl & 3840U) >> 8); seq_printf(m, "Max P-state: P%d\n", (rgvmodectl & 240U) >> 4); seq_printf(m, "Min P-state: P%d\n", rgvmodectl & 15U); seq_printf(m, "RS1 VID: %d\n", (int )crstandvid & 63); seq_printf(m, "RS2 VID: %d\n", ((int )crstandvid >> 8) & 63); seq_printf(m, "Render standby enabled: %s\n", (rstdbyctl & 8388608U) != 0U ?
(char *)"no" : (char *)"yes"); seq_puts(m, "Current RS state: "); switch (rstdbyctl & 7340032U) { case 0U: seq_puts(m, "on\n"); goto ldv_48917; case 1048576U: seq_puts(m, "RC1\n"); goto ldv_48917; case 2097152U: seq_puts(m, "RC1E\n"); goto ldv_48917; case 3145728U: seq_puts(m, "RS1\n"); goto ldv_48917; case 4194304U: seq_puts(m, "RS2 (RC6)\n"); goto ldv_48917; case 6291456U: seq_puts(m, "RC3 (RC6+)\n"); goto ldv_48917; default: seq_puts(m, "unknown\n"); goto ldv_48917; } ldv_48917: ; return (0); } } static int i915_forcewake_domains(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_uncore_forcewake_domain *fw_domain ; int i ; char const *tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; spin_lock_irq(& dev_priv->uncore.lock); i = 0; fw_domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48934; ldv_48933: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )(1 << i)) & 7U) != 0U) { tmp = intel_uncore_forcewake_domain_to_str((enum forcewake_domain_id const )i); seq_printf(m, "%s.wake_count = %u\n", tmp, fw_domain->wake_count); } else { } i = i + 1; fw_domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )i; ldv_48934: ; if (i <= 2) { goto ldv_48933; } else { } spin_unlock_irq(& dev_priv->uncore.lock); return (0); } } static int vlv_drpc_info(struct seq_file *m ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 rpmodectl1 ; u32 rcctl1 ; u32 pw_status ; char const *tmp ; char const *tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; int tmp___6 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; 
intel_runtime_pm_get(dev_priv); pw_status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); rpmodectl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40996L, 1); rcctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41104L, 1); intel_runtime_pm_put(dev_priv); tmp = yesno((int )rpmodectl1 & 2048); seq_printf(m, "Video Turbo Mode: %s\n", tmp); tmp___0 = yesno((int )rpmodectl1 & 128); seq_printf(m, "Turbo enabled: %s\n", tmp___0); tmp___1 = yesno((int )rpmodectl1 & 128); seq_printf(m, "HW control enabled: %s\n", tmp___1); tmp___2 = yesno((rpmodectl1 & 1536U) == 0U); seq_printf(m, "SW control enabled: %s\n", tmp___2); tmp___3 = yesno((int )rcctl1 & 402653184); seq_printf(m, "RC6 Enabled: %s\n", tmp___3); seq_printf(m, "Render Power Well: %s\n", (pw_status & 128U) != 0U ? (char *)"Up" : (char *)"Down"); seq_printf(m, "Media Power Well: %s\n", (pw_status & 32U) != 0U ? (char *)"Up" : (char *)"Down"); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278216L, 1); seq_printf(m, "Render RC6 residency since boot: %u\n", tmp___4); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278220L, 1); seq_printf(m, "Media RC6 residency since boot: %u\n", tmp___5); tmp___6 = i915_forcewake_domains(m, (void *)0); return (tmp___6); } } static int gen6_drpc_info(struct seq_file *m ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 rpmodectl1 ; u32 gt_core_status ; u32 rcctl1 ; u32 rc6vids ; unsigned int forcewake_count ; int count ; int ret ; int tmp ; uint32_t tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; char const *tmp___5 ; char const *tmp___6 ; char const *tmp___7 ; char const *tmp___8 ; char const *tmp___9 ; uint32_t tmp___10 ; uint32_t tmp___11 ; uint32_t tmp___12 ; uint32_t tmp___13 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; rc6vids = 0U; count = 0; ret = 
mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); spin_lock_irq(& dev_priv->uncore.lock); forcewake_count = dev_priv->uncore.fw_domain[0].wake_count; spin_unlock_irq(& dev_priv->uncore.lock); if (forcewake_count != 0U) { seq_puts(m, "RC information inaccurate because somebody holds a forcewake reference \n"); } else { goto ldv_48959; ldv_48958: __const_udelay(42950UL); ldv_48959: tmp = count; count = count + 1; if (tmp <= 49) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245328L, 0); if ((int )tmp___0 & 1) { goto ldv_48958; } else { goto ldv_48960; } } else { } ldv_48960: tmp___1 = yesno(count <= 50); seq_printf(m, "RC information accurate: %s\n", tmp___1); } gt_core_status = readl((void const volatile *)dev_priv->regs + 1278048U); trace_i915_reg_rw(0, 1278048U, (u64 )gt_core_status, 4, 1); rpmodectl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 40996L, 1); rcctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41104L, 1); mutex_unlock(& dev->struct_mutex); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); sandybridge_pcode_read(dev_priv, 5U, & rc6vids); mutex_unlock(& dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); tmp___2 = yesno((int )rpmodectl1 & 2048); seq_printf(m, "Video Turbo Mode: %s\n", tmp___2); tmp___3 = yesno((int )rpmodectl1 & 128); seq_printf(m, "HW control enabled: %s\n", tmp___3); tmp___4 = yesno((rpmodectl1 & 1536U) == 0U); seq_printf(m, "SW control enabled: %s\n", tmp___4); tmp___5 = yesno((int )rcctl1 & 1048576); seq_printf(m, "RC1e Enabled: %s\n", tmp___5); tmp___6 = yesno((int )rcctl1 & 262144); seq_printf(m, "RC6 Enabled: %s\n", tmp___6); tmp___7 = yesno((int )rcctl1 & 131072); seq_printf(m, "Deep RC6 Enabled: %s\n", tmp___7); tmp___8 = yesno((int )rcctl1 & 65536); seq_printf(m, "Deepest RC6 Enabled: %s\n", tmp___8); seq_puts(m, "Current RC state: "); switch (gt_core_status & 7U) { case 0U: ; if ((gt_core_status & 112U) != 0U) { 
seq_puts(m, "Core Power Down\n"); } else { seq_puts(m, "on\n"); } goto ldv_48962; case 2U: seq_puts(m, "RC3\n"); goto ldv_48962; case 3U: seq_puts(m, "RC6\n"); goto ldv_48962; case 4U: seq_puts(m, "RC7\n"); goto ldv_48962; default: seq_puts(m, "Unknown\n"); goto ldv_48962; } ldv_48962: tmp___9 = yesno((int )gt_core_status & 112); seq_printf(m, "Core Power Down: %s\n", tmp___9); tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278212L, 1); seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", tmp___10); tmp___11 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278216L, 1); seq_printf(m, "RC6 residency since boot: %u\n", tmp___11); tmp___12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278220L, 1); seq_printf(m, "RC6+ residency since boot: %u\n", tmp___12); tmp___13 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278224L, 1); seq_printf(m, "RC6++ residency since boot: %u\n", tmp___13); seq_printf(m, "RC6 voltage: %dmV\n", (rc6vids & 255U) * 5U + 245U); seq_printf(m, "RC6+ voltage: %dmV\n", ((rc6vids >> 8) & 255U) * 5U + 245U); seq_printf(m, "RC6++ voltage: %dmV\n", ((rc6vids >> 16) & 255U) * 5U + 245U); return (0); } } static int i915_drpc_info(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; int tmp ; int tmp___0 ; int tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { tmp = vlv_drpc_info(m); return (tmp); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { tmp___0 = gen6_drpc_info(m); return (tmp___0); } else { tmp___1 = ironlake_drpc_info(m); return (tmp___1); } } } } static int i915_fbc_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv 
; struct drm_i915_private *__p ; bool tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { seq_puts(m, "FBC unsupported on this chipset\n"); return (0); } else { } intel_runtime_pm_get(dev_priv); tmp = intel_fbc_enabled(dev); if ((int )tmp) { seq_puts(m, "FBC enabled\n"); } else { seq_puts(m, "FBC disabled: "); switch ((unsigned int )dev_priv->fbc.no_fbc_reason) { case 0U: seq_puts(m, "FBC actived, but currently disabled in hardware"); goto ldv_48999; case 1U: seq_puts(m, "unsupported by this chipset"); goto ldv_48999; case 2U: seq_puts(m, "no outputs"); goto ldv_48999; case 3U: seq_puts(m, "not enough stolen memory"); goto ldv_48999; case 4U: seq_puts(m, "mode not supported"); goto ldv_48999; case 5U: seq_puts(m, "mode too large"); goto ldv_48999; case 6U: seq_puts(m, "FBC unsupported on plane"); goto ldv_48999; case 7U: seq_puts(m, "scanout buffer not tiled"); goto ldv_48999; case 8U: seq_puts(m, "multiple pipes are enabled"); goto ldv_48999; case 9U: seq_puts(m, "disabled per module param (default off)"); goto ldv_48999; case 10U: seq_puts(m, "disabled per chip default"); goto ldv_48999; default: seq_puts(m, "unknown reason"); } ldv_48999: seq_putc(m, 10); } intel_runtime_pm_put(dev_priv); return (0); } } static int i915_fbc_fc_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 6U) { return (-19); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return (-19); } else { } } drm_modeset_lock_all(dev); *val = (u64 
)dev_priv->fbc.false_color; drm_modeset_unlock_all(dev); return (0); } } static int i915_fbc_fc_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 reg ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 6U) { return (-19); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return (-19); } else { } } drm_modeset_lock_all(dev); reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 274952L, 1); dev_priv->fbc.false_color = val != 0ULL; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 274952L, val != 0ULL ? reg | 1024U : reg & 4294966271U, 1); drm_modeset_unlock_all(dev); return (0); } } static int i915_fbc_fc_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("%llu\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_fbc_fc_get, & i915_fbc_fc_set, "%llu\n"); return (tmp); } } static struct file_operations const i915_fbc_fc_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_fbc_fc_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_ips_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; char const *tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p___3 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { 
goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if (((int )__p___0->info.device_id & 65280) != 2560) { _L: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { seq_puts(m, "not supported\n"); return (0); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { seq_puts(m, "not supported\n"); return (0); } else { } } } else { } } intel_runtime_pm_get(dev_priv); tmp = yesno(i915.enable_ips); seq_printf(m, "Enabled by kernel parameter: %s\n", tmp); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 7U) { seq_puts(m, "Currently: unknown\n"); } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 1); if ((int )tmp___0 < 0) { seq_puts(m, "Currently: enabled\n"); } else { seq_puts(m, "Currently: disabled\n"); } } intel_runtime_pm_put(dev_priv); return (0); } } static int i915_sr_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool sr_enabled ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; sr_enabled = 0; intel_runtime_pm_get(dev_priv); __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___6->pch_type != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282888L, 1); sr_enabled = (tmp & 2147483648U) != 0U; } else { __p___2 = to_i915((struct drm_device const *)dev); if 
((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8416L, 1); sr_enabled = (tmp___0 & 32768U) != 0U; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8416L, 1); sr_enabled = (tmp___0 & 32768U) != 0U; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___4->info.device_id) == 10098U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8416L, 1); sr_enabled = (tmp___0 & 32768U) != 0U; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 44UL) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8416L, 1); sr_enabled = (tmp___0 & 32768U) != 0U; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 9618U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8384L, 1); sr_enabled = (tmp___1 & 4096U) != 0U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458812U), 1); sr_enabled = (tmp___2 & 1073741824U) != 0U; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1598720L, 1); sr_enabled = (tmp___3 & 32768U) != 0U; } else { } } } } } } } } intel_runtime_pm_put(dev_priv); seq_printf(m, "self-refresh: %s\n", (int )sr_enabled ? 
(char *)"enabled" : (char *)"disabled"); return (0); } } static int i915_emon_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long temp ; unsigned long chipset ; unsigned long gfx ; int ret ; struct drm_i915_private *__p ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 5U) { return (-19); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } temp = i915_mch_val(dev_priv); chipset = i915_chipset_val(dev_priv); gfx = i915_gfx_val(dev_priv); mutex_unlock(& dev->struct_mutex); seq_printf(m, "GMCH temp: %ld\n", temp); seq_printf(m, "Chipset power: %ld\n", chipset); seq_printf(m, "GFX power: %ld\n", gfx); seq_printf(m, "Total power: %ld\n", chipset + gfx); return (0); } } static int i915_ring_freq_table(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; int gpu_freq ; int ia_freq ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 7U) { seq_puts(m, "unsupported on this chipset\n"); return (0); } else { } } else { } intel_runtime_pm_get(dev_priv); ldv_flush_delayed_work_145(& dev_priv->rps.delayed_resume_work); ret = mutex_lock_interruptible_nested(& dev_priv->rps.hw_lock, 0U); if (ret != 0) { goto out; } else { } seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring 
freq (MHz)\n"); gpu_freq = (int )dev_priv->rps.min_freq_softlimit; goto ldv_49187; ldv_49186: ia_freq = gpu_freq; sandybridge_pcode_read(dev_priv, 9U, (u32 *)(& ia_freq)); tmp = intel_gpu_freq(dev_priv, gpu_freq); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", tmp, (ia_freq & 255) * 100, ((ia_freq >> 8) & 255) * 100); gpu_freq = gpu_freq + 1; ldv_49187: ; if ((int )dev_priv->rps.max_freq_softlimit >= gpu_freq) { goto ldv_49186; } else { } mutex_unlock(& dev_priv->rps.hw_lock); out: intel_runtime_pm_put(dev_priv); return (ret); } } static int i915_opregion(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_opregion *opregion ; void *data ; void *tmp ; int ret ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; opregion = & dev_priv->opregion; tmp = kmalloc(8192UL, 208U); data = tmp; if ((unsigned long )data == (unsigned long )((void *)0)) { return (-12); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { goto out; } else { } if ((unsigned long )opregion->header != (unsigned long )((struct opregion_header *)0)) { memcpy_fromio(data, (void const volatile *)opregion->header, 8192UL); seq_write(m, (void const *)data, 8192UL); } else { } mutex_unlock(& dev->struct_mutex); out: kfree((void const *)data); return (0); } } static int i915_gem_framebuffer_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct intel_fbdev *ifbdev ; struct intel_framebuffer *fb ; struct drm_i915_private *dev_priv ; struct drm_framebuffer const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; int tmp___0 ; struct list_head const *__mptr___1 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; ifbdev = (struct intel_fbdev *)0; dev_priv = (struct drm_i915_private *)dev->dev_private; ifbdev = dev_priv->fbdev; __mptr = (struct 
drm_framebuffer const *)ifbdev->helper.fb; fb = (struct intel_framebuffer *)__mptr; tmp = atomic_read((atomic_t const *)(& fb->base.refcount.refcount)); seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fb->base.width, fb->base.height, fb->base.depth, fb->base.bits_per_pixel, fb->base.modifier[0], tmp); describe_obj(m, fb->obj); seq_putc(m, 10); /* walk dev->mode_config.fb_list and print every user framebuffer, skipping the fbdev framebuffer already printed above (CIL-expanded list_for_each_entry) */ mutex_lock_nested(& dev->mode_config.fb_lock, 0U); __mptr___0 = (struct list_head const *)dev->mode_config.fb_list.next; fb = (struct intel_framebuffer *)__mptr___0 + 0xfffffffffffffff0UL; goto ldv_49217; ldv_49216: ; if ((unsigned long )ifbdev != (unsigned long )((struct intel_fbdev *)0) && (unsigned long )(& fb->base) == (unsigned long )ifbdev->helper.fb) { goto ldv_49215; } else { } tmp___0 = atomic_read((atomic_t const *)(& fb->base.refcount.refcount)); seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fb->base.width, fb->base.height, fb->base.depth, fb->base.bits_per_pixel, fb->base.modifier[0], tmp___0); describe_obj(m, fb->obj); seq_putc(m, 10); ldv_49215: __mptr___1 = (struct list_head const *)fb->base.head.next; fb = (struct intel_framebuffer *)__mptr___1 + 0xfffffffffffffff0UL; ldv_49217: ; if ((unsigned long )(& fb->base.head) != (unsigned long )(& dev->mode_config.fb_list)) { goto ldv_49216; } else { } mutex_unlock(& dev->mode_config.fb_lock); return (0); } }
/* Print a one-line summary of a ringbuffer's bookkeeping fields
 * (space, head, tail, last_retired_head) into the seq_file. */
static void describe_ctx_ringbuf(struct seq_file *m , struct intel_ringbuffer *ringbuf ) { { seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", ringbuf->space, ringbuf->head, ringbuf->tail, ringbuf->last_retired_head); return; } }
/* debugfs show handler: walks dev_priv->context_list under struct_mutex and,
 * for each HW context, prints which initialized rings use it as default
 * context; with execlists enabled it additionally dumps each engine's
 * per-context state object and ringbuffer. Continues onto the next chunk
 * line; loop bodies below are CIL goto-expansions of the original loops. */
static int i915_context_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct intel_context *ctx ; int ret ; int i ; struct list_head const *__mptr ; bool tmp ; struct drm_i915_gem_object *ctx_obj ; struct
intel_ringbuffer *ringbuf ; bool tmp___0 ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } __mptr = (struct list_head const *)dev_priv->context_list.next; ctx = (struct intel_context *)__mptr + 0xffffffffffffff30UL; goto ldv_49248; ldv_49247: ; if (i915.enable_execlists == 0 && (unsigned long )ctx->legacy_hw_ctx.rcs_state == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_49238; } else { } seq_puts(m, "HW context "); describe_ctx(m, ctx); i = 0; goto ldv_49240; ldv_49239: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { if ((unsigned long )ring->default_context == (unsigned long )ctx) { seq_printf(m, "(default context %s) ", ring->name); } else { } } else { } i = i + 1; ldv_49240: ; if (i <= 4) { goto ldv_49239; } else { } if (i915.enable_execlists != 0) { seq_putc(m, 10); i = 0; goto ldv_49245; ldv_49244: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { ctx_obj = ctx->engine[i].state; ringbuf = ctx->engine[i].ringbuf; seq_printf(m, "%s: ", ring->name); if ((unsigned long )ctx_obj != (unsigned long )((struct drm_i915_gem_object *)0)) { describe_obj(m, ctx_obj); } else { } if ((unsigned long )ringbuf != (unsigned long )((struct intel_ringbuffer *)0)) { describe_ctx_ringbuf(m, ringbuf); } else { } seq_putc(m, 10); } else { } i = i + 1; ldv_49245: ; if (i <= 4) { goto ldv_49244; } else { } } else { describe_obj(m, ctx->legacy_hw_ctx.rcs_state); } seq_putc(m, 10); ldv_49238: __mptr___0 = (struct list_head const *)ctx->link.next; ctx = (struct intel_context *)__mptr___0 + 0xffffffffffffff30UL; ldv_49248: ; if ((unsigned long )(& ctx->link) != (unsigned long )(& 
dev_priv->context_list)) { goto ldv_49247; } else { } mutex_unlock(& dev->struct_mutex); return (0); } } static void i915_dump_lrc_obj(struct seq_file *m , struct intel_engine_cs *ring , struct drm_i915_gem_object *ctx_obj ) { struct page *page ; uint32_t *reg_state ; int j ; unsigned long ggtt_offset ; u32 tmp ; bool tmp___0 ; int tmp___1 ; int tmp___2 ; void *tmp___3 ; int __ret_warn_on ; long tmp___4 ; long tmp___5 ; { ggtt_offset = 0UL; if ((unsigned long )ctx_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { seq_printf(m, "Context on %s with no gem object\n", ring->name); return; } else { } tmp = intel_execlists_ctx_id(ctx_obj); seq_printf(m, "CONTEXT: %s %u\n", ring->name, tmp); tmp___0 = i915_gem_obj_ggtt_bound(ctx_obj); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { seq_puts(m, "\tNot bound in GGTT\n"); } else { ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); } tmp___2 = i915_gem_object_get_pages(ctx_obj); if (tmp___2 != 0) { seq_puts(m, "\tFailed to get pages for context object\n"); return; } else { } page = i915_gem_object_get_page(ctx_obj, 1); __ret_warn_on = (unsigned long )page == (unsigned long )((struct page *)0); tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_debugfs.c", 1979, "WARN_ON(page == NULL)"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 == 0L) { tmp___3 = kmap_atomic(page); reg_state = (uint32_t *)tmp___3; j = 0; goto ldv_49262; ldv_49261: seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", ((unsigned long )(j * 4) + ggtt_offset) + 4096UL, *(reg_state + (unsigned long )j), *(reg_state + ((unsigned long )j + 1UL)), *(reg_state + ((unsigned long )j + 2UL)), *(reg_state + ((unsigned long )j + 3UL))); j = j + 4; ldv_49262: ; 
if ((unsigned int )j <= 95U) { goto ldv_49261; } else { } __kunmap_atomic((void *)reg_state); } else { } seq_putc(m, 10); return; } } static int i915_dump_lrc(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct intel_context *ctx ; int ret ; int i ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (i915.enable_execlists == 0) { seq_printf(m, "Logical Ring Contexts are disabled\n"); return (0); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } __mptr = (struct list_head const *)dev_priv->context_list.next; ctx = (struct intel_context *)__mptr + 0xffffffffffffff30UL; goto ldv_49283; ldv_49282: i = 0; goto ldv_49280; ldv_49279: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { if ((unsigned long )ring->default_context != (unsigned long )ctx) { i915_dump_lrc_obj(m, ring, ctx->engine[i].state); } else { } } else { } i = i + 1; ldv_49280: ; if (i <= 4) { goto ldv_49279; } else { } __mptr___0 = (struct list_head const *)ctx->link.next; ctx = (struct intel_context *)__mptr___0 + 0xffffffffffffff30UL; ldv_49283: ; if ((unsigned long )(& ctx->link) != (unsigned long )(& dev_priv->context_list)) { goto ldv_49282; } else { } mutex_unlock(& dev->struct_mutex); return (0); } } static int i915_execlists(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u32 status_pointer ; u8 read_pointer ; u8 write_pointer ; u32 status ; u32 ctx_id ; struct list_head *cursor ; int ring_id ; int i ; int ret ; struct drm_i915_gem_request *head_req ; int count ; unsigned long flags ; 
raw_spinlock_t *tmp ; struct list_head const *__mptr ; int tmp___1 ; struct drm_i915_gem_object *ctx_obj ; u32 tmp___2 ; bool tmp___3 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (i915.enable_execlists == 0) { seq_puts(m, "Logical Ring Contexts are disabled\n"); return (0); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); ring_id = 0; goto ldv_49318; ldv_49317: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )ring_id; tmp___3 = intel_ring_initialized(ring); if ((int )tmp___3) { head_req = (struct drm_i915_gem_request *)0; count = 0; seq_printf(m, "%s\n", ring->name); status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 564U), 1); ctx_id = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 568U), 1); seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", status, ctx_id); status_pointer = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 928U), 1); seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); read_pointer = ring->next_context_status_buffer; write_pointer = (unsigned int )((u8 )status_pointer) & 7U; if ((int )read_pointer > (int )write_pointer) { write_pointer = (unsigned int )write_pointer + 6U; } else { } seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", (int )read_pointer, (int )write_pointer); i = 0; goto ldv_49306; ldv_49305: status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((ring->mmio_base + (u32 )(i * 8)) + 880U), 1); ctx_id = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((ring->mmio_base + (u32 )(i * 8)) + 884U), 1); seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", i, status, ctx_id); i = i + 1; ldv_49306: ; if (i <= 5) { goto ldv_49305; } else { } tmp = spinlock_check(& ring->execlist_lock); flags = 
_raw_spin_lock_irqsave(tmp); /* count pending requests on ring->execlist_queue while holding execlist_lock */ cursor = ring->execlist_queue.next; goto ldv_49312; ldv_49311: count = count + 1; cursor = cursor->next; ldv_49312: ; if ((unsigned long )(& ring->execlist_queue) != (unsigned long )cursor) { goto ldv_49311; } else { } tmp___1 = list_empty((struct list_head const *)(& ring->execlist_queue)); if (tmp___1 == 0) { __mptr = (struct list_head const *)ring->execlist_queue.next; head_req = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffff88UL; } else { head_req = (struct drm_i915_gem_request *)0; } spin_unlock_irqrestore(& ring->execlist_lock, flags); seq_printf(m, "\t%d requests in queue\n", count); if ((unsigned long )head_req != (unsigned long )((struct drm_i915_gem_request *)0)) { ctx_obj = (head_req->ctx)->engine[ring_id].state; tmp___2 = intel_execlists_ctx_id(ctx_obj); seq_printf(m, "\tHead request id: %u\n", tmp___2); seq_printf(m, "\tHead request tail: %u\n", head_req->tail); } else { } seq_putc(m, 10); } else { } ring_id = ring_id + 1; ldv_49318: ; if (ring_id <= 4) { goto ldv_49317; } else { } intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex); return (0); } }
/* Map a bit-6 swizzle mode code (values 0..7) to a human-readable name
 * for the debugfs output; any value outside the handled cases falls
 * through to the "bug" sentinel string. Pure function, no side effects. */
static char const *swizzle_string(unsigned int swizzle ) { { switch (swizzle) { case 0U: ; return ("none"); case 1U: ; return ("bit9"); case 2U: ; return ("bit9/bit10"); case 3U: ; return ("bit9/bit11"); case 4U: ; return ("bit9/bit10/bit11"); case 6U: ; return ("bit9/bit17"); case 7U: ; return ("bit9/bit10/bit17"); case 5U: ; return ("unknown"); } return ("bug"); } }
/* debugfs show handler: under struct_mutex and a runtime-PM reference,
 * prints the X/Y bit-6 swizzle modes (via swizzle_string) and then a
 * generation-dependent set of swizzle-related MMIO registers, read through
 * dev_priv->uncore.funcs.mmio_readl/mmio_readw. Definition continues on
 * the following chunk lines. */
static int i915_swizzle_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; char const *tmp ; char const *tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint16_t tmp___3 ; uint16_t tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; uint32_t tmp___7 ; uint32_t tmp___8 ; uint32_t tmp___9 ; uint32_t tmp___10 ; struct drm_i915_private *__p ; uint32_t tmp___11 ; struct drm_i915_private *__p___0 ; struct
drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); tmp = swizzle_string(dev_priv->mm.bit_6_swizzle_x); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", tmp); tmp___0 = swizzle_string(dev_priv->mm.bit_6_swizzle_y); seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", tmp___0); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 3U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 4U) { _L: /* CIL Label */ tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 66048L, 1); seq_printf(m, "DDC = 0x%08x\n", tmp___1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 66052L, 1); seq_printf(m, "DDC2 = 0x%08x\n", tmp___2); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 66054L, 1); seq_printf(m, "C0DRB3 = 0x%04x\n", (int )tmp___3); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 67078L, 1); seq_printf(m, "C1DRB3 = 0x%04x\n", (int )tmp___4); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 5U) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1331204L, 1); seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", tmp___5); tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1331208L, 1); seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", tmp___6); tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1331212L, 1); seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", tmp___7); tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1052672L, 1); seq_printf(m, "TILECTL = 0x%08x\n", tmp___8); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) 
> 7U) { tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 18952L, 1); seq_printf(m, "GAMTARBMODE = 0x%08x\n", tmp___9); } else { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16432L, 1); seq_printf(m, "ARB_MODE = 0x%08x\n", tmp___10); } tmp___11 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282624L, 1); seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", tmp___11); } else { } } } if ((dev_priv->quirks & 32UL) != 0UL) { seq_puts(m, "L-shaped memory detected\n"); } else { } intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex); return (0); } } static int per_file_ctx(int id , void *ptr , void *data ) { struct intel_context *ctx ; struct seq_file *m ; struct i915_hw_ppgtt *ppgtt ; bool tmp ; { ctx = (struct intel_context *)ptr; m = (struct seq_file *)data; ppgtt = ctx->ppgtt; if ((unsigned long )ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0)) { seq_printf(m, " no ppgtt for context %d\n", ctx->user_handle); return (0); } else { } tmp = i915_gem_context_is_default((struct intel_context const *)ctx); if ((int )tmp) { seq_puts(m, " default context:\n"); } else { seq_printf(m, " context %d:\n", ctx->user_handle); } (*(ppgtt->debug_dump))(ppgtt, m); return (0); } } static void gen8_ppgtt_info(struct seq_file *m , struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct i915_hw_ppgtt *ppgtt ; int unused ; int i ; u32 offset ; u64 pdp ; uint32_t tmp ; uint32_t tmp___0 ; bool tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ppgtt = dev_priv->mm.aliasing_ppgtt; if ((unsigned long )ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0)) { return; } else { } unused = 0; goto ldv_49386; ldv_49385: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )unused; tmp___1 = intel_ring_initialized(ring); if ((int )tmp___1) { seq_printf(m, "%s\n", ring->name); i = 0; goto ldv_49383; ldv_49382: offset = (u32 )((i + 78) * 8); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 
(off_t )((ring->mmio_base + offset) + 4U), 1); pdp = (u64 )tmp; pdp = pdp << 32; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + offset), 1); pdp = (u64 )tmp___0 | pdp; seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); i = i + 1; ldv_49383: ; if (i <= 3) { goto ldv_49382; } else { } } else { } unused = unused + 1; ldv_49386: ; if (unused <= 4) { goto ldv_49385; } else { } return; } } static void gen6_ppgtt_info(struct seq_file *m , struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct drm_file *file ; int i ; uint32_t tmp ; struct drm_i915_private *__p ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; bool tmp___4 ; struct i915_hw_ppgtt *ppgtt ; struct list_head const *__mptr ; struct drm_i915_file_private *file_priv ; struct task_struct *tmp___5 ; struct list_head const *__mptr___0 ; uint32_t tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 9504L, 1); seq_printf(m, "GFX_MODE: 0x%08x\n", tmp); } else { } i = 0; goto ldv_49409; ldv_49408: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___4 = intel_ring_initialized(ring); if ((int )tmp___4) { seq_printf(m, "%s\n", ring->name); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 7U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 668U), 1); seq_printf(m, "GFX_MODE: 0x%08x\n", tmp___0); } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 552U), 1); seq_printf(m, "PP_DIR_BASE: 0x%08x\n", tmp___1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 1304U), 1); seq_printf(m, "PP_DIR_BASE_READ: 
0x%08x\n", tmp___2); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 544U), 1); seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", tmp___3); } else { } i = i + 1; ldv_49409: ; if (i <= 4) { goto ldv_49408; } else { } if ((unsigned long )dev_priv->mm.aliasing_ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { ppgtt = dev_priv->mm.aliasing_ppgtt; seq_puts(m, "aliasing PPGTT:\n"); seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->__annonCompField80.pd.__annonCompField79.pd_offset); (*(ppgtt->debug_dump))(ppgtt, m); } else { } __mptr = (struct list_head const *)dev->filelist.prev; file = (struct drm_file *)__mptr + 0xffffffffffffffe8UL; goto ldv_49418; ldv_49417: file_priv = (struct drm_i915_file_private *)file->driver_priv; tmp___5 = get_pid_task(file->pid, 0); seq_printf(m, "proc: %s\n", (char *)(& tmp___5->comm)); idr_for_each(& file_priv->context_idr, & per_file_ctx, (void *)m); __mptr___0 = (struct list_head const *)file->lhead.prev; file = (struct drm_file *)__mptr___0 + 0xffffffffffffffe8UL; ldv_49418: ; if ((unsigned long )(& file->lhead) != (unsigned long )(& dev->filelist)) { goto ldv_49417; } else { } tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); seq_printf(m, "ECOCHK: 0x%08x\n", tmp___6); return; } } static int i915_ppgtt_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; int tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); ret = tmp; if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { gen8_ppgtt_info(m, dev); } else { __p = to_i915((struct drm_device const *)dev); if 
((unsigned int )((unsigned char )__p->info.gen) > 5U) { gen6_ppgtt_info(m, dev); } else { } } intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex); return (0); } }
/* count_irq_waiters(): sum ring->irq_refcount over every initialized engine.
 * NOTE(review): CIL-generated code — the ldv_49446/ldv_49447 goto/label pair
 * is the mechanical expansion of the driver's per-ring loop; "i <= 4" iterates
 * the 5 engine slots of dev_priv->ring[]. Do not hand-edit the labels. */
static int count_irq_waiters(struct drm_i915_private *i915___0 ) { struct intel_engine_cs *ring ; int count ; int i ; bool tmp ; { count = 0; i = 0; goto ldv_49447; ldv_49446: ring = (struct intel_engine_cs *)(& i915___0->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { count = (int )(ring->irq_refcount + (unsigned int )count); } else { } i = i + 1; ldv_49447: ; if (i <= 4) { goto ldv_49446; } else { } return (count); } }
/* i915_rps_boost_info(): debugfs dump of RPS state (enabled flag, GPU-busy
 * flag, CPU waiter count, then per-file boost stats on the following
 * generated line). The tmp___N temporaries hold intel_gpu_freq() results. */
static int i915_rps_boost_info(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_file *file ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; struct list_head const *__mptr ; struct drm_i915_file_private *file_priv ; struct task_struct *task ; int tmp___5 ; struct list_head const *__mptr___0 ; int tmp___6 ; int tmp___7 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; seq_printf(m, "RPS enabled? %d\n", (int )dev_priv->rps.enabled); seq_printf(m, "GPU busy? %d\n", (int )dev_priv->mm.busy); tmp = count_irq_waiters(dev_priv); seq_printf(m, "CPU waiting? 
%d\n", tmp); tmp___0 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq); tmp___1 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq_softlimit); tmp___2 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq_softlimit); tmp___3 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq); tmp___4 = intel_gpu_freq(dev_priv, (int )dev_priv->rps.cur_freq); seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", tmp___4, tmp___3, tmp___2, tmp___1, tmp___0); spin_lock(& dev_priv->rps.client_lock); __mptr = (struct list_head const *)dev->filelist.prev; file = (struct drm_file *)__mptr + 0xffffffffffffffe8UL; goto ldv_49464; ldv_49463: file_priv = (struct drm_i915_file_private *)file->driver_priv; rcu_read_lock(); task = pid_task(file->pid, 0); tmp___5 = list_empty((struct list_head const *)(& file_priv->rps.link)); seq_printf(m, "%s [%d]: %d boosts%s\n", (unsigned long )task != (unsigned long )((struct task_struct *)0) ? (char *)(& task->comm) : (char *)"", (unsigned long )task != (unsigned long )((struct task_struct *)0) ? task->pid : -1, file_priv->rps.boosts, tmp___5 != 0 ? (char *)"" : (char *)", active"); rcu_read_unlock(); __mptr___0 = (struct list_head const *)file->lhead.prev; file = (struct drm_file *)__mptr___0 + 0xffffffffffffffe8UL; ldv_49464: ; if ((unsigned long )(& file->lhead) != (unsigned long )(& dev->filelist)) { goto ldv_49463; } else { } tmp___6 = list_empty((struct list_head const *)(& dev_priv->rps.semaphores.link)); seq_printf(m, "Semaphore boosts: %d%s\n", dev_priv->rps.semaphores.boosts, tmp___6 != 0 ? (char *)"" : (char *)", active"); tmp___7 = list_empty((struct list_head const *)(& dev_priv->rps.mmioflips.link)); seq_printf(m, "MMIO flip boosts: %d%s\n", dev_priv->rps.mmioflips.boosts, tmp___7 != 0 ? 
(char *)"" : (char *)", active"); seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); spin_unlock(& dev_priv->rps.client_lock); return (0); } }
/* i915_llc(): debugfs report of LLC support (from device info) and eLLC size.
 * NOTE(review): CIL-generated; to_i915() result is read through __p. */
static int i915_llc(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; char const *tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); tmp = yesno((int )__p->info.has_llc); seq_printf(m, "LLC: %s\n", tmp); seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); return (0); } }
/* i915_edp_psr_status(): debugfs dump of eDP Panel Self Refresh state.
 * The platform checks below read packed device-info flag bytes at fixed
 * offsets (+45UL/+46UL) — CIL's lowering of the original IS_xxx() macros;
 * presumably HSW/BDW-family gates, but the exact flags are not visible
 * here — TODO confirm against the original driver source. Continues on
 * the following generated line. */
static int i915_edp_psr_status(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 psrperf ; u32 stat[3U] ; enum pipe pipe ; bool enabled ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; char const *tmp ; char const *tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; unsigned int tmp___3 ; char const *tmp___4 ; struct drm_i915_private *__p___6 ; uint32_t tmp___5 ; uint32_t tmp___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; char const *tmp___7 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; uint32_t tmp___8 ; struct drm_i915_private *__p___12 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; psrperf = 0U; enabled = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto _L___0; } else { __p___1 = to_i915((struct 
drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) != 8U) { _L___0: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) != 8U) { _L: /* CIL Label */ __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { seq_puts(m, "PSR not supported\n"); return (0); } else { } } else { } } } else { } } else { } } } else { } intel_runtime_pm_get(dev_priv); mutex_lock_nested(& dev_priv->psr.lock, 0U); tmp = yesno((int )dev_priv->psr.sink_support); seq_printf(m, "Sink_Support: %s\n", tmp); tmp___0 = yesno((int )dev_priv->psr.source_ok); seq_printf(m, "Source_OK: %s\n", tmp___0); tmp___1 = yesno((unsigned long )dev_priv->psr.enabled != (unsigned long )((struct intel_dp *)0)); seq_printf(m, "Enabled: %s\n", tmp___1); tmp___2 = yesno((int )dev_priv->psr.active); seq_printf(m, "Active: %s\n", tmp___2); seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", dev_priv->psr.busy_frontbuffer_bits); tmp___3 = work_busy(& dev_priv->psr.work.work); tmp___4 = yesno((int )tmp___3); seq_printf(m, "Re-enable work scheduled: %s\n", tmp___4); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 46UL) != 0U) { __p___6 = to_i915((struct drm_device const *)dev); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U ? 
411648L : 456704L, 1); enabled = (tmp___5 & 2147483648U) != 0U; } else { pipe = 0; goto ldv_49551; ldv_49550: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 1966228), 1); stat[(int )pipe] = tmp___6 & 7U; if (stat[(int )pipe] == 3U || stat[(int )pipe] == 4U) { enabled = 1; } else { } pipe = (enum pipe )((int )pipe + 1); ldv_49551: __p___7 = dev_priv; if ((int )__p___7->info.num_pipes > (int )pipe) { goto ldv_49550; } else { } } tmp___7 = yesno((int )enabled); seq_printf(m, "HW Enabled & Active bit: %s", tmp___7); __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 46UL) == 0U) { pipe = 0; goto ldv_49566; ldv_49565: ; if (stat[(int )pipe] == 3U || stat[(int )pipe] == 4U) { seq_printf(m, " pipe %c", (int )pipe + 65); } else { } pipe = (enum pipe )((int )pipe + 1); ldv_49566: __p___9 = dev_priv; if ((int )__p___9->info.num_pipes > (int )pipe) { goto ldv_49565; } else { } } else { } seq_puts(m, "\n"); __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___12 + 46UL) != 0U) { __p___11 = to_i915((struct drm_device const *)dev); tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p___11 + 45UL) != 0U ? 
411716L : 456772L, 1); psrperf = tmp___8 & 16777215U; seq_printf(m, "Performance_Counter: %u\n", psrperf); } else { } mutex_unlock(& dev_priv->psr.lock); intel_runtime_pm_put(dev_priv); return (0); } } static int i915_sink_crc(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct intel_encoder *encoder ; struct intel_connector *connector ; struct intel_dp *intel_dp ; int ret ; u8 crc[6U] ; struct list_head const *__mptr ; struct drm_encoder const *__mptr___0 ; struct list_head const *__mptr___1 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; intel_dp = (struct intel_dp *)0; drm_modeset_lock_all(dev); __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_49600; ldv_49599: ; if (connector->base.dpms != 0) { goto ldv_49595; } else { } if ((unsigned long )connector->base.encoder == (unsigned long )((struct drm_encoder *)0)) { goto ldv_49595; } else { } __mptr___0 = (struct drm_encoder const *)connector->base.encoder; encoder = (struct intel_encoder *)__mptr___0; if ((unsigned int )encoder->type != 8U) { goto ldv_49595; } else { } intel_dp = enc_to_intel_dp(& encoder->base); ret = intel_dp_sink_crc(intel_dp, (u8 *)(& crc)); if (ret != 0) { goto out; } else { } seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", (int )crc[0], (int )crc[1], (int )crc[2], (int )crc[3], (int )crc[4], (int )crc[5]); goto out; ldv_49595: __mptr___1 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_49600: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_49599; } else { } ret = -19; out: drm_modeset_unlock_all(dev); return (ret); } } static int i915_energy_uJ(struct seq_file *m , void *data ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u64 power ; 
u32 units ; struct drm_i915_private *__p ; int _err ; uint32_t tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (-19); } else { } intel_runtime_pm_get(dev_priv); power = paravirt_read_msr(1542U, & _err); power = (power & 7936ULL) >> 8; units = (u32 )(1000000 >> (int )power); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1333548L, 1); power = (u64 )tmp; power = (u64 )units * power; intel_runtime_pm_put(dev_priv); seq_printf(m, "%llu", power); return (0); } } static int i915_pc8_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; char const *tmp ; bool tmp___0 ; int tmp___1 ; char const *tmp___2 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { seq_puts(m, "not supported\n"); return (0); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) != 8U) { seq_puts(m, "not supported\n"); return (0); } else { } } } else { } tmp = yesno(! 
dev_priv->mm.busy); seq_printf(m, "GPU idle: %s\n", tmp); tmp___0 = intel_irqs_enabled(dev_priv); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } tmp___2 = yesno(tmp___1); seq_printf(m, "IRQs disabled: %s\n", tmp___2); return (0); } } static char const *power_domain_str(enum intel_display_power_domain domain ) { int __ret_warn_on ; long tmp ; { switch ((unsigned int )domain) { case 0U: ; return ("PIPE_A"); case 1U: ; return ("PIPE_B"); case 2U: ; return ("PIPE_C"); case 3U: ; return ("PIPE_A_PANEL_FITTER"); case 4U: ; return ("PIPE_B_PANEL_FITTER"); case 5U: ; return ("PIPE_C_PANEL_FITTER"); case 6U: ; return ("TRANSCODER_A"); case 7U: ; return ("TRANSCODER_B"); case 8U: ; return ("TRANSCODER_C"); case 9U: ; return ("TRANSCODER_EDP"); case 10U: ; return ("PORT_DDI_A_2_LANES"); case 11U: ; return ("PORT_DDI_A_4_LANES"); case 12U: ; return ("PORT_DDI_B_2_LANES"); case 13U: ; return ("PORT_DDI_B_4_LANES"); case 14U: ; return ("PORT_DDI_C_2_LANES"); case 15U: ; return ("PORT_DDI_C_4_LANES"); case 16U: ; return ("PORT_DDI_D_2_LANES"); case 17U: ; return ("PORT_DDI_D_4_LANES"); case 18U: ; return ("PORT_DSI"); case 19U: ; return ("PORT_CRT"); case 20U: ; return ("PORT_OTHER"); case 21U: ; return ("VGA"); case 22U: ; return ("AUDIO"); case 23U: ; return ("PLLS"); case 24U: ; return ("AUX_A"); case 25U: ; return ("AUX_B"); case 26U: ; return ("AUX_C"); case 27U: ; return ("AUX_D"); case 28U: ; return ("INIT"); default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_debugfs.c", 2562, "Missing switch case (%lu) in %s\n", (long )domain, "power_domain_str"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return ("?"); } } } static int i915_power_domain_info(struct seq_file *m , void *unused ) { struct 
drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct i915_power_domains *power_domains ; int i ; struct i915_power_well *power_well ; enum intel_display_power_domain power_domain ; char const *tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; power_domains = & dev_priv->power_domains; mutex_lock_nested(& power_domains->lock, 0U); seq_printf(m, "%-25s %s\n", (char *)"Power well/domain", (char *)"Use count"); i = 0; goto ldv_49695; ldv_49694: power_well = power_domains->power_wells + (unsigned long )i; seq_printf(m, "%-25s %d\n", power_well->name, power_well->count); power_domain = 0; goto ldv_49692; ldv_49691: ; if (((power_well->domains >> (int )power_domain) & 1UL) == 0UL) { goto ldv_49690; } else { } tmp = power_domain_str(power_domain); seq_printf(m, " %-23s %d\n", tmp, power_domains->domain_use_count[(unsigned int )power_domain]); ldv_49690: power_domain = (enum intel_display_power_domain )((unsigned int )power_domain + 1U); ldv_49692: ; if ((unsigned int )power_domain <= 28U) { goto ldv_49691; } else { } i = i + 1; ldv_49695: ; if (power_domains->power_well_count > i) { goto ldv_49694; } else { } mutex_unlock(& power_domains->lock); return (0); } } static void intel_seq_print_mode(struct seq_file *m , int tabs , struct drm_display_mode *mode ) { int i ; { i = 0; goto ldv_49704; ldv_49703: seq_putc(m, 9); i = i + 1; ldv_49704: ; if (i < tabs) { goto ldv_49703; } else { } seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", mode->base.id, (char *)(& mode->name), mode->vrefresh, mode->clock, mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, mode->type, mode->flags); return; } } static void intel_encoder_info(struct seq_file *m , struct intel_crtc *intel_crtc , struct intel_encoder 
*intel_encoder ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_crtc *crtc ; struct intel_connector *intel_connector ; struct drm_encoder *encoder ; struct list_head const *__mptr ; struct drm_connector *connector ; char const *tmp ; struct drm_display_mode *mode ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; crtc = & intel_crtc->base; encoder = & intel_encoder->base; seq_printf(m, "\tencoder %d: type: %s, connectors:\n", encoder->base.id, encoder->name); __mptr = (struct list_head const *)dev->mode_config.connector_list.next; intel_connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_49723; ldv_49722: ; if ((unsigned long )intel_connector->base.encoder == (unsigned long )encoder) { connector = & intel_connector->base; tmp = drm_get_connector_status_name(connector->status); seq_printf(m, "\t\tconnector %d: type: %s, status: %s", connector->base.id, connector->name, tmp); if ((unsigned int )connector->status == 1U) { mode = & crtc->mode; seq_printf(m, ", mode:\n"); intel_seq_print_mode(m, 2, mode); } else { seq_putc(m, 10); } } else { } __mptr___0 = (struct list_head const *)intel_connector->base.head.next; intel_connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_49723: ; if ((unsigned long )(& intel_connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_49722; } else { } return; } } static void intel_crtc_info(struct seq_file *m , struct intel_crtc *intel_crtc ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_crtc *crtc ; struct intel_encoder *intel_encoder ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; crtc = & intel_crtc->base; if ((unsigned long )(crtc->primary)->fb != (unsigned long )((struct drm_framebuffer *)0)) { seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 
((crtc->primary)->fb)->base.id, crtc->x, crtc->y, ((crtc->primary)->fb)->width, ((crtc->primary)->fb)->height); } else { seq_puts(m, "\tprimary plane disabled\n"); } __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_49738; ldv_49737: ; if ((unsigned long )intel_encoder->base.crtc == (unsigned long )crtc) { intel_encoder_info(m, intel_crtc, intel_encoder); } else { } __mptr___0 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49738: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_49737; } else { } return; } }
/* intel_panel_info(): print the panel's fixed mode via intel_seq_print_mode().
 * NOTE(review): panel->fixed_mode is dereferenced without a NULL check here;
 * callers presumably only reach this for panel connectors — TODO confirm. */
static void intel_panel_info(struct seq_file *m , struct intel_panel *panel ) { struct drm_display_mode *mode ; { mode = panel->fixed_mode; seq_printf(m, "\tfixed mode:\n"); intel_seq_print_mode(m, 2, mode); return; } }
/* intel_dp_info(): print DP sink details (DPCD revision byte 0, audio
 * support) and, when encoder->type == 8U (eDP per the original driver's
 * enum — constant inlined by CIL, confirm against intel_drv.h), the fixed
 * panel mode. */
static void intel_dp_info(struct seq_file *m , struct intel_connector *intel_connector ) { struct intel_encoder *intel_encoder ; struct intel_dp *intel_dp ; struct intel_dp *tmp ; { intel_encoder = intel_connector->encoder; tmp = enc_to_intel_dp(& intel_encoder->base); intel_dp = tmp; seq_printf(m, "\tDPCD rev: %x\n", (int )intel_dp->dpcd[0]); seq_printf(m, "\taudio support: %s\n", (int )intel_dp->has_audio ? (char *)"yes" : (char *)"no"); if ((unsigned int )intel_encoder->type == 8U) { intel_panel_info(m, & intel_connector->panel); } else { } return; } }
/* intel_hdmi_info(): print HDMI sink audio-support flag; body continues on
 * the following generated line (the ternary is split by CIL's line layout). */
static void intel_hdmi_info(struct seq_file *m , struct intel_connector *intel_connector ) { struct intel_encoder *intel_encoder ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; { intel_encoder = intel_connector->encoder; tmp = enc_to_intel_hdmi(& intel_encoder->base); intel_hdmi = tmp; seq_printf(m, "\taudio support: %s\n", (int )intel_hdmi->has_audio ? 
(char *)"yes" : (char *)"no"); return; } } static void intel_lvds_info(struct seq_file *m , struct intel_connector *intel_connector ) { { intel_panel_info(m, & intel_connector->panel); return; } } static void intel_connector_info(struct seq_file *m , struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct intel_encoder *intel_encoder ; struct drm_display_mode *mode ; char const *tmp ; char const *tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; intel_encoder = intel_connector->encoder; tmp = drm_get_connector_status_name(connector->status); seq_printf(m, "connector %d: type %s, status: %s\n", connector->base.id, connector->name, tmp); if ((unsigned int )connector->status == 1U) { seq_printf(m, "\tname: %s\n", (char *)(& connector->display_info.name)); seq_printf(m, "\tphysical dimensions: %dx%dmm\n", connector->display_info.width_mm, connector->display_info.height_mm); tmp___0 = drm_get_subpixel_order_name(connector->display_info.subpixel_order); seq_printf(m, "\tsubpixel order: %s\n", tmp___0); seq_printf(m, "\tCEA rev: %d\n", (int )connector->display_info.cea_rev); } else { } if ((unsigned long )intel_encoder != (unsigned long )((struct intel_encoder *)0)) { if ((unsigned int )intel_encoder->type == 7U || (unsigned int )intel_encoder->type == 8U) { intel_dp_info(m, intel_connector); } else if ((unsigned int )intel_encoder->type == 6U) { intel_hdmi_info(m, intel_connector); } else if ((unsigned int )intel_encoder->type == 4U) { intel_lvds_info(m, intel_connector); } else { } } else { } seq_printf(m, "\tmodes:\n"); __mptr___0 = (struct list_head const *)connector->modes.next; mode = (struct drm_display_mode *)__mptr___0; goto ldv_49775; ldv_49774: intel_seq_print_mode(m, 2, mode); __mptr___1 = (struct list_head const *)mode->head.next; mode = (struct 
drm_display_mode *)__mptr___1; ldv_49775: ; if ((unsigned long )(& mode->head) != (unsigned long )(& connector->modes)) { goto ldv_49774; } else { } return; } } static bool cursor_active(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; u32 state ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 9570U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 458880L, 1); state = tmp & 2147483648U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 9586U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 458880L, 1); state = tmp & 2147483648U; } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458880U), 1); state = tmp___0 & 39U; } } return (state != 0U); } } static bool cursor_position(struct drm_device *dev , int pipe , int *x , int *y ) { struct drm_i915_private *dev_priv ; u32 pos ; bool tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pos = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458888U), 1); *x = (int )pos & 2047; if ((pos & 32768U) != 0U) { *x = - *x; } else { } *y = (int )(pos >> 16) & 2047; if ((int )pos < 0) { *y = - *y; } else { } tmp = cursor_active(dev, pipe); return (tmp); } } static int i915_display_info(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_connector *connector ; 
/* (i915_display_info continued: locals, then CRTC loop ldv_49819/ldv_49820 and
 * connector loop ldv_49826/ldv_49827.) */
struct list_head const *__mptr ; bool active ; int x ; int y ; char const *tmp ; char const *tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_runtime_pm_get(dev_priv); drm_modeset_lock_all(dev); seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_49820; ldv_49819: tmp = yesno((int )crtc->active); seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", crtc->base.base.id, (int )crtc->pipe + 65, tmp, (crtc->config)->pipe_src_w, (crtc->config)->pipe_src_h); if ((int )crtc->active) { intel_crtc_info(m, crtc); active = cursor_position(dev, (int )crtc->pipe, & x, & y); tmp___0 = yesno((int )active); tmp___1 = yesno((int )crtc->cursor_base); seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", tmp___1, x, y, ((crtc->base.cursor)->state)->crtc_w, ((crtc->base.cursor)->state)->crtc_h, crtc->cursor_addr, tmp___0); } else { } tmp___2 = yesno(! crtc->pch_fifo_underrun_disabled); tmp___3 = yesno(! 
crtc->cpu_fifo_underrun_disabled); seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", tmp___3, tmp___2); __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_49820: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49819; } else { } seq_printf(m, "\n"); seq_printf(m, "Connector info\n"); seq_printf(m, "--------------\n"); __mptr___1 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct drm_connector *)__mptr___1 + 0xffffffffffffffe8UL; goto ldv_49827; ldv_49826: intel_connector_info(m, connector); __mptr___2 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___2 + 0xffffffffffffffe8UL; ldv_49827: ; if ((unsigned long )(& connector->head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_49826; } else { } drm_modeset_unlock_all(dev); intel_runtime_pm_put(dev_priv); return (0); } }
/* i915_semaphore_status(): dump inter-ring semaphore state.  Bails out early
 * when i915_semaphore_is_enabled() is false.  num_rings is popcount of
 * info.ring_mask; the hard-coded "i <= 4" loops walk all 5 engine slots of
 * dev_priv->ring, and the i*5+j / j*5+i indexing assumes a 5-wide signal/wait
 * matrix in the gen8 semaphore page (NOTE(review): the 5 stride is a baked-in
 * ring count -- confirm it still matches the ring array size). */
static int i915_semaphore_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int num_rings ; struct drm_i915_private *__p ; unsigned int tmp ; int i ; int j ; int ret ; bool tmp___0 ; int tmp___1 ; struct page *page ; uint64_t *seqno ; void *tmp___2 ; uint64_t offset ; bool tmp___3 ; uint32_t tmp___4 ; bool tmp___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool tmp___6 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); tmp = __arch_hweight32((unsigned int )__p->info.ring_mask); num_rings = (int )tmp; tmp___0 = i915_semaphore_is_enabled(dev); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { seq_puts(m, "Semaphores are disabled\n"); return (0); } else { } ret = 
mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { }
/* Gen8 path (byte at +45UL zero and info.gen == 8U -- the +45UL byte poke is
 * CIL's inlining of a platform-check bitfield; exact meaning lives in the
 * drm_i915_private layout): read the signal/wait seqnos straight out of the
 * kmap_atomic()-mapped semaphore page.  Otherwise fall through to _L and read
 * the per-ring mbox.signal mmio registers. */
intel_runtime_pm_get(dev_priv); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); tmp___2 = kmap_atomic(page); seqno = (uint64_t *)tmp___2; i = 0; goto ldv_50067; ldv_50066: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___3 = intel_ring_initialized(ring); if ((int )tmp___3) { seq_printf(m, "%s\n", ring->name); seq_puts(m, " Last signal:"); j = 0; goto ldv_50061; ldv_50060: offset = (uint64_t )(i * 5 + j); seq_printf(m, "0x%08llx (0x%02llx) ", *(seqno + offset), offset * 8ULL); j = j + 1; ldv_50061: ; if (j < num_rings) { goto ldv_50060; } else { } seq_putc(m, 10); seq_puts(m, " Last wait: "); j = 0; goto ldv_50064; ldv_50063: offset = (uint64_t )(j * 5 + i); seq_printf(m, "0x%08llx (0x%02llx) ", *(seqno + offset), offset * 8ULL); j = j + 1; ldv_50064: ; if (j < num_rings) { goto ldv_50063; } else { } seq_putc(m, 10); } else { } i = i + 1; ldv_50067: ; if (i <= 4) { goto ldv_50066; } else { } __kunmap_atomic((void *)seqno); } else { goto _L; } } else { _L: /* CIL Label */ seq_puts(m, " Last signal:"); i = 0; goto ldv_50073; ldv_50072: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___5 = intel_ring_initialized(ring); if ((int )tmp___5) { j = 0; goto ldv_50070; ldv_50069: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ring->semaphore.__annonCompField77.mbox.signal[j], 1); seq_printf(m, "0x%08x\n", tmp___4); j = j + 1; ldv_50070: ; if (j < num_rings) { goto ldv_50069; } else { } } else { } i = i + 1; ldv_50073: ; if (i <= 4) { goto ldv_50072; } else { } seq_putc(m, 10); } seq_puts(m, "\nSync seqno:\n"); i = 0; goto ldv_50079; ldv_50078: ring = (struct 
intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___6 = intel_ring_initialized(ring); if ((int )tmp___6) { j = 0; goto ldv_50076; ldv_50075: seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); j = j + 1; ldv_50076: ; if (j < num_rings) { goto ldv_50075; } else { } seq_putc(m, 10); } else { } i = i + 1; ldv_50079: ; if (i <= 4) { goto ldv_50078; } else { } seq_putc(m, 10); intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex); return (0); } }
/* i915_shared_dplls_info(): under modeset lock, print every shared DPLL's
 * name/id, crtc_mask/active/on state, and the tracked hw_state registers. */
static int i915_shared_dplls_info(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int i ; struct intel_shared_dpll *pll ; char const *tmp ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; drm_modeset_lock_all(dev); i = 0; goto ldv_50091; ldv_50090: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, (int )pll->id); tmp = yesno((int )pll->on); seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n", pll->config.crtc_mask, pll->active, tmp); seq_printf(m, " tracked hardware state:\n"); seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); seq_printf(m, " dpll_md: 0x%08x\n", pll->config.hw_state.dpll_md); seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0); seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1); seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll); i = i + 1; ldv_50091: ; if (dev_priv->num_shared_dpll > i) { goto ldv_50090; } else { } drm_modeset_unlock_all(dev); return (0); } }
/* i915_wa_registers(): for each recorded workaround register, re-read the
 * register and report OK/FAIL depending on whether (value ^ read) & mask is
 * still zero, i.e. whether the workaround value stuck. */
static int i915_wa_registers(struct seq_file *m , void *unused ) { int i ; int ret ; struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 addr ; u32 mask ; u32 value ; u32 read ; bool ok ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private 
*)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); i = 0; goto ldv_50108; ldv_50107: addr = dev_priv->workarounds.reg[i].addr; mask = dev_priv->workarounds.reg[i].mask; value = dev_priv->workarounds.reg[i].value; read = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )addr, 1); ok = ((value ^ read) & mask) == 0U; seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", addr, value, mask, read, (int )ok ? (char *)"OK" : (char *)"FAIL"); i = i + 1; ldv_50108: ; if ((u32 )i < dev_priv->workarounds.count) { goto ldv_50107; } else { } intel_runtime_pm_put(dev_priv); mutex_unlock(& dev->struct_mutex); return (0); } }
/* i915_ddb_info(): print the SKL+ display-buffer (DDB) allocation table;
 * no-op (returns 0) for gen <= 8.  Per pipe it prints each plane's start/end/
 * size and then the cursor entry.
 * NOTE(review): the plane-entry address is computed as
 * (skl_ddb_entry *)(& ddb->plane) + (pipe + plane), i.e. the 2-D
 * ddb->plane[pipe][plane] access appears flattened with stride 1 rather than
 * pipe * planes_per_pipe + plane -- verify against the original
 * &ddb->plane[pipe][plane] in i915_debugfs.c; this may be an extraction
 * artifact. */
static int i915_ddb_info(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct skl_ddb_allocation *ddb ; struct skl_ddb_entry *entry ; enum pipe pipe ; int plane ; struct drm_i915_private *__p ; uint16_t tmp ; struct drm_i915_private *__p___0 ; uint16_t tmp___0 ; struct drm_i915_private *__p___1 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { return (0); } else { } drm_modeset_lock_all(dev); ddb = & dev_priv->wm.__annonCompField83.skl_hw.ddb; seq_printf(m, "%-15s%8s%8s%8s\n", (char *)"", (char *)"Start", (char *)"End", (char *)"Size"); pipe = 0; goto ldv_50143; ldv_50142: seq_printf(m, "Pipe %c\n", (int )pipe + 65); plane = 0; goto ldv_50140; ldv_50139: entry = (struct skl_ddb_entry *)(& ddb->plane) + ((unsigned long )pipe + (unsigned long )plane); tmp = skl_ddb_entry_size((struct skl_ddb_entry const *)entry); seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, (int )entry->start, (int )entry->end, 
(int )tmp); plane = plane + 1; ldv_50140: __p___0 = dev_priv; if ((int )__p___0->info.num_sprites[(int )pipe] + 1 > plane) { goto ldv_50139; } else { } entry = (struct skl_ddb_entry *)(& ddb->cursor) + (unsigned long )pipe; tmp___0 = skl_ddb_entry_size((struct skl_ddb_entry const *)entry); seq_printf(m, " %-13s%8u%8u%8u\n", (char *)"Cursor", (int )entry->start, (int )entry->end, (int )tmp___0); pipe = (enum pipe )((int )pipe + 1); ldv_50143: __p___1 = dev_priv; if ((int )__p___1->info.num_pipes > (int )pipe) { goto ldv_50142; } else { } drm_modeset_unlock_all(dev); return (0); } }
/* drrs_status_per_crtc(): print DRRS (dynamic refresh-rate switching) state
 * for one CRTC: first label the encoder type attached to it (8U eDP, 9U DSI,
 * 6U HDMI, 7U DP), then the VBT drrs_type, then -- when the pipe config has
 * has_drrs -- the live state under drrs->mutex (which DP owns it, busy
 * frontbuffer bits, HIGH_RR vs LOW_RR and the resulting vrefresh). */
static void drrs_status_per_crtc(struct seq_file *m , struct drm_device *dev , struct intel_crtc *intel_crtc ) { struct intel_encoder *intel_encoder ; struct drm_i915_private *dev_priv ; struct i915_drrs *drrs ; int vrefresh ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct intel_panel *panel ; { dev_priv = (struct drm_i915_private *)dev->dev_private; drrs = & dev_priv->drrs; vrefresh = 0; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_50165; ldv_50164: ; if ((unsigned long )intel_encoder->base.crtc == (unsigned long )(& intel_crtc->base)) { switch ((unsigned int )intel_encoder->type) { case 8U: seq_puts(m, "eDP:\n"); goto ldv_50159; case 9U: seq_puts(m, "DSI:\n"); goto ldv_50159; case 6U: seq_puts(m, "HDMI:\n"); goto ldv_50159; case 7U: seq_puts(m, "DP:\n"); goto ldv_50159; default: seq_printf(m, "Other encoder (id=%d).\n", (unsigned int )intel_encoder->type); return; } ldv_50159: ; } else { } __mptr___0 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_50165: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_50164; } else { } if ((unsigned int )dev_priv->vbt.drrs_type == 1U) { seq_puts(m, "\tVBT: 
DRRS_type: Static"); } else if ((unsigned int )dev_priv->vbt.drrs_type == 2U) { seq_puts(m, "\tVBT: DRRS_type: Seamless"); } else if ((unsigned int )dev_priv->vbt.drrs_type == 0U) { seq_puts(m, "\tVBT: DRRS_type: None"); } else { seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); } seq_puts(m, "\n\n"); if ((int )(intel_crtc->config)->has_drrs) { mutex_lock_nested(& drrs->mutex, 0U); seq_puts(m, "\tDRRS Supported: Yes\n"); if ((unsigned long )drrs->dp == (unsigned long )((struct intel_dp *)0)) { seq_puts(m, "Idleness DRRS: Disabled"); mutex_unlock(& drrs->mutex); return; } else { } panel = & ((drrs->dp)->attached_connector)->panel; seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", drrs->busy_frontbuffer_bits); seq_puts(m, "\n\t\t"); if ((unsigned int )drrs->refresh_rate_type == 0U) { seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); vrefresh = (panel->fixed_mode)->vrefresh; } else if ((unsigned int )drrs->refresh_rate_type == 1U) { seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); vrefresh = (panel->downclock_mode)->vrefresh; } else { seq_printf(m, "DRRS_State: Unknown(%d)\n", (unsigned int )drrs->refresh_rate_type); mutex_unlock(& drrs->mutex); return; } seq_printf(m, "\t\tVrefresh: %d", vrefresh); seq_puts(m, "\n\t\t"); mutex_unlock(& drrs->mutex); } else { seq_puts(m, "\tDRRS Supported : No"); } seq_puts(m, "\n"); return; } }
/* i915_drrs_status(): walk every CRTC under its own drm_modeset_lock and run
 * drrs_status_per_crtc() for the active ones; print a fallback line when no
 * CRTC is active. */
static int i915_drrs_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct intel_crtc *intel_crtc ; int active_crtc_cnt ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; active_crtc_cnt = 0; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; intel_crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_50181; ldv_50180: drm_modeset_lock(& intel_crtc->base.mutex, (struct drm_modeset_acquire_ctx *)0); if ((int )intel_crtc->active) { active_crtc_cnt = active_crtc_cnt + 1; 
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); drrs_status_per_crtc(m, dev, intel_crtc); } else { } drm_modeset_unlock(& intel_crtc->base.mutex); __mptr___0 = (struct list_head const *)intel_crtc->base.head.next; intel_crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_50181: ; if ((unsigned long )(& intel_crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50180; } else { } if (active_crtc_cnt == 0) { seq_puts(m, "No active crtc found\n"); } else { } return (0); } }
/* i915_dp_mst_info(): under modeset lock, dump the MST topology of every DP
 * encoder (type 7U) whose digital port has can_mst set. */
static int i915_dp_mst_info(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct drm_encoder *encoder ; struct intel_encoder *intel_encoder ; struct intel_digital_port *intel_dig_port ; struct list_head const *__mptr ; struct drm_encoder const *__mptr___0 ; struct list_head const *__mptr___1 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; drm_modeset_lock_all(dev); __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct drm_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_50204; ldv_50203: __mptr___0 = (struct drm_encoder const *)encoder; intel_encoder = (struct intel_encoder *)__mptr___0; if ((unsigned int )intel_encoder->type != 7U) { goto ldv_50202; } else { } intel_dig_port = enc_to_dig_port(encoder); if (! 
intel_dig_port->dp.can_mst) { goto ldv_50202; } else { } drm_dp_mst_dump_topology(m, & intel_dig_port->dp.mst_mgr); ldv_50202: __mptr___1 = (struct list_head const *)encoder->head.next; encoder = (struct drm_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_50204: ; if ((unsigned long )(& encoder->head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_50203; } else { } drm_modeset_unlock_all(dev); return (0); } }
/* i915_pipe_crc_open(): open the per-pipe CRC debugfs file.  -19 (-ENODEV) if
 * the pipe index is beyond num_pipes; -16 (-EBUSY) enforces a single opener
 * via the pipe_crc->opened flag under the irq spinlock. */
static int i915_pipe_crc_open(struct inode *inode , struct file *filep ) { struct pipe_crc_info *info ; struct drm_i915_private *dev_priv ; struct intel_pipe_crc *pipe_crc ; struct drm_i915_private *__p ; { info = (struct pipe_crc_info *)inode->i_private; dev_priv = (struct drm_i915_private *)(info->dev)->dev_private; pipe_crc = (struct intel_pipe_crc *)(& dev_priv->pipe_crc) + (unsigned long )info->pipe; __p = to_i915((struct drm_device const *)info->dev); if ((int )info->pipe >= (int )__p->info.num_pipes) { return (-19); } else { } spin_lock_irq(& pipe_crc->lock); if ((int )pipe_crc->opened) { spin_unlock_irq(& pipe_crc->lock); return (-16); } else { } pipe_crc->opened = 1; filep->private_data = inode->i_private; spin_unlock_irq(& pipe_crc->lock); return (0); } }
/* i915_pipe_crc_release(): clear the single-opener flag on close. */
static int i915_pipe_crc_release(struct inode *inode , struct file *filep ) { struct pipe_crc_info *info ; struct drm_i915_private *dev_priv ; struct intel_pipe_crc *pipe_crc ; { info = (struct pipe_crc_info *)inode->i_private; dev_priv = (struct drm_i915_private *)(info->dev)->dev_private; pipe_crc = (struct intel_pipe_crc *)(& dev_priv->pipe_crc) + (unsigned long )info->pipe; spin_lock_irq(& pipe_crc->lock); pipe_crc->opened = 0; spin_unlock_irq(& pipe_crc->lock); return (0); } }
/* pipe_crc_data_count(): number of queued CRC entries in the 128-slot ring
 * buffer ((head - tail) & 127).  The inline asm + infinite ldv_50229 loop is
 * the expansion of BUG() firing when the caller does not hold pipe_crc->lock
 * (lockdep-style assert_spin_locked). */
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc ) { int tmp ; long tmp___0 ; { tmp = queued_spin_is_locked(& pipe_crc->lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection 
__bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_debugfs.c"), "i" (3170), "i" (12UL)); ldv_50229: ; goto ldv_50229; } else { } return ((pipe_crc->head - pipe_crc->tail) & 127); } }
/* i915_pipe_crc_read(): blocking read of CRC records.  Each record is a fixed
 * 54-byte formatted line ("%8u %8x %8x %8x %8x %8x\n" into buf[55]); count
 * must exceed 53 bytes (-22/-EINVAL otherwise), 0 if no CRC source is armed.
 * The ldv_50250/ldv_50249 sequence is the expansion of wait_event_interruptible
 * with the spinlock dropped around schedule(); O_NONBLOCK (f_flags & 2048U)
 * returns -11 (-EAGAIN).  copy_to_user() failing completely (54 bytes left)
 * yields -14 (-EFAULT); the lock is dropped around the user copy and the tail
 * has already been advanced under the lock. */
static ssize_t i915_pipe_crc_read(struct file *filep , char *user_buf , size_t count , loff_t *pos ) { struct pipe_crc_info *info ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_pipe_crc *pipe_crc ; char buf[55U] ; int n_entries ; ssize_t bytes_read ; int ret ; int __ret ; wait_queue_t __wait ; long __ret___0 ; long __int ; long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; struct intel_pipe_crc_entry *entry ; int ret___0 ; int tmp___3 ; unsigned long tmp___4 ; { info = (struct pipe_crc_info *)filep->private_data; dev = info->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe_crc = (struct intel_pipe_crc *)(& dev_priv->pipe_crc) + (unsigned long )info->pipe; if (count <= 53UL) { return (-22L); } else { } if ((unsigned int )pipe_crc->source == 0U) { return (0L); } else { } spin_lock_irq(& pipe_crc->lock); goto ldv_50254; ldv_50253: ; if ((filep->f_flags & 2048U) != 0U) { spin_unlock_irq(& pipe_crc->lock); return (-11L); } else { } __ret = 0; tmp___1 = pipe_crc_data_count(pipe_crc); if (tmp___1 == 0) { __ret___0 = 0L; INIT_LIST_HEAD(& __wait.task_list); __wait.flags = 0U; ldv_50250: tmp = prepare_to_wait_event(& pipe_crc->wq, & __wait, 1); __int = tmp; tmp___0 = pipe_crc_data_count(pipe_crc); if (tmp___0 != 0) { goto ldv_50249; } else { } if (__int != 0L) { __ret___0 = __int; goto ldv_50249; } else { } spin_unlock_irq(& pipe_crc->lock); schedule(); spin_lock_irq(& pipe_crc->lock); goto ldv_50250; ldv_50249: finish_wait(& pipe_crc->wq, & __wait); __ret = (int )__ret___0; } else { } ret 
= __ret; if (ret != 0) { spin_unlock_irq(& pipe_crc->lock); return ((ssize_t )ret); } else { } ldv_50254: tmp___2 = pipe_crc_data_count(pipe_crc); if (tmp___2 == 0) { goto ldv_50253; } else { } n_entries = (int )(count / 54UL); bytes_read = 0L; goto ldv_50260; ldv_50259: entry = pipe_crc->entries + (unsigned long )pipe_crc->tail; if (((pipe_crc->head - pipe_crc->tail) & 127) <= 0) { goto ldv_50258; } else { } pipe_crc->tail = (pipe_crc->tail + 1) & 127; tmp___3 = snprintf((char *)(& buf), 55UL, "%8u %8x %8x %8x %8x %8x\n", entry->frame, entry->crc[0], entry->crc[1], entry->crc[2], entry->crc[3], entry->crc[4]); bytes_read = (ssize_t )tmp___3 + bytes_read; spin_unlock_irq(& pipe_crc->lock); tmp___4 = copy_to_user((void *)user_buf, (void const *)(& buf), 54UL); ret___0 = (int )tmp___4; if (ret___0 == 54) { return (-14L); } else { } user_buf = user_buf + 54UL; n_entries = n_entries - 1; spin_lock_irq(& pipe_crc->lock); ldv_50260: ; if (n_entries > 0) { goto ldv_50259; } else { } ldv_50258: spin_unlock_irq(& pipe_crc->lock); return (bytes_read); } }
/* File operations for the per-pipe CRC files: positional initializer filled in
 * by CIL (owner, read, open, release; everything else NULL). */
static struct file_operations const i915_pipe_crc_fops = {& __this_module, 0, & i915_pipe_crc_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_pipe_crc_open, 0, & i915_pipe_crc_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* One pipe_crc_info per pipe A/B/C; .dev is filled lazily in _create(). */
static struct pipe_crc_info i915_pipe_crc_data[3U] = { {"i915_pipe_A_crc", 0, 0}, {"i915_pipe_B_crc", 0, 1}, {"i915_pipe_C_crc", 0, 2}};
/* i915_pipe_crc_create(): register the debugfs file for @pipe (mode 292
 * decimal == 0444 octal, read-only); -12 (-ENOMEM) when creation fails. */
static int i915_pipe_crc_create(struct dentry *root , struct drm_minor *minor , enum pipe pipe ) { struct drm_device *dev ; struct dentry *ent ; struct pipe_crc_info *info ; int tmp ; { dev = minor->dev; info = (struct pipe_crc_info *)(& i915_pipe_crc_data) + (unsigned long )pipe; info->dev = dev; ent = debugfs_create_file(info->name, 292, root, (void *)info, & i915_pipe_crc_fops); if ((unsigned long )ent == (unsigned long )((struct dentry *)0)) { return (-12); } else { } tmp = drm_add_fake_info_node(minor, ent, (void const *)info); return (tmp); } }
/* Human-readable names indexed by enum intel_pipe_crc_source. */
static char const * const 
pipe_crc_sources[10U] = { "none", "plane1", "plane2", "pf", "pipe", "TV", "DP-B", "DP-C", "DP-D", "auto"};
/* pipe_crc_source_name(): map a CRC source enum value to its display name. */
static char const *pipe_crc_source_name(enum intel_pipe_crc_source source ) { { return ((char const *)pipe_crc_sources[(unsigned int )source]); } }
/* display_crc_ctl_show(): seq_file body listing, for each of the 3 pipes
 * (A..C, hence i + 65 for the letter), its currently armed CRC source. */
static int display_crc_ctl_show(struct seq_file *m , void *data ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int i ; char const *tmp ; { dev = (struct drm_device *)m->private; dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_50283; ldv_50282: tmp = pipe_crc_source_name(dev_priv->pipe_crc[i].source); seq_printf(m, "%c %s\n", i + 65, tmp); i = i + 1; ldv_50283: ; if (i <= 2) { goto ldv_50282; } else { } return (0); } }
/* display_crc_ctl_open(): standard single_open() wrapper for the show fn. */
static int display_crc_ctl_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; int tmp ; { dev = (struct drm_device *)inode->i_private; tmp = single_open(file, & display_crc_ctl_show, (void *)dev); return (tmp); } }
/* i8xx_pipe_crc_ctl_reg(): gen2 CRC control encoding.  Source 9U ("auto")
 * collapses to 4U ("pipe"); only pipe (register value 0xC0000000) and none
 * are valid, anything else is -22 (-EINVAL). */
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source , uint32_t *val ) { { if ((unsigned int )*source == 9U) { *source = 4; } else { } switch ((unsigned int )*source) { case 4U: *val = 3221225472U; goto ldv_50295; case 0U: *val = 0U; goto ldv_50295; default: ; return (-22); } ldv_50295: ; return (0); } }
/* i9xx_pipe_crc_auto_source(): resolve the "auto" CRC source by inspecting
 * which encoder drives @pipe: default 4U ("pipe"); TV encoder (5U) -> 5U
 * ("TV"); DP/eDP (7U/8U) -> 6U/7U/8U for port B/C/D (port codes 1U/2U/3U),
 * with a WARN for any other port.  Runs under drm_modeset_lock_all(). */
static int i9xx_pipe_crc_auto_source(struct drm_device *dev , enum pipe pipe , enum intel_pipe_crc_source *source ) { struct intel_encoder *encoder ; struct intel_crtc *crtc ; struct intel_digital_port *dig_port ; int ret ; struct list_head const *__mptr ; struct drm_crtc const *__mptr___0 ; int __ret_warn_on ; long tmp ; struct list_head const *__mptr___1 ; { ret = 0; *source = 4; drm_modeset_lock_all(dev); __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_50327; ldv_50326: ; if ((unsigned long )encoder->base.crtc == (unsigned long )((struct drm_crtc *)0)) { goto ldv_50311; } else { } __mptr___0 = (struct drm_crtc 
const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr___0; if ((int )crtc->pipe != (int )pipe) { goto ldv_50311; } else { } switch ((unsigned int )encoder->type) { case 5U: *source = 5; goto ldv_50315; case 7U: ; case 8U: dig_port = enc_to_dig_port(& encoder->base); switch ((unsigned int )dig_port->port) { case 1U: *source = 6; goto ldv_50319; case 2U: *source = 7; goto ldv_50319; case 3U: *source = 8; goto ldv_50319; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_debugfs.c", 3390, "nonexisting DP port %c\n", (unsigned int )dig_port->port + 65U); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto ldv_50319; } ldv_50319: ; goto ldv_50315; default: ; goto ldv_50315; } ldv_50315: ; ldv_50311: __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_50327: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_50326; } else { } drm_modeset_unlock_all(dev); return (ret); } }
/* vlv_pipe_crc_ctl_reg(): VLV/CHV CRC control encoding.  DP sources (6U/7U/8U)
 * additionally require the "stable symbols" scramble-reset workaround: set
 * bit 31 plus a per-pipe bit (1/2/0x4000 for pipes A/B/C) in the register at
 * display_mmio_offset + 397652U (0x61154 -- presumably PORT_DFT2_G4X; verify
 * against i915_reg.h).  DP-D (8U) is only accepted on the platform variant
 * checked via the +45UL byte / gen == 8U test (CHV -- TODO confirm). */
static int vlv_pipe_crc_ctl_reg(struct drm_device *dev , enum pipe pipe , enum intel_pipe_crc_source *source , uint32_t *val ) { struct drm_i915_private *dev_priv ; bool need_stable_symbols ; int ret ; int tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; need_stable_symbols = 0; if ((unsigned int )*source == 9U) { tmp = i9xx_pipe_crc_auto_source(dev, pipe, source); ret = tmp; if (ret != 0) { return (ret); } else { } } else { } switch ((unsigned int )*source) { case 4U: *val = 2147483648U; goto ldv_50339; case 6U: *val = 2952790016U; need_stable_symbols = 1; goto 
ldv_50339; case 7U: *val = 3087007744U; need_stable_symbols = 1; goto ldv_50339; case 8U: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return (-22); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 8U) { return (-22); } else { } } *val = 2550136832U; need_stable_symbols = 1; goto ldv_50339; case 0U: *val = 0U; goto ldv_50339; default: ; return (-22); } ldv_50339: ; if ((int )need_stable_symbols) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), 1); tmp___0 = tmp___1; tmp___0 = tmp___0 | 2147483648U; switch ((int )pipe) { case 0: tmp___0 = tmp___0 | 1U; goto ldv_50359; case 1: tmp___0 = tmp___0 | 2U; goto ldv_50359; case 2: tmp___0 = tmp___0 | 16384U; goto ldv_50359; default: ; return (-22); } ldv_50359: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), tmp___0, 1); } else { } return (0); } }
/* i9xx_pipe_crc_ctl_reg(): gen3/4 CRC control encoding.  TV (5U) and the DP
 * sources (6U/7U/8U) are gated on platform-capability bytes (+46UL / +44UL --
 * CIL-inlined feature bitfields of drm_i915_private; the +44UL one is the
 * IS_G4X check judging by the WARN string below).  DP sources need the
 * scramble-reset workaround: WARN_ON(!IS_G4X(dev)), set bit 25 (33554432U) in
 * the register at 397648L (0x61150), then a per-pipe bit (1U pipe A, 2U
 * otherwise) in display_mmio_offset + 397652U. */
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev , enum pipe pipe , enum intel_pipe_crc_source *source , uint32_t *val ) { struct drm_i915_private *dev_priv ; bool need_stable_symbols ; int ret ; int tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; uint32_t tmp___0 ; uint32_t tmp___1 ; int __ret_warn_on ; struct drm_i915_private *__p___3 ; long tmp___2 ; uint32_t tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; need_stable_symbols = 0; if ((unsigned int )*source == 9U) { tmp = i9xx_pipe_crc_auto_source(dev, pipe, source); ret = tmp; if (ret != 0) { return (ret); } else { } } else { } switch ((unsigned int )*source) { case 4U: *val = 2147483648U; goto ldv_50373; case 5U: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return 
(-22); } else { } *val = 3221225472U; goto ldv_50373; case 6U: __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) == 0U) { return (-22); } else { } *val = 3758096384U; need_stable_symbols = 1; goto ldv_50373; case 7U: __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) == 0U) { return (-22); } else { } *val = 4026531840U; need_stable_symbols = 1; goto ldv_50373; case 8U: __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) == 0U) { return (-22); } else { } *val = 2952790016U; need_stable_symbols = 1; goto ldv_50373; case 0U: *val = 0U; goto ldv_50373; default: ; return (-22); } ldv_50373: ; if ((int )need_stable_symbols) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), 1); tmp___0 = tmp___1; __p___3 = to_i915((struct drm_device const *)dev); __ret_warn_on = (unsigned int )*((unsigned char *)__p___3 + 44UL) == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_debugfs.c", 3534, "WARN_ON(!IS_G4X(dev))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397648L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397648L, tmp___3 | 33554432U, 1); if ((int )pipe == 0) { tmp___0 = tmp___0 | 1U; } else { tmp___0 = tmp___0 | 2U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), tmp___0, 1); } else { } return (0); } }
/* vlv_undo_pipe_scramble_reset(): reverse of the VLV "stable symbols"
 * workaround -- clear the per-pipe scramble-reset bit (masks clear bit 0/1/14
 * for pipes A/B/C) and, once no per-pipe bits (mask 16387U = 0x4003) remain,
 * also clear the master bit 31, then write the register back. */
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; uint32_t tmp 
; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), 1); tmp = tmp___0; switch ((int )pipe) { case 0: tmp = tmp & 4294967294U; goto ldv_50420; case 1: tmp = tmp & 4294967293U; goto ldv_50420; case 2: tmp = tmp & 4294950911U; goto ldv_50420; default: ; return; } ldv_50420: ; if ((tmp & 16387U) == 0U) { tmp = tmp & 2147483647U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), tmp, 1); return; } }
/* g4x_undo_pipe_scramble_reset(): same undo for g4x -- clear bit 0 (pipe A)
 * or bit 1 (other pipes) at display_mmio_offset + 397652U; when no per-pipe
 * bits (mask 16387U) remain, also clear bit 25 (mask 4261412863U) in the
 * register at 397648L that i9xx_pipe_crc_ctl_reg() set. */
static void g4x_undo_pipe_scramble_reset(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), 1); tmp = tmp___0; if ((int )pipe == 0) { tmp = tmp & 4294967294U; } else { tmp = tmp & 4294967293U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397652U), tmp, 1); if ((tmp & 16387U) == 0U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397648L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397648L, tmp___1 & 4261412863U, 1); } else { } return; } }
/* ilk_pipe_crc_ctl_reg(): ILK CRC control encoding; "auto" (9U) collapses to
 * "pipe" (4U); valid sources are plane1 (1U), plane2 (2U), pipe (4U), none. */
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source , uint32_t *val ) { { if ((unsigned int )*source == 9U) { *source = 4; } else { } switch ((unsigned int )*source) { case 1U: *val = 2147483648U; goto ldv_50435; case 2U: *val = 2415919104U; goto ldv_50435; case 4U: *val = 2684354560U; goto ldv_50435; case 0U: *val = 0U; goto ldv_50435; default: ; return (-22); } ldv_50435: ; return (0); } }
/* hsw_trans_edp_pipe_A_crc_wa(): workaround for CRC capture on pipe A when it
 * drives the eDP transcoder (cpu_transcoder == 3U) without the PCH panel
 * fitter: force pch_pfit passthrough (force_thru), grab display power domain
 * 3, and reset the CRTC so the new state takes effect.  All under
 * drm_modeset_lock_all(); pipe_to_crtc_mapping[0] is pipe A. */
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc 
const *__mptr ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)dev_priv->pipe_to_crtc_mapping[0]; crtc = (struct intel_crtc *)__mptr; drm_modeset_lock_all(dev); if ((unsigned int )(crtc->config)->cpu_transcoder == 3U && ! (crtc->config)->pch_pfit.enabled) { (crtc->config)->pch_pfit.force_thru = 1; intel_display_power_get(dev_priv, 3); intel_crtc_reset(crtc); } else { } drm_modeset_unlock_all(dev); return; } }
/* hsw_undo_trans_edp_pipe_A_crc_wa(): reverse the workaround above -- drop
 * force_thru, reset the CRTC, release power domain 3. */
static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)dev_priv->pipe_to_crtc_mapping[0]; crtc = (struct intel_crtc *)__mptr; drm_modeset_lock_all(dev); if ((int )(crtc->config)->pch_pfit.force_thru) { (crtc->config)->pch_pfit.force_thru = 0; intel_crtc_reset(crtc); intel_display_power_put(dev_priv, 3); } else { } drm_modeset_unlock_all(dev); return; } }
/* ivb_pipe_crc_ctl_reg(): IVB+ CRC control encoding; "auto" (9U) collapses to
 * "pf" (3U).  For the pf source on the platform flagged at byte +45UL
 * (presumably HSW -- confirm against the feature bitfield) with pipe A, apply
 * the eDP-transcoder workaround first. */
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev , enum pipe pipe , enum intel_pipe_crc_source *source , uint32_t *val ) { struct drm_i915_private *__p ; { if ((unsigned int )*source == 9U) { *source = 3; } else { } switch ((unsigned int )*source) { case 1U: *val = 2147483648U; goto ldv_50461; case 2U: *val = 2684354560U; goto ldv_50461; case 3U: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (int )pipe == 0) { hsw_trans_edp_pipe_A_crc_wa(dev); } else { } *val = 3221225472U; goto ldv_50461; case 0U: *val = 0U; goto ldv_50461; default: ; return (-22); } ldv_50461: ; return (0); } }
/* pipe_crc_set_source(): arm/disarm CRC capture on @pipe (continues past this
 * chunk).  Rejects changing between two non-none sources (-22) and capture on
 * a powered-down pipe (-5); dispatches to the per-generation *_ctl_reg helper
 * based on info.gen / platform bytes. */
static int pipe_crc_set_source(struct drm_device *dev , enum pipe pipe , enum intel_pipe_crc_source source ) { struct drm_i915_private *dev_priv ; struct intel_pipe_crc *pipe_crc ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_crtc *tmp ; u32 val ; int ret ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; 
struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct intel_pipe_crc_entry *entries ; char const *tmp___3 ; long tmp___4 ; void *tmp___5 ; struct intel_pipe_crc_entry *entries___0 ; struct intel_crtc *crtc___0 ; struct drm_crtc const *__mptr___0 ; long tmp___6 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe_crc = (struct intel_pipe_crc *)(& dev_priv->pipe_crc) + (unsigned long )pipe; tmp = intel_get_crtc_for_pipe(dev, (int )pipe); __mptr = (struct drm_crtc const *)tmp; crtc = (struct intel_crtc *)__mptr; val = 0U; if ((unsigned int )pipe_crc->source == (unsigned int )source) { return (0); } else { } if ((unsigned int )pipe_crc->source != 0U && (unsigned int )source != 0U) { return (-22); } else { } tmp___1 = intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )pipe); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("pipe_crc_set_source", "Trying to capture CRC while pipe is off\n"); } else { } return (-5); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 2U) { ret = i8xx_pipe_crc_ctl_reg(& source, & val); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 4U) { ret = i9xx_pipe_crc_ctl_reg(dev, pipe, & source, & val); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { ret = vlv_pipe_crc_ctl_reg(dev, pipe, & source, & val); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { ret = ilk_pipe_crc_ctl_reg(& source, & val); 
} else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { ret = ilk_pipe_crc_ctl_reg(& source, & val); } else { ret = ivb_pipe_crc_ctl_reg(dev, pipe, & source, & val); } } } } } if (ret != 0) { return (ret); } else { } if ((unsigned int )source != 0U) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = pipe_crc_source_name(source); drm_ut_debug_printk("pipe_crc_set_source", "collecting CRCs for pipe %c, %s\n", (int )pipe + 65, tmp___3); } else { } tmp___5 = kcalloc(128UL, 24UL, 208U); entries = (struct intel_pipe_crc_entry *)tmp___5; if ((unsigned long )entries == (unsigned long )((struct intel_pipe_crc_entry *)0)) { return (-12); } else { } hsw_disable_ips(crtc); spin_lock_irq(& pipe_crc->lock); kfree((void const *)pipe_crc->entries); pipe_crc->entries = entries; pipe_crc->head = 0; pipe_crc->tail = 0; spin_unlock_irq(& pipe_crc->lock); } else { } pipe_crc->source = source; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393296U), val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393296U), 0); if ((unsigned int )source == 0U) { __mptr___0 = (struct drm_crtc const *)dev_priv->pipe_to_crtc_mapping[(int )pipe]; crtc___0 = (struct intel_crtc *)__mptr___0; tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("pipe_crc_set_source", "stopping CRCs for pipe %c\n", (int )pipe + 65); } else { } drm_modeset_lock(& crtc___0->base.mutex, (struct drm_modeset_acquire_ctx *)0); if ((int )crtc___0->active) { intel_wait_for_vblank(dev, (int )pipe); } else { } drm_modeset_unlock(& crtc___0->base.mutex); 
spin_lock_irq(& pipe_crc->lock); entries___0 = pipe_crc->entries; pipe_crc->entries = (struct intel_pipe_crc_entry *)0; pipe_crc->head = 0; pipe_crc->tail = 0; spin_unlock_irq(& pipe_crc->lock); kfree((void const *)entries___0); __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 44UL) != 0U) { g4x_undo_pipe_scramble_reset(dev, pipe); } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { vlv_undo_pipe_scramble_reset(dev, pipe); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U && (int )pipe == 0) { hsw_undo_trans_edp_pipe_A_crc_wa(dev); } else { } } } hsw_enable_ips(crtc___0); } else { } return (0); } } static int display_crc_ctl_tokenize(char *buf , char **words , int max_words ) { int n_words ; char *end ; long tmp ; char *tmp___0 ; int tmp___1 ; { n_words = 0; goto ldv_50551; ldv_50550: buf = skip_spaces((char const *)buf); if ((int )((signed char )*buf) == 0) { goto ldv_50545; } else { } end = buf; goto ldv_50547; ldv_50546: end = end + 1; ldv_50547: ; if ((int )((signed char )*end) != 0 && ((int )_ctype[(int )((unsigned char )*end)] & 32) == 0) { goto ldv_50546; } else { } if (n_words == max_words) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("display_crc_ctl_tokenize", "too many words, allowed <= %d\n", max_words); } else { } return (-22); } else { } if ((int )((signed char )*end) != 0) { tmp___0 = end; end = end + 1; *tmp___0 = 0; } else { } tmp___1 = n_words; n_words = n_words + 1; *(words + (unsigned long )tmp___1) = buf; buf = end; ldv_50551: ; if ((int )((signed char )*buf) != 0) { goto ldv_50550; } else { } ldv_50545: ; return (n_words); } } static char const * const pipe_crc_objects[1U] = { "pipe"}; static int display_crc_ctl_parse_object(char const *buf , enum intel_pipe_crc_object *o ) { int i ; int tmp ; { i = 0; goto 
ldv_50563; ldv_50562: tmp = strcmp(buf, pipe_crc_objects[i]); if (tmp == 0) { *o = (enum intel_pipe_crc_object )i; return (0); } else { } i = i + 1; ldv_50563: ; if (i == 0) { goto ldv_50562; } else { } return (-22); } } static int display_crc_ctl_parse_pipe(char const *buf , enum pipe *pipe ) { char name ; { name = *buf; if ((int )((signed char )name) <= 64 || (int )((signed char )name) > 67) { return (-22); } else { } *pipe = (enum pipe )((int )name + -65); return (0); } } static int display_crc_ctl_parse_source(char const *buf , enum intel_pipe_crc_source *s ) { int i ; int tmp ; { i = 0; goto ldv_50578; ldv_50577: tmp = strcmp(buf, pipe_crc_sources[i]); if (tmp == 0) { *s = (enum intel_pipe_crc_source )i; return (0); } else { } i = i + 1; ldv_50578: ; if ((unsigned int )i <= 9U) { goto ldv_50577; } else { } return (-22); } } static int display_crc_ctl_parse(struct drm_device *dev , char *buf , size_t len ) { int n_words ; char *words[3U] ; enum pipe pipe ; enum intel_pipe_crc_object object ; enum intel_pipe_crc_source source ; long tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; long tmp___4 ; int tmp___5 ; int tmp___6 ; { n_words = display_crc_ctl_tokenize(buf, (char **)(& words), 3); if (n_words != 3) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("display_crc_ctl_parse", "tokenize failed, a command is %d words\n", 3); } else { } return (-22); } else { } tmp___1 = display_crc_ctl_parse_object((char const *)words[0], & object); if (tmp___1 < 0) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("display_crc_ctl_parse", "unknown object %s\n", words[0]); } else { } return (-22); } else { } tmp___3 = display_crc_ctl_parse_pipe((char const *)words[1], & pipe); if (tmp___3 < 0) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("display_crc_ctl_parse", "unknown pipe %s\n", words[1]); } else { } return 
(-22); } else { } tmp___5 = display_crc_ctl_parse_source((char const *)words[2], & source); if (tmp___5 < 0) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("display_crc_ctl_parse", "unknown source %s\n", words[2]); } else { } return (-22); } else { } tmp___6 = pipe_crc_set_source(dev, pipe, source); return (tmp___6); } } static ssize_t display_crc_ctl_write(struct file *file , char const *ubuf , size_t len , loff_t *offp ) { struct seq_file *m ; struct drm_device *dev ; char *tmpbuf ; int ret ; long tmp ; void *tmp___0 ; unsigned long tmp___1 ; { m = (struct seq_file *)file->private_data; dev = (struct drm_device *)m->private; if (len == 0UL) { return (0L); } else { } if (len > 4095UL) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("display_crc_ctl_write", "expected <%lu bytes into pipe crc control\n", 4096UL); } else { } return (-7L); } else { } tmp___0 = kmalloc(len + 1UL, 208U); tmpbuf = (char *)tmp___0; if ((unsigned long )tmpbuf == (unsigned long )((char *)0)) { return (-12L); } else { } tmp___1 = copy_from_user((void *)tmpbuf, (void const *)ubuf, len); if (tmp___1 != 0UL) { ret = -14; goto out; } else { } *(tmpbuf + len) = 0; ret = display_crc_ctl_parse(dev, tmpbuf, len); out: kfree((void const *)tmpbuf); if (ret < 0) { return ((ssize_t )ret); } else { } *offp = (loff_t )((unsigned long long )*offp + (unsigned long long )len); return ((ssize_t )len); } } static struct file_operations const i915_display_crc_ctl_fops = {& __this_module, & seq_lseek, & seq_read, & display_crc_ctl_write, 0, 0, 0, 0, 0, 0, 0, 0, & display_crc_ctl_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static ssize_t i915_displayport_test_active_write(struct file *file , char const *ubuf , size_t len , loff_t *offp ) { char *input_buffer ; int status ; struct seq_file *m ; struct drm_device *dev ; struct drm_connector *connector ; struct list_head *connector_list ; 
struct intel_dp *intel_dp ; int val ; void *tmp ; unsigned long tmp___0 ; long tmp___1 ; struct list_head const *__mptr ; long tmp___2 ; struct list_head const *__mptr___0 ; { status = 0; val = 0; m = (struct seq_file *)file->private_data; if ((unsigned long )m == (unsigned long )((struct seq_file *)0)) { status = -19; return ((ssize_t )status); } else { } dev = (struct drm_device *)m->private; if ((unsigned long )dev == (unsigned long )((struct drm_device *)0)) { status = -19; return ((ssize_t )status); } else { } connector_list = & dev->mode_config.connector_list; if (len == 0UL) { return (0L); } else { } tmp = kmalloc(len + 1UL, 208U); input_buffer = (char *)tmp; if ((unsigned long )input_buffer == (unsigned long )((char *)0)) { return (-12L); } else { } tmp___0 = copy_from_user((void *)input_buffer, (void const *)ubuf, len); if (tmp___0 != 0UL) { status = -14; goto out; } else { } *(input_buffer + len) = 0; tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_displayport_test_active_write", "Copied %d bytes from user\n", (unsigned int )len); } else { } __mptr = (struct list_head const *)connector_list->next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_50626; ldv_50625: ; if (connector->connector_type != 10) { goto ldv_50624; } else { } if ((connector->connector_type == 10 && (unsigned int )connector->status == 1U) && (unsigned long )connector->encoder != (unsigned long )((struct drm_encoder *)0)) { intel_dp = enc_to_intel_dp(connector->encoder); status = kstrtoint((char const *)input_buffer, 10U, & val); if (status < 0) { goto out; } else { } tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("i915_displayport_test_active_write", "Got %d for test active\n", val); } else { } if (val == 1) { intel_dp->compliance_test_active = 1; } else { intel_dp->compliance_test_active = 0; } } else { } ldv_50624: __mptr___0 = (struct list_head 
const *)connector->head.next; connector = (struct drm_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_50626: ; if ((unsigned long )(& connector->head) != (unsigned long )connector_list) { goto ldv_50625; } else { } out: kfree((void const *)input_buffer); if (status < 0) { return ((ssize_t )status); } else { } *offp = (loff_t )((unsigned long long )*offp + (unsigned long long )len); return ((ssize_t )len); } } static int i915_displayport_test_active_show(struct seq_file *m , void *data ) { struct drm_device *dev ; struct drm_connector *connector ; struct list_head *connector_list ; struct intel_dp *intel_dp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = (struct drm_device *)m->private; connector_list = & dev->mode_config.connector_list; if ((unsigned long )dev == (unsigned long )((struct drm_device *)0)) { return (-19); } else { } __mptr = (struct list_head const *)connector_list->next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_50642; ldv_50641: ; if (connector->connector_type != 10) { goto ldv_50640; } else { } if ((unsigned int )connector->status == 1U && (unsigned long )connector->encoder != (unsigned long )((struct drm_encoder *)0)) { intel_dp = enc_to_intel_dp(connector->encoder); if ((int )intel_dp->compliance_test_active) { seq_puts(m, "1"); } else { seq_puts(m, "0"); } } else { seq_puts(m, "0"); } ldv_50640: __mptr___0 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_50642: ; if ((unsigned long )(& connector->head) != (unsigned long )connector_list) { goto ldv_50641; } else { } return (0); } } static int i915_displayport_test_active_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; int tmp ; { dev = (struct drm_device *)inode->i_private; tmp = single_open(file, & i915_displayport_test_active_show, (void *)dev); return (tmp); } } static struct file_operations const 
i915_displayport_test_active_fops = {& __this_module, & seq_lseek, & seq_read, & i915_displayport_test_active_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_displayport_test_active_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_displayport_test_data_show(struct seq_file *m , void *data ) { struct drm_device *dev ; struct drm_connector *connector ; struct list_head *connector_list ; struct intel_dp *intel_dp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = (struct drm_device *)m->private; connector_list = & dev->mode_config.connector_list; if ((unsigned long )dev == (unsigned long )((struct drm_device *)0)) { return (-19); } else { } __mptr = (struct list_head const *)connector_list->next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_50664; ldv_50663: ; if (connector->connector_type != 10) { goto ldv_50662; } else { } if ((unsigned int )connector->status == 1U && (unsigned long )connector->encoder != (unsigned long )((struct drm_encoder *)0)) { intel_dp = enc_to_intel_dp(connector->encoder); seq_printf(m, "%lx", intel_dp->compliance_test_data); } else { seq_puts(m, "0"); } ldv_50662: __mptr___0 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_50664: ; if ((unsigned long )(& connector->head) != (unsigned long )connector_list) { goto ldv_50663; } else { } return (0); } } static int i915_displayport_test_data_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; int tmp ; { dev = (struct drm_device *)inode->i_private; tmp = single_open(file, & i915_displayport_test_data_show, (void *)dev); return (tmp); } } static struct file_operations const i915_displayport_test_data_fops = {& __this_module, & seq_lseek, & seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_displayport_test_data_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_displayport_test_type_show(struct 
seq_file *m , void *data ) { struct drm_device *dev ; struct drm_connector *connector ; struct list_head *connector_list ; struct intel_dp *intel_dp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = (struct drm_device *)m->private; connector_list = & dev->mode_config.connector_list; if ((unsigned long )dev == (unsigned long )((struct drm_device *)0)) { return (-19); } else { } __mptr = (struct list_head const *)connector_list->next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_50686; ldv_50685: ; if (connector->connector_type != 10) { goto ldv_50684; } else { } if ((unsigned int )connector->status == 1U && (unsigned long )connector->encoder != (unsigned long )((struct drm_encoder *)0)) { intel_dp = enc_to_intel_dp(connector->encoder); seq_printf(m, "%02lx", intel_dp->compliance_test_type); } else { seq_puts(m, "0"); } ldv_50684: __mptr___0 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_50686: ; if ((unsigned long )(& connector->head) != (unsigned long )connector_list) { goto ldv_50685; } else { } return (0); } } static int i915_displayport_test_type_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; int tmp ; { dev = (struct drm_device *)inode->i_private; tmp = single_open(file, & i915_displayport_test_type_show, (void *)dev); return (tmp); } } static struct file_operations const i915_displayport_test_type_fops = {& __this_module, & seq_lseek, & seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_displayport_test_type_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static void wm_latency_show(struct seq_file *m , uint16_t const *wm ) { struct drm_device *dev ; int num_levels ; int tmp ; int level ; unsigned int latency ; struct drm_i915_private *__p ; { dev = (struct drm_device *)m->private; tmp = ilk_wm_max_level((struct drm_device const *)dev); num_levels = tmp + 1; drm_modeset_lock_all(dev); 
level = 0; goto ldv_50709; ldv_50708: latency = (unsigned int )*(wm + (unsigned long )level); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { latency = latency * 10U; } else if (level > 0) { latency = latency * 5U; } else { } seq_printf(m, "WM%d %u (%u.%u usec)\n", level, (int )*(wm + (unsigned long )level), latency / 10U, latency % 10U); level = level + 1; ldv_50709: ; if (level < num_levels) { goto ldv_50708; } else { } drm_modeset_unlock_all(dev); return; } } static int pri_wm_latency_show(struct seq_file *m , void *data ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint16_t const *latencies ; struct drm_i915_private *tmp ; struct drm_i915_private *__p ; { dev = (struct drm_device *)m->private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { latencies = (uint16_t const *)(& dev_priv->wm.skl_latency); } else { tmp = to_i915((struct drm_device const *)dev); latencies = (uint16_t const *)(& tmp->wm.pri_latency); } wm_latency_show(m, latencies); return (0); } } static int spr_wm_latency_show(struct seq_file *m , void *data ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint16_t const *latencies ; struct drm_i915_private *tmp ; struct drm_i915_private *__p ; { dev = (struct drm_device *)m->private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { latencies = (uint16_t const *)(& dev_priv->wm.skl_latency); } else { tmp = to_i915((struct drm_device const *)dev); latencies = (uint16_t const *)(& tmp->wm.spr_latency); } wm_latency_show(m, latencies); return (0); } } static int cur_wm_latency_show(struct seq_file *m , void *data ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint16_t const *latencies ; struct drm_i915_private *tmp ; 
struct drm_i915_private *__p ; { dev = (struct drm_device *)m->private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { latencies = (uint16_t const *)(& dev_priv->wm.skl_latency); } else { tmp = to_i915((struct drm_device const *)dev); latencies = (uint16_t const *)(& tmp->wm.cur_latency); } wm_latency_show(m, latencies); return (0); } } static int pri_wm_latency_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; { dev = (struct drm_device *)inode->i_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (-19); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return (-19); } else { } } tmp = single_open(file, & pri_wm_latency_show, (void *)dev); return (tmp); } } static int spr_wm_latency_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; { dev = (struct drm_device *)inode->i_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (-19); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return (-19); } else { } } tmp = single_open(file, & spr_wm_latency_show, (void *)dev); return (tmp); } } static int cur_wm_latency_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; { dev = (struct drm_device *)inode->i_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (-19); } else { __p___0 = to_i915((struct drm_device const 
*)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return (-19); } else { } } tmp = single_open(file, & cur_wm_latency_show, (void *)dev); return (tmp); } } static ssize_t wm_latency_write(struct file *file , char const *ubuf , size_t len , loff_t *offp , uint16_t *wm ) { struct seq_file *m ; struct drm_device *dev ; uint16_t new[8U] ; unsigned int tmp ; int num_levels ; int tmp___0 ; int level ; int ret ; char tmp___1[32U] ; unsigned long tmp___2 ; { m = (struct seq_file *)file->private_data; dev = (struct drm_device *)m->private; new[0] = 0U; tmp = 1U; while (1) { if (tmp >= 8U) { break; } else { } new[tmp] = (unsigned short)0; tmp = tmp + 1U; } tmp___0 = ilk_wm_max_level((struct drm_device const *)dev); num_levels = tmp___0 + 1; if (len > 31UL) { return (-22L); } else { } tmp___2 = copy_from_user((void *)(& tmp___1), (void const *)ubuf, len); if (tmp___2 != 0UL) { return (-14L); } else { } tmp___1[len] = 0; ret = sscanf((char const *)(& tmp___1), "%hu %hu %hu %hu %hu %hu %hu %hu", (uint16_t *)(& new), (uint16_t *)(& new) + 1UL, (uint16_t *)(& new) + 2UL, (uint16_t *)(& new) + 3UL, (uint16_t *)(& new) + 4UL, (uint16_t *)(& new) + 5UL, (uint16_t *)(& new) + 6UL, (uint16_t *)(& new) + 7UL); if (ret != num_levels) { return (-22L); } else { } drm_modeset_lock_all(dev); level = 0; goto ldv_50816; ldv_50815: *(wm + (unsigned long )level) = new[level]; level = level + 1; ldv_50816: ; if (level < num_levels) { goto ldv_50815; } else { } drm_modeset_unlock_all(dev); return ((ssize_t )len); } } static ssize_t pri_wm_latency_write(struct file *file , char const *ubuf , size_t len , loff_t *offp ) { struct seq_file *m ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint16_t *latencies ; struct drm_i915_private *tmp ; struct drm_i915_private *__p ; ssize_t tmp___0 ; { m = (struct seq_file *)file->private_data; dev = (struct drm_device *)m->private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const 
*)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { latencies = (uint16_t *)(& dev_priv->wm.skl_latency); } else { tmp = to_i915((struct drm_device const *)dev); latencies = (uint16_t *)(& tmp->wm.pri_latency); } tmp___0 = wm_latency_write(file, ubuf, len, offp, latencies); return (tmp___0); } } static ssize_t spr_wm_latency_write(struct file *file , char const *ubuf , size_t len , loff_t *offp ) { struct seq_file *m ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint16_t *latencies ; struct drm_i915_private *tmp ; struct drm_i915_private *__p ; ssize_t tmp___0 ; { m = (struct seq_file *)file->private_data; dev = (struct drm_device *)m->private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { latencies = (uint16_t *)(& dev_priv->wm.skl_latency); } else { tmp = to_i915((struct drm_device const *)dev); latencies = (uint16_t *)(& tmp->wm.spr_latency); } tmp___0 = wm_latency_write(file, ubuf, len, offp, latencies); return (tmp___0); } } static ssize_t cur_wm_latency_write(struct file *file , char const *ubuf , size_t len , loff_t *offp ) { struct seq_file *m ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint16_t *latencies ; struct drm_i915_private *tmp ; struct drm_i915_private *__p ; ssize_t tmp___0 ; { m = (struct seq_file *)file->private_data; dev = (struct drm_device *)m->private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { latencies = (uint16_t *)(& dev_priv->wm.skl_latency); } else { tmp = to_i915((struct drm_device const *)dev); latencies = (uint16_t *)(& tmp->wm.cur_latency); } tmp___0 = wm_latency_write(file, ubuf, len, offp, latencies); return (tmp___0); } } static struct file_operations const i915_pri_wm_latency_fops = {& __this_module, & seq_lseek, & seq_read, & 
pri_wm_latency_write, 0, 0, 0, 0, 0, 0, 0, 0, & pri_wm_latency_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static struct file_operations const i915_spr_wm_latency_fops = {& __this_module, & seq_lseek, & seq_read, & spr_wm_latency_write, 0, 0, 0, 0, 0, 0, 0, 0, & spr_wm_latency_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static struct file_operations const i915_cur_wm_latency_fops = {& __this_module, & seq_lseek, & seq_read, & cur_wm_latency_write, 0, 0, 0, 0, 0, 0, 0, 0, & cur_wm_latency_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_wedged_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); *val = (u64 )tmp; return (0); } } static int i915_wedged_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = i915_reset_in_progress(& dev_priv->gpu_error); if ((int )tmp) { return (-11); } else { } intel_runtime_pm_get(dev_priv); i915_handle_error(dev, val != 0ULL, "Manually setting wedged to %llu", val); intel_runtime_pm_put(dev_priv); return (0); } } static int i915_wedged_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("%llu\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_wedged_get, & i915_wedged_set, "%llu\n"); return (tmp); } } static struct file_operations const i915_wedged_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_wedged_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_ring_stop_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private 
*dev_priv ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; *val = (u64 )dev_priv->gpu_error.stop_rings; return (0); } } static int i915_ring_stop_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; long tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_ring_stop_set", "Stopping rings 0x%08llx\n", val); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } dev_priv->gpu_error.stop_rings = (u32 )val; mutex_unlock(& dev->struct_mutex); return (0); } } static int i915_ring_stop_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("0x%08llx\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_ring_stop_get, & i915_ring_stop_set, "0x%08llx\n"); return (tmp); } } static struct file_operations const i915_ring_stop_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_ring_stop_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_ring_missed_irq_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; *val = (u64 )dev_priv->gpu_error.missed_irq_rings; return (0); } } static int i915_ring_missed_irq_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } dev_priv->gpu_error.missed_irq_rings = (unsigned long )val; mutex_unlock(& dev->struct_mutex); return (0); } } static int 
i915_ring_missed_irq_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("0x%08llx\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_ring_missed_irq_get, & i915_ring_missed_irq_set, "0x%08llx\n"); return (tmp); } } static struct file_operations const i915_ring_missed_irq_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_ring_missed_irq_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_ring_test_irq_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; *val = (u64 )dev_priv->gpu_error.test_irq_rings; return (0); } } static int i915_ring_test_irq_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; long tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_ring_test_irq_set", "Masking interrupts on rings 0x%08llx\n", val); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } dev_priv->gpu_error.test_irq_rings = (unsigned int )val; mutex_unlock(& dev->struct_mutex); return (0); } } static int i915_ring_test_irq_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("0x%08llx\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_ring_test_irq_get, & i915_ring_test_irq_set, "0x%08llx\n"); return (tmp); } } static struct file_operations const i915_ring_test_irq_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_ring_test_irq_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_drop_caches_get(void 
*data , u64 *val ) { { *val = 15ULL; return (0); } } static int i915_drop_caches_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; long tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_drop_caches_set", "Dropping caches: 0x%08llx\n", val); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } if ((val & 8ULL) != 0ULL) { ret = i915_gpu_idle(dev); if (ret != 0) { goto unlock; } else { } } else { } if ((val & 12ULL) != 0ULL) { i915_gem_retire_requests(dev); } else { } if ((val & 2ULL) != 0ULL) { i915_gem_shrink(dev_priv, 9223372036854775807L, 4U); } else { } if ((int )val & 1) { i915_gem_shrink(dev_priv, 9223372036854775807L, 2U); } else { } unlock: mutex_unlock(& dev->struct_mutex); return (ret); } } static int i915_drop_caches_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("0x%08llx\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_drop_caches_get, & i915_drop_caches_set, "0x%08llx\n"); return (tmp); } } static struct file_operations const i915_drop_caches_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_drop_caches_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static int i915_max_freq_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; struct drm_i915_private *__p ; int tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (-19); } else { } ldv_flush_delayed_work_146(& dev_priv->rps.delayed_resume_work); ret = mutex_lock_interruptible_nested(& 
/* Tail of i915_max_freq_get: read softlimit under rps.hw_lock. */
dev_priv->rps.hw_lock, 0U); if (ret != 0) { return (ret); } else { } tmp = intel_gpu_freq(dev_priv, (int )dev_priv->rps.max_freq_softlimit); *val = (u64 )tmp; mutex_unlock(& dev_priv->rps.hw_lock); return (0); } }
/* max-freq set: convert the user frequency to an opcode, reject values
 * outside [hw_min, hw_max] or below min_freq_softlimit with -EINVAL (-22),
 * otherwise store the new softlimit and apply it via intel_set_rps(). */
static int i915_max_freq_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 hw_max ; u32 hw_min ; int ret ; struct drm_i915_private *__p ; long tmp ; int tmp___0 ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (-19); } else { } ldv_flush_delayed_work_147(& dev_priv->rps.delayed_resume_work); tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_max_freq_set", "Manually setting max freq to %llu\n", val); } else { } ret = mutex_lock_interruptible_nested(& dev_priv->rps.hw_lock, 0U); if (ret != 0) { return (ret); } else { } tmp___0 = intel_freq_opcode(dev_priv, (int )val); val = (u64 )tmp___0; hw_max = (u32 )dev_priv->rps.max_freq; hw_min = (u32 )dev_priv->rps.min_freq; if (((u64 )hw_min > val || (u64 )hw_max < val) || (u64 )dev_priv->rps.min_freq_softlimit > val) { mutex_unlock(& dev_priv->rps.hw_lock); return (-22); } else { } dev_priv->rps.max_freq_softlimit = (u8 )val; intel_set_rps(dev, (int )((u8 )val)); mutex_unlock(& dev_priv->rps.hw_lock); return (0); } }
/* open() hook for the max-freq attribute (decimal "%llu" format). */
static int i915_max_freq_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("%llu\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_max_freq_get, & i915_max_freq_set, "%llu\n"); return (tmp); } }
static struct file_operations const i915_max_freq_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_max_freq_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* min-freq get: mirror of max-freq get; declarations continue on the next
 * source line. */
static int i915_min_freq_get(void *data , u64 *val ) { struct
/* Tail of i915_min_freq_get: -ENODEV on gen <= 5, flush RPS resume work,
 * then read min_freq_softlimit (converted to a frequency) under hw_lock. */
drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; struct drm_i915_private *__p ; int tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (-19); } else { } ldv_flush_delayed_work_148(& dev_priv->rps.delayed_resume_work); ret = mutex_lock_interruptible_nested(& dev_priv->rps.hw_lock, 0U); if (ret != 0) { return (ret); } else { } tmp = intel_gpu_freq(dev_priv, (int )dev_priv->rps.min_freq_softlimit); *val = (u64 )tmp; mutex_unlock(& dev_priv->rps.hw_lock); return (0); } }
/* min-freq set: symmetric with max-freq set, except the range check rejects
 * values ABOVE max_freq_softlimit (a min must not exceed the current max). */
static int i915_min_freq_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 hw_max ; u32 hw_min ; int ret ; struct drm_i915_private *__p ; long tmp ; int tmp___0 ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (-19); } else { } ldv_flush_delayed_work_149(& dev_priv->rps.delayed_resume_work); tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_min_freq_set", "Manually setting min freq to %llu\n", val); } else { } ret = mutex_lock_interruptible_nested(& dev_priv->rps.hw_lock, 0U); if (ret != 0) { return (ret); } else { } tmp___0 = intel_freq_opcode(dev_priv, (int )val); val = (u64 )tmp___0; hw_max = (u32 )dev_priv->rps.max_freq; hw_min = (u32 )dev_priv->rps.min_freq; if (((u64 )hw_min > val || (u64 )hw_max < val) || (u64 )dev_priv->rps.max_freq_softlimit < val) { mutex_unlock(& dev_priv->rps.hw_lock); return (-22); } else { } dev_priv->rps.min_freq_softlimit = (u8 )val; intel_set_rps(dev, (int )((u8 )val)); mutex_unlock(& dev_priv->rps.hw_lock); return (0); } }
/* open() hook for the min-freq attribute; the format-check call's argument
 * list continues on the next source line. */
static int i915_min_freq_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("%llu\n",
/* Tail of i915_min_freq_fops_open. */
0ULL); tmp = simple_attr_open(inode, file, & i915_min_freq_get, & i915_min_freq_set, "%llu\n"); return (tmp); } }
static struct file_operations const i915_min_freq_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_min_freq_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* cache-sharing get: only valid on gen 6/7 (else -ENODEV).  Reads MMIO
 * register 36876 (0x900C — presumably GEN6_MBCUNIT_SNPCR, TODO confirm)
 * under struct_mutex with runtime PM held, and extracts bits 21-22. */
static int i915_cache_sharing_get(void *data , u64 *val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 snpcr ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 7U) { return (-19); } else { } } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } intel_runtime_pm_get(dev_priv); snpcr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36876L, 1); intel_runtime_pm_put(dev_priv); mutex_unlock(& (dev_priv->dev)->struct_mutex); *val = (u64 )((snpcr & 6291456U) >> 21); return (0); } }
/* cache-sharing set: gen 6/7 only; values > 3 rejected (-EINVAL).
 * Read-modify-write of the same register: clear bits 21-22
 * (mask 4288675839U == ~0x600000) and OR in the new value; the debug
 * printk call continues on the next source line. */
static int i915_cache_sharing_set(void *data , u64 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 snpcr ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp ; { dev = (struct drm_device *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 7U) { return (-19); } else { } } else { } if (val > 3ULL) { return (-22); } else { } intel_runtime_pm_get(dev_priv); tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) {
/* Tail of i915_cache_sharing_set: read-modify-write bits 21-22 of MMIO
 * register 36876, then drop the runtime-PM reference. */
drm_ut_debug_printk("i915_cache_sharing_set", "Manually setting uncore sharing to %llu\n", val); } else { } snpcr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36876L, 1); snpcr = snpcr & 4288675839U; snpcr = ((u32 )val << 21U) | snpcr; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36876L, snpcr, 1); intel_runtime_pm_put(dev_priv); return (0); } }
/* open() hook for the cache-sharing attribute. */
static int i915_cache_sharing_fops_open(struct inode *inode , struct file *file ) { int tmp ; { __simple_attr_check_format("%llu\n", 0ULL); tmp = simple_attr_open(inode, file, & i915_cache_sharing_get, & i915_cache_sharing_set, "%llu\n"); return (tmp); } }
static struct file_operations const i915_cache_sharing_fops = {& __this_module, & generic_file_llseek, & simple_attr_read, & simple_attr_write, 0, 0, 0, 0, 0, 0, 0, 0, & i915_cache_sharing_fops_open, 0, & simple_attr_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* Cherryview slice/subslice/EU status: reads four fuse/signal registers
 * (sig1[0..1], sig2[0..1]) into alloca'd arrays (CIL's expansion of VLAs of
 * length ss_max == 2), then walks subslices counting enabled EUs.  The
 * goto/label pairs (ldv_51093/ldv_51094) are CIL's flattened for-loop; the
 * loop body continues on the next source line. */
static void cherryview_sseu_device_status(struct drm_device *dev , struct sseu_dev_status *stat ) { struct drm_i915_private *dev_priv ; int ss_max ; int ss ; u32 *sig1 ; unsigned long __lengthofsig1 ; void *tmp ; u32 *sig2 ; unsigned long __lengthofsig2 ; void *tmp___0 ; int ss_max___0 ; int ss_max___1 ; unsigned int eu_cnt ; unsigned int _max1 ; unsigned int _max2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ss_max = 2; __lengthofsig1 = (unsigned long )((long )ss_max); tmp = __builtin_alloca(sizeof(*sig1) * __lengthofsig1); sig1 = (u32 *)tmp; __lengthofsig2 = (unsigned long )((long )ss_max); tmp___0 = __builtin_alloca(sizeof(*sig2) * __lengthofsig2); sig2 = (u32 *)tmp___0; ss_max___0 = 2; ss_max___1 = 2; *(sig1 + 0) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 42784L, 1); *(sig1 + 1) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 42792L, 1); *(sig2 + 0) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 42788L, 1); *(sig2 + 1) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 42796L, 1); ss = 0; goto ldv_51094; ldv_51093: ; if ((*(sig1 + ss) & 2U) != 0U) { goto ldv_51089; }
/* Tail of cherryview_sseu_device_status: per-subslice EU count — each of
 * the four tested signal bits contributes 2 EUs when clear; track the
 * running max per subslice and the totals. */
else { } stat->slice_total = 1U; stat->subslice_per_slice = stat->subslice_per_slice + 1U; eu_cnt = (unsigned int )(((((*(sig1 + ss) & 512U) != 0U ? 0 : 2) + ((*(sig1 + ss) & 131072U) != 0U ? 0 : 2)) + ((*(sig1 + ss) & 33554432U) != 0U ? 0 : 2)) + ((*(sig2 + ss) & 2U) != 0U ? 0 : 2)); stat->eu_total = stat->eu_total + eu_cnt; _max1 = stat->eu_per_subslice; _max2 = eu_cnt; stat->eu_per_subslice = _max1 > _max2 ? _max1 : _max2; ldv_51089: ss = ss + 1; ldv_51094: ; if (ss < ss_max___1) { goto ldv_51093; } else { } stat->subslice_total = stat->subslice_per_slice; return; } }
/* Gen9+ slice/subslice/EU status: defaults s_max=3, ss_max=4, reduced to
 * 1/3 when byte 45 of the device-info blob is zero on gen 9 (presumably a
 * Broxton/platform flag — TODO confirm against struct intel_device_info
 * layout).  Reads per-slice fuse registers into alloca'd arrays; the
 * register-read loop body continues on the next source line. */
static void gen9_sseu_device_status(struct drm_device *dev , struct sseu_dev_status *stat ) { struct drm_i915_private *dev_priv ; int s_max ; int ss_max ; int s ; int ss ; u32 *s_reg ; unsigned long __lengthofs_reg ; void *tmp ; u32 *eu_reg ; unsigned long __lengthofeu_reg ; void *tmp___0 ; u32 eu_mask[2U] ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; unsigned int ss_cnt ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; unsigned int eu_cnt ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; unsigned int tmp___1 ; unsigned int _max1 ; unsigned int _max2 ; unsigned int _max1___0 ; unsigned int _max2___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; s_max = 3; ss_max = 4; __lengthofs_reg = (unsigned long )((long )s_max); tmp = __builtin_alloca(sizeof(*s_reg) * __lengthofs_reg); s_reg = (u32 *)tmp; __lengthofeu_reg = (unsigned long )((long )(s_max * 2)); tmp___0 = __builtin_alloca(sizeof(*eu_reg) * __lengthofeu_reg); eu_reg = (u32 *)tmp___0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { s_max = 1; ss_max = 3; } else { } } else { } s = 0; goto ldv_51121; ldv_51120: *(s_reg + s) =
/* Middle of gen9_sseu_device_status: per-slice register reads, then the
 * nested slice/subslice walk.  eu_mask alternates per subslice parity;
 * __arch_hweight32 counts enabled-EU bits, each bit representing 2 EUs.
 * Labels ldv_51120..ldv_51173 are CIL's flattened loop structure. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((s + 8211) * 4), 1); *(eu_reg + s * 2) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(s * 8 + 32860), 1); *(eu_reg + (s * 2 + 1)) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((s + 4108) * 8), 1); s = s + 1; ldv_51121: ; if (s < s_max) { goto ldv_51120; } else { } eu_mask[0] = 85U; eu_mask[1] = 21760U; s = 0; goto ldv_51173; ldv_51172: ss_cnt = 0U; if ((*(s_reg + s) & 1U) == 0U) { goto ldv_51124; } else { } stat->slice_total = stat->slice_total + 1U; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); ss_cnt = (unsigned int )__p___1->info.subslice_per_slice; } else { } ss = 0; goto ldv_51167; ldv_51166: __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 9U) { if ((*(s_reg + s) & (u32 )(1 << (ss + 1) * 2)) == 0U) { goto ldv_51150; } else { } } else { } } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 9U) { ss_cnt = ss_cnt + 1U; } else { } } else { } tmp___1 = __arch_hweight32(*(eu_reg + (s * 2 + ss / 2)) & eu_mask[ss % 2]); eu_cnt = tmp___1 * 2U; stat->eu_total = stat->eu_total + eu_cnt; _max1 = stat->eu_per_subslice; _max2 = eu_cnt; stat->eu_per_subslice = _max1 > _max2 ? _max1 : _max2; ldv_51150: ss = ss + 1; ldv_51167: ; if (ss < ss_max) { goto ldv_51166; } else { } stat->subslice_total = stat->subslice_total + ss_cnt; _max1___0 = stat->subslice_per_slice; _max2___0 = ss_cnt; stat->subslice_per_slice = _max1___0 > _max2___0 ?
/* Tail of gen9_sseu_device_status: close of the per-slice loop. */
_max1___0 : _max2___0; ldv_51124: s = s + 1; ldv_51173: ; if (s < s_max) { goto ldv_51172; } else { } return; } }
/* seq_file show handler for the i915_sseu_status debugfs node: rejects
 * gen <= 7 and the gen-8 non-flagged case with -ENODEV (-19), prints the
 * static "Available" capabilities from device info, then (on the next
 * source line) dispatches to the Cherryview or gen9 status collector and
 * prints the "Enabled" counters. */
static int i915_sseu_status(struct seq_file *m , void *unused ) { struct drm_info_node *node ; struct drm_device *dev ; struct sseu_dev_status stat ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; char const *tmp ; struct drm_i915_private *__p___8 ; char const *tmp___0 ; struct drm_i915_private *__p___9 ; char const *tmp___1 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; { node = (struct drm_info_node *)m->private; dev = (node->minor)->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 7U) { return (-19); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { return (-19); } else { } } else { } } seq_puts(m, "SSEU Device Info\n"); __p___2 = to_i915((struct drm_device const *)dev); seq_printf(m, " Available Slice Total: %u\n", (int )__p___2->info.slice_total); __p___3 = to_i915((struct drm_device const *)dev); seq_printf(m, " Available Subslice Total: %u\n", (int )__p___3->info.subslice_total); __p___4 = to_i915((struct drm_device const *)dev); seq_printf(m, " Available Subslice Per Slice: %u\n", (int )__p___4->info.subslice_per_slice); __p___5 = to_i915((struct drm_device const *)dev); seq_printf(m, " Available EU Total: %u\n", (int )__p___5->info.eu_total); __p___6 = to_i915((struct drm_device const *)dev); seq_printf(m, " Available EU Per Subslice: %u\n", (int )__p___6->info.eu_per_subslice);
/* Tail of i915_sseu_status: power-gating capability lines, zero the 20-byte
 * stat struct, pick the Cherryview (flag byte set + gen 8) or gen > 8
 * collector — note the CIL-preserved _L label realizing the original
 * else-if — then print the "Enabled" counters. */
__p___7 = to_i915((struct drm_device const *)dev); tmp = yesno((int )__p___7->info.has_slice_pg); seq_printf(m, " Has Slice Power Gating: %s\n", tmp); __p___8 = to_i915((struct drm_device const *)dev); tmp___0 = yesno((int )__p___8->info.has_subslice_pg); seq_printf(m, " Has Subslice Power Gating: %s\n", tmp___0); __p___9 = to_i915((struct drm_device const *)dev); tmp___1 = yesno((int )__p___9->info.has_eu_pg); seq_printf(m, " Has EU Power Gating: %s\n", tmp___1); seq_puts(m, "SSEU Device Status\n"); memset((void *)(& stat), 0, 20UL); __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___11 + 45UL) != 0U) { __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___12->info.gen) == 8U) { cherryview_sseu_device_status(dev, & stat); } else { goto _L; } } else { _L: /* CIL Label */ __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___10->info.gen) > 8U) { gen9_sseu_device_status(dev, & stat); } else { } } seq_printf(m, " Enabled Slice Total: %u\n", stat.slice_total); seq_printf(m, " Enabled Subslice Total: %u\n", stat.subslice_total); seq_printf(m, " Enabled Subslice Per Slice: %u\n", stat.subslice_per_slice); seq_printf(m, " Enabled EU Total: %u\n", stat.eu_total); seq_printf(m, " Enabled EU Per Subslice: %u\n", stat.eu_per_subslice); return (0); } }
/* forcewake open: no-op success on gen <= 5; otherwise grab a runtime-PM
 * reference and all forcewake domains (mask 7) for as long as the
 * i915_forcewake_user file stays open. */
static int i915_forcewake_open(struct inode *inode , struct file *file ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev = (struct drm_device *)inode->i_private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (0); } else { } intel_runtime_pm_get(dev_priv); intel_uncore_forcewake_get(dev_priv, 7); return (0); } }
/* forcewake release: the matching put path; body continues on the next
 * source line. */
static int i915_forcewake_release(struct inode *inode , struct file *file ) { struct drm_device *dev ; struct
/* Tail of i915_forcewake_release: drop forcewake (mask 7) and runtime PM,
 * mirroring i915_forcewake_open. */
drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev = (struct drm_device *)inode->i_private; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return (0); } else { } intel_uncore_forcewake_put(dev_priv, 7); intel_runtime_pm_put(dev_priv); return (0); } }
/* fops with only open/release wired up — the file exists purely for its
 * open/close side effects. */
static struct file_operations const i915_forcewake_fops = {& __this_module, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_forcewake_open, 0, & i915_forcewake_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* Create the i915_forcewake_user node (mode 256 == 0400) and register it as
 * a fake info node so cleanup can find it; -ENOMEM (-12) if creation fails. */
static int i915_forcewake_create(struct dentry *root , struct drm_minor *minor ) { struct drm_device *dev ; struct dentry *ent ; int tmp ; { dev = minor->dev; ent = debugfs_create_file("i915_forcewake_user", 256, root, (void *)dev, & i915_forcewake_fops); if ((unsigned long )ent == (unsigned long )((struct dentry *)0)) { return (-12); } else { } tmp = drm_add_fake_info_node(minor, ent, (void const *)(& i915_forcewake_fops)); return (tmp); } }
/* Generic helper: create one debugfs file (mode 420 == 0644) for the given
 * fops and register it as a fake info node. */
static int i915_debugfs_create(struct dentry *root , struct drm_minor *minor , char const *name , struct file_operations const *fops ) { struct drm_device *dev ; struct dentry *ent ; int tmp ; { dev = minor->dev; ent = debugfs_create_file(name, 420, root, (void *)dev, fops); if ((unsigned long )ent == (unsigned long )((struct dentry *)0)) { return (-12); } else { } tmp = drm_add_fake_info_node(minor, ent, (void const *)fops); return (tmp); } }
/* Table of 48 read-only seq_file debugfs entries; continues on the next
 * source line. */
static struct drm_info_list const i915_debugfs_list[48U] = { {"i915_capabilities", & i915_capabilities, 0U, 0}, {"i915_gem_objects", & i915_gem_object_info, 0U, 0}, {"i915_gem_gtt", & i915_gem_gtt_info, 0U, 0}, {"i915_gem_pinned", & i915_gem_gtt_info, 0U, (void *)2}, {"i915_gem_active", & i915_gem_object_list_info, 0U, (void *)0}, {"i915_gem_inactive", & i915_gem_object_list_info, 0U, (void *)1}, {"i915_gem_stolen", & i915_gem_stolen_list_info, 0U, 0}, {"i915_gem_pageflip", & i915_gem_pageflip_info, 0U, 0},
/* Continuation of i915_debugfs_list[48]: {name, show-callback, driver
 * features (unused, 0U), opaque data}.  The (void *)N data values select a
 * variant inside shared callbacks (e.g. which hardware-status page for
 * i915_hws_info). */
{"i915_gem_request", & i915_gem_request_info, 0U, 0}, {"i915_gem_seqno", & i915_gem_seqno_info, 0U, 0}, {"i915_gem_fence_regs", & i915_gem_fence_regs_info, 0U, 0}, {"i915_gem_interrupt", & i915_interrupt_info, 0U, 0}, {"i915_gem_hws", & i915_hws_info, 0U, (void *)0}, {"i915_gem_hws_blt", & i915_hws_info, 0U, (void *)2}, {"i915_gem_hws_bsd", & i915_hws_info, 0U, (void *)1}, {"i915_gem_hws_vebox", & i915_hws_info, 0U, (void *)3}, {"i915_gem_batch_pool", & i915_gem_batch_pool_info, 0U, 0}, {"i915_frequency_info", & i915_frequency_info, 0U, 0}, {"i915_hangcheck_info", & i915_hangcheck_info, 0U, 0}, {"i915_drpc_info", & i915_drpc_info, 0U, 0}, {"i915_emon_status", & i915_emon_status, 0U, 0}, {"i915_ring_freq_table", & i915_ring_freq_table, 0U, 0}, {"i915_fbc_status", & i915_fbc_status, 0U, 0}, {"i915_ips_status", & i915_ips_status, 0U, 0}, {"i915_sr_status", & i915_sr_status, 0U, 0}, {"i915_opregion", & i915_opregion, 0U, 0}, {"i915_gem_framebuffer", & i915_gem_framebuffer_info, 0U, 0}, {"i915_context_status", & i915_context_status, 0U, 0}, {"i915_dump_lrc", & i915_dump_lrc, 0U, 0}, {"i915_execlists", & i915_execlists, 0U, 0}, {"i915_forcewake_domains", & i915_forcewake_domains, 0U, 0}, {"i915_swizzle_info", & i915_swizzle_info, 0U, 0}, {"i915_ppgtt_info", & i915_ppgtt_info, 0U, 0}, {"i915_llc", & i915_llc, 0U, 0}, {"i915_edp_psr_status", & i915_edp_psr_status, 0U, 0}, {"i915_sink_crc_eDP1", & i915_sink_crc, 0U, 0}, {"i915_energy_uJ", & i915_energy_uJ, 0U, 0}, {"i915_pc8_status", & i915_pc8_status, 0U, 0}, {"i915_power_domain_info", & i915_power_domain_info, 0U, 0}, {"i915_display_info", & i915_display_info, 0U, 0}, {"i915_semaphore_status", & i915_semaphore_status, 0U, 0}, {"i915_shared_dplls_info", & i915_shared_dplls_info, 0U, 0}, {"i915_dp_mst_info", & i915_dp_mst_info, 0U, 0}, {"i915_wa_registers", & i915_wa_registers, 0U, 0}, {"i915_ddb_info", & i915_ddb_info, 0U, 0}, {"i915_sseu_status", & i915_sseu_status, 0U, 0}, {"i915_drrs_status", & i915_drrs_status, 0U, 0},
/* Final entry of i915_debugfs_list[48]. */
{"i915_rps_boost_info", & i915_rps_boost_info, 0U, 0}};
/* Table of 18 writable debugfs attributes: {name, fops}.  Order matches the
 * creation loop bound (i <= 17U) in i915_debugfs_init below. */
static struct i915_debugfs_files const i915_debugfs_files[18U] = { {"i915_wedged", & i915_wedged_fops}, {"i915_max_freq", & i915_max_freq_fops}, {"i915_min_freq", & i915_min_freq_fops}, {"i915_cache_sharing", & i915_cache_sharing_fops}, {"i915_ring_stop", & i915_ring_stop_fops}, {"i915_ring_missed_irq", & i915_ring_missed_irq_fops}, {"i915_ring_test_irq", & i915_ring_test_irq_fops}, {"i915_gem_drop_caches", & i915_drop_caches_fops}, {"i915_error_state", & i915_error_state_fops}, {"i915_next_seqno", & i915_next_seqno_fops}, {"i915_display_crc_ctl", & i915_display_crc_ctl_fops}, {"i915_pri_wm_latency", & i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", & i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", & i915_cur_wm_latency_fops}, {"i915_fbc_false_color", & i915_fbc_fc_fops}, {"i915_dp_test_data", & i915_displayport_test_data_fops}, {"i915_dp_test_type", & i915_displayport_test_type_fops}, {"i915_dp_test_active", & i915_displayport_test_active_fops}};
/* Per-pipe CRC state init: for each hardware pipe, reset the open count and
 * initialize the spinlock and waitqueue (the __key/__key___0 lock_class_key
 * pairs are the expansion of lockdep's static-key machinery). */
void intel_display_crc_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; struct intel_pipe_crc *pipe_crc ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = 0; goto ldv_51325; ldv_51324: pipe_crc = (struct intel_pipe_crc *)(& dev_priv->pipe_crc) + (unsigned long )pipe; pipe_crc->opened = 0; spinlock_check(& pipe_crc->lock); __raw_spin_lock_init(& pipe_crc->lock.__annonCompField18.rlock, "&(&pipe_crc->lock)->rlock", & __key); __init_waitqueue_head(& pipe_crc->wq, "&pipe_crc->wq", & __key___0); pipe = (enum pipe )((int )pipe + 1); ldv_51325: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_51324; } else { } return; } }
/* Driver entry point: create the forcewake node, then (continuing on the
 * next source line) the per-pipe CRC nodes, the 18 writable attributes and
 * the 48 read-only info entries. */
int i915_debugfs_init(struct drm_minor *minor ) { int ret ; int i ; int tmp ; { ret = i915_forcewake_create(minor->debugfs_root, minor); if (ret != 0) { return (ret); } else { } i =
/* Tail of i915_debugfs_init: three pipes (i <= 2U), 18 file attributes
 * (i <= 17U), then the bulk drm_debugfs_create_files() call for the
 * 48-entry info list. */
0; goto ldv_51335; ldv_51334: ret = i915_pipe_crc_create(minor->debugfs_root, minor, (enum pipe )i); if (ret != 0) { return (ret); } else { } i = i + 1; ldv_51335: ; if ((unsigned int )i <= 2U) { goto ldv_51334; } else { } i = 0; goto ldv_51340; ldv_51339: ret = i915_debugfs_create(minor->debugfs_root, minor, i915_debugfs_files[i].name, i915_debugfs_files[i].fops); if (ret != 0) { return (ret); } else { } i = i + 1; ldv_51340: ; if ((unsigned int )i <= 17U) { goto ldv_51339; } else { } tmp = drm_debugfs_create_files((struct drm_info_list const *)(& i915_debugfs_list), 48, minor->debugfs_root, minor); return (tmp); } }
/* Teardown: remove the info list, then each fake info node.  The fops
 * pointers are cast to drm_info_list because drm_add_fake_info_node
 * registered them as the node's lookup key — a deliberate upstream idiom,
 * not a type error. */
void i915_debugfs_cleanup(struct drm_minor *minor ) { int i ; struct drm_info_list *info_list ; struct drm_info_list *info_list___0 ; { drm_debugfs_remove_files((struct drm_info_list const *)(& i915_debugfs_list), 48, minor); drm_debugfs_remove_files((struct drm_info_list const *)(& i915_forcewake_fops), 1, minor); i = 0; goto ldv_51354; ldv_51353: info_list = (struct drm_info_list *)(& i915_pipe_crc_data) + (unsigned long )i; drm_debugfs_remove_files((struct drm_info_list const *)info_list, 1, minor); i = i + 1; ldv_51354: ; if ((unsigned int )i <= 2U) { goto ldv_51353; } else { } i = 0; goto ldv_51360; ldv_51359: info_list___0 = (struct drm_info_list *)i915_debugfs_files[i].fops; drm_debugfs_remove_files((struct drm_info_list const *)info_list___0, 1, minor); i = i + 1; ldv_51360: ; if ((unsigned int )i <= 17U) { goto ldv_51359; } else { } return; } }
/* DPCD register ranges dumped by i915_dpcd_show: {offset, end, size, edp}.
 * end==0 means "use size"; size==0 falls back to a single byte. */
static struct dpcd_block const i915_dpcd_debug[10U] = { {0U, 0U, 15UL, (_Bool)0}, {112U, 113U, 0UL, (_Bool)0}, {128U, 0U, 16UL, (_Bool)0}, {256U, 266U, 0UL, (_Bool)0}, {512U, 519U, 0UL, (_Bool)0}, {1536U, 0U, 0UL, (_Bool)0}, {1792U, 0U, 0UL, (_Bool)0}, {1793U, 1796U, 0UL, (_Bool)0}, {1824U, 1839U, 0UL, (_Bool)0}, {1842U, 1843U, 0UL, (_Bool)0}};
/* seq_file show handler dumping DPCD blocks over the DP AUX channel;
 * declarations continue on the next source line. */
static int i915_dpcd_show(struct seq_file *m , void *data ) { struct drm_connector *connector ; struct intel_dp *intel_dp ; struct intel_encoder *tmp ; struct
/* Body of i915_dpcd_show: -ENODEV unless the connector is connected
 * (status == 1).  For each dpcd_block: compute the read size, skip
 * eDP-only blocks on non-eDP connectors (type 14), WARN and skip blocks
 * larger than the 16-byte buffer (the warn_slowpath_fmt string embeds the
 * LDV build path of the original i915_debugfs.c), read via
 * drm_dp_dpcd_read and hex-dump with the %*ph extension. */
intel_dp *tmp___0 ; uint8_t buf[16U] ; ssize_t err ; int i ; struct dpcd_block const *b ; size_t size ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; { connector = (struct drm_connector *)m->private; tmp = intel_attached_encoder(connector); tmp___0 = enc_to_intel_dp(& tmp->base); intel_dp = tmp___0; if ((unsigned int )connector->status != 1U) { return (-19); } else { } i = 0; goto ldv_51385; ldv_51384: b = (struct dpcd_block const *)(& i915_dpcd_debug) + (unsigned long )i; size = (unsigned int )b->end != 0U ? (unsigned long const )(((unsigned int )b->end - (unsigned int )b->offset) + 1U) : ((unsigned long )b->size != 0UL ? b->size : 1UL); if ((int )b->edp && connector->connector_type != 14) { goto ldv_51381; } else { } __ret_warn_on = size > 16UL; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_debugfs.c", 5194, "WARN_ON(size > sizeof(buf))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { goto ldv_51381; } else { } err = drm_dp_dpcd_read(& intel_dp->aux, b->offset, (void *)(& buf), size); if (err <= 0L) { drm_err("dpcd read (%zu bytes at %u) failed (%zd)\n", size, b->offset, err); goto ldv_51381; } else { } seq_printf(m, "%04x: %*ph\n", b->offset, (int )size, (uint8_t *)(& buf)); ldv_51381: i = i + 1; ldv_51385: ; if ((unsigned int )i <= 9U) { goto ldv_51384; } else { } return (0); } }
/* Single-shot seq_file open for the per-connector i915_dpcd node. */
static int i915_dpcd_open(struct inode *inode , struct file *file ) { int tmp ; { tmp = single_open(file, & i915_dpcd_show, inode->i_private); return (tmp); } }
static struct file_operations const i915_dpcd_fops = {& __this_module, & seq_lseek, & seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, & i915_dpcd_open, 0, & single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* Return type of i915_debugfs_connector_add; its definition continues on
 * the next source line. */
int
/* Attach the i915_dpcd file (mode 292 == 0444) to a connector's debugfs
 * dir, but only for DisplayPort (type 10) and eDP (type 14) connectors;
 * -ENODEV if the connector has no debugfs entry yet. */
i915_debugfs_connector_add(struct drm_connector *connector ) { struct dentry *root ; { root = connector->debugfs_entry; if ((unsigned long )root == (unsigned long )((struct dentry *)0)) { return (-19); } else { } if (connector->connector_type == 10 || connector->connector_type == 14) { debugfs_create_file("i915_dpcd", 292, root, (void *)connector, & i915_dpcd_fops); } else { } return (0); } }
/* Everything from here down is LDV verification-harness scaffolding, not
 * driver code: result slots for nondeterministically-chosen callbacks... */
int ldv_retval_38 ; int ldv_retval_42 ; int ldv_retval_47 ; int ldv_retval_33 ; int ldv_retval_2 ; int ldv_retval_43 ; int ldv_retval_35 ; int ldv_retval_0 ; int ldv_retval_1 ; int ldv_retval_6 ; int ldv_retval_46 ; int ldv_retval_28 ; int ldv_retval_49 ; int ldv_retval_45 ; int ldv_retval_37 ; int ldv_retval_34 ; int ldv_retval_32 ; int ldv_retval_4 ; int ldv_retval_44 ; int ldv_retval_30 ; int ldv_retval_3 ;
/* ...and per-fops environment initializers: each allocates a zeroed fake
 * inode (1000 bytes) and file (504 bytes) for one file_operations table. */
void ldv_file_operations_144(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_cache_sharing_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_cache_sharing_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_160(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_fbc_fc_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_fbc_fc_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_154(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_pri_wm_latency_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_pri_wm_latency_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_153(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_spr_wm_latency_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_spr_wm_latency_fops_group2 = (struct file *)tmp___0; return; } }
/* This initializer's body continues on the next source line. */
void ldv_file_operations_156(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_displayport_test_data_fops_group1 = (struct inode *)tmp; tmp___0 =
/* LDV harness continues: tail of ldv_file_operations_156, then more
 * per-fops fake inode/file initializers (same 1000/504-byte pattern). */
ldv_init_zalloc(504UL); i915_displayport_test_data_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_143(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_forcewake_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_forcewake_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_155(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_displayport_test_type_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_displayport_test_type_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_149(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_ring_missed_irq_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_ring_missed_irq_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_147(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_drop_caches_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_drop_caches_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_158(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_display_crc_ctl_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_display_crc_ctl_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_161(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_next_seqno_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_next_seqno_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_148(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_ring_test_irq_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_ring_test_irq_fops_group2 = (struct file *)tmp___0; return; } }
/* Body continues on the next source line (wedged fops environment). */
void ldv_file_operations_151(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL);
/* Tail of ldv_file_operations_151 (i915_wedged fops environment), then the
 * remaining per-fops fake inode/file initializers. */
i915_wedged_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_wedged_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_150(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_ring_stop_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_ring_stop_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_145(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_min_freq_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_min_freq_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_157(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_displayport_test_active_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_displayport_test_active_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_142(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_dpcd_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_dpcd_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_152(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_cur_wm_latency_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_cur_wm_latency_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_162(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_error_state_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_error_state_fops_group2 = (struct file *)tmp___0; return; } }
void ldv_file_operations_159(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL); i915_pipe_crc_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_pipe_crc_fops_group2 = (struct file *)tmp___0; return; } }
/* Body continues on the next source line (max-freq fops environment). */
void ldv_file_operations_146(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1000UL);
/* Tail of ldv_file_operations_146 (i915_max_freq fops environment). */
i915_max_freq_fops_group1 = (struct inode *)tmp; tmp___0 = ldv_init_zalloc(504UL); i915_max_freq_fops_group2 = (struct file *)tmp___0; return; } }
/* LDV environment model for the cur_wm_latency fops: a nondeterministic
 * switch drives the open/release/write/read/llseek callbacks while a state
 * variable (1 = closed, 2 = open) and ref_cnt enforce the legal call order
 * the verifier checks.  The llseek case continues on the next source line. */
void ldv_main_exported_152(void) { loff_t *ldvarg316 ; void *tmp ; int ldvarg311 ; size_t ldvarg317 ; loff_t *ldvarg313 ; void *tmp___0 ; char *ldvarg318 ; void *tmp___1 ; loff_t ldvarg312 ; char *ldvarg315 ; void *tmp___2 ; size_t ldvarg314 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg316 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg313 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg318 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg315 = (char *)tmp___2; ldv_memset((void *)(& ldvarg311), 0, 4UL); ldv_memset((void *)(& ldvarg317), 0, 8UL); ldv_memset((void *)(& ldvarg312), 0, 8UL); ldv_memset((void *)(& ldvarg314), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_152 == 1) { ldv_retval_35 = cur_wm_latency_open(i915_cur_wm_latency_fops_group1, i915_cur_wm_latency_fops_group2); if (ldv_retval_35 == 0) { ldv_state_variable_152 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51492; case 1: ; if (ldv_state_variable_152 == 2) { single_release(i915_cur_wm_latency_fops_group1, i915_cur_wm_latency_fops_group2); ldv_state_variable_152 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51492; case 2: ; if (ldv_state_variable_152 == 2) { cur_wm_latency_write(i915_cur_wm_latency_fops_group2, (char const *)ldvarg318, ldvarg317, ldvarg316); ldv_state_variable_152 = 2; } else { } if (ldv_state_variable_152 == 1) { cur_wm_latency_write(i915_cur_wm_latency_fops_group2, (char const *)ldvarg318, ldvarg317, ldvarg316); ldv_state_variable_152 = 1; } else { } goto ldv_51492; case 3: ; if (ldv_state_variable_152 == 2) { seq_read(i915_cur_wm_latency_fops_group2, ldvarg315, ldvarg314, ldvarg313); ldv_state_variable_152 = 2; } else { } goto ldv_51492; case 4: ; if (ldv_state_variable_152 == 2) { seq_lseek(i915_cur_wm_latency_fops_group2, ldvarg312,
ldvarg311); ldv_state_variable_152 = 2; } else { } goto ldv_51492; default: ldv_stop(); } ldv_51492: ; return; } } void ldv_main_exported_155(void) { size_t ldvarg505 ; loff_t *ldvarg507 ; void *tmp ; int ldvarg503 ; char *ldvarg506 ; void *tmp___0 ; loff_t ldvarg504 ; int tmp___1 ; { tmp = ldv_init_zalloc(8UL); ldvarg507 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg506 = (char *)tmp___0; ldv_memset((void *)(& ldvarg505), 0, 8UL); ldv_memset((void *)(& ldvarg503), 0, 4UL); ldv_memset((void *)(& ldvarg504), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_155 == 1) { ldv_retval_45 = i915_displayport_test_type_open(i915_displayport_test_type_fops_group1, i915_displayport_test_type_fops_group2); if (ldv_retval_45 == 0) { ldv_state_variable_155 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51507; case 1: ; if (ldv_state_variable_155 == 2) { single_release(i915_displayport_test_type_fops_group1, i915_displayport_test_type_fops_group2); ldv_state_variable_155 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51507; case 2: ; if (ldv_state_variable_155 == 2) { seq_read(i915_displayport_test_type_fops_group2, ldvarg506, ldvarg505, ldvarg507); ldv_state_variable_155 = 2; } else { } goto ldv_51507; case 3: ; if (ldv_state_variable_155 == 2) { seq_lseek(i915_displayport_test_type_fops_group2, ldvarg504, ldvarg503); ldv_state_variable_155 = 2; } else { } goto ldv_51507; default: ldv_stop(); } ldv_51507: ; return; } } void ldv_main_exported_142(void) { loff_t *ldvarg332 ; void *tmp ; int ldvarg330 ; size_t ldvarg333 ; char *ldvarg334 ; void *tmp___0 ; loff_t ldvarg331 ; int tmp___1 ; { tmp = ldv_init_zalloc(8UL); ldvarg332 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg334 = (char *)tmp___0; ldv_memset((void *)(& ldvarg330), 0, 4UL); ldv_memset((void *)(& ldvarg333), 0, 8UL); ldv_memset((void *)(& ldvarg331), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if 
(ldv_state_variable_142 == 1) { ldv_retval_37 = i915_dpcd_open(i915_dpcd_fops_group1, i915_dpcd_fops_group2); if (ldv_retval_37 == 0) { ldv_state_variable_142 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51521; case 1: ; if (ldv_state_variable_142 == 2) { single_release(i915_dpcd_fops_group1, i915_dpcd_fops_group2); ldv_state_variable_142 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51521; case 2: ; if (ldv_state_variable_142 == 2) { seq_read(i915_dpcd_fops_group2, ldvarg334, ldvarg333, ldvarg332); ldv_state_variable_142 = 2; } else { } goto ldv_51521; case 3: ; if (ldv_state_variable_142 == 2) { seq_lseek(i915_dpcd_fops_group2, ldvarg331, ldvarg330); ldv_state_variable_142 = 2; } else { } goto ldv_51521; default: ldv_stop(); } ldv_51521: ; return; } } void ldv_main_exported_143(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_143 == 1) { ldv_retval_46 = i915_forcewake_open(i915_forcewake_fops_group1, i915_forcewake_fops_group2); if (ldv_retval_46 == 0) { ldv_state_variable_143 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51530; case 1: ; if (ldv_state_variable_143 == 2) { i915_forcewake_release(i915_forcewake_fops_group1, i915_forcewake_fops_group2); ldv_state_variable_143 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51530; default: ldv_stop(); } ldv_51530: ; return; } } void ldv_main_exported_158(void) { char *ldvarg525 ; void *tmp ; loff_t *ldvarg526 ; void *tmp___0 ; size_t ldvarg524 ; loff_t *ldvarg529 ; void *tmp___1 ; loff_t ldvarg523 ; int ldvarg522 ; size_t ldvarg527 ; char *ldvarg528 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg525 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg526 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg529 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg528 = (char *)tmp___2; ldv_memset((void *)(& ldvarg524), 0, 8UL); ldv_memset((void *)(& ldvarg523), 0, 8UL); ldv_memset((void *)(& ldvarg522), 0, 
4UL); ldv_memset((void *)(& ldvarg527), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_158 == 1) { ldv_retval_47 = display_crc_ctl_open(i915_display_crc_ctl_fops_group1, i915_display_crc_ctl_fops_group2); if (ldv_retval_47 == 0) { ldv_state_variable_158 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51545; case 1: ; if (ldv_state_variable_158 == 2) { single_release(i915_display_crc_ctl_fops_group1, i915_display_crc_ctl_fops_group2); ldv_state_variable_158 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51545; case 2: ; if (ldv_state_variable_158 == 2) { display_crc_ctl_write(i915_display_crc_ctl_fops_group2, (char const *)ldvarg528, ldvarg527, ldvarg529); ldv_state_variable_158 = 2; } else { } if (ldv_state_variable_158 == 1) { display_crc_ctl_write(i915_display_crc_ctl_fops_group2, (char const *)ldvarg528, ldvarg527, ldvarg529); ldv_state_variable_158 = 1; } else { } goto ldv_51545; case 3: ; if (ldv_state_variable_158 == 2) { seq_read(i915_display_crc_ctl_fops_group2, ldvarg525, ldvarg524, ldvarg526); ldv_state_variable_158 = 2; } else { } goto ldv_51545; case 4: ; if (ldv_state_variable_158 == 2) { seq_lseek(i915_display_crc_ctl_fops_group2, ldvarg523, ldvarg522); ldv_state_variable_158 = 2; } else { } goto ldv_51545; default: ldv_stop(); } ldv_51545: ; return; } } void ldv_main_exported_154(void) { char *ldvarg206 ; void *tmp ; loff_t *ldvarg201 ; void *tmp___0 ; int ldvarg199 ; size_t ldvarg205 ; size_t ldvarg202 ; loff_t ldvarg200 ; char *ldvarg203 ; void *tmp___1 ; loff_t *ldvarg204 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg206 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg201 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg203 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg204 = (loff_t *)tmp___2; ldv_memset((void *)(& ldvarg199), 0, 4UL); ldv_memset((void *)(& ldvarg205), 0, 8UL); ldv_memset((void *)(& ldvarg202), 0, 8UL); ldv_memset((void 
*)(& ldvarg200), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_154 == 1) { ldv_retval_30 = pri_wm_latency_open(i915_pri_wm_latency_fops_group1, i915_pri_wm_latency_fops_group2); if (ldv_retval_30 == 0) { ldv_state_variable_154 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51563; case 1: ; if (ldv_state_variable_154 == 2) { single_release(i915_pri_wm_latency_fops_group1, i915_pri_wm_latency_fops_group2); ldv_state_variable_154 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51563; case 2: ; if (ldv_state_variable_154 == 2) { pri_wm_latency_write(i915_pri_wm_latency_fops_group2, (char const *)ldvarg206, ldvarg205, ldvarg204); ldv_state_variable_154 = 2; } else { } if (ldv_state_variable_154 == 1) { pri_wm_latency_write(i915_pri_wm_latency_fops_group2, (char const *)ldvarg206, ldvarg205, ldvarg204); ldv_state_variable_154 = 1; } else { } goto ldv_51563; case 3: ; if (ldv_state_variable_154 == 2) { seq_read(i915_pri_wm_latency_fops_group2, ldvarg203, ldvarg202, ldvarg201); ldv_state_variable_154 = 2; } else { } goto ldv_51563; case 4: ; if (ldv_state_variable_154 == 2) { seq_lseek(i915_pri_wm_latency_fops_group2, ldvarg200, ldvarg199); ldv_state_variable_154 = 2; } else { } goto ldv_51563; default: ldv_stop(); } ldv_51563: ; return; } } void ldv_main_exported_162(void) { char *ldvarg283 ; void *tmp ; int ldvarg279 ; loff_t ldvarg280 ; loff_t *ldvarg284 ; void *tmp___0 ; loff_t *ldvarg281 ; void *tmp___1 ; char *ldvarg286 ; void *tmp___2 ; size_t ldvarg282 ; size_t ldvarg285 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg283 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg284 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg281 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg286 = (char *)tmp___2; ldv_memset((void *)(& ldvarg279), 0, 4UL); ldv_memset((void *)(& ldvarg280), 0, 8UL); ldv_memset((void *)(& ldvarg282), 0, 8UL); ldv_memset((void *)(& ldvarg285), 0, 8UL); tmp___3 = 
__VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_162 == 1) { ldv_retval_34 = i915_error_state_open(i915_error_state_fops_group1, i915_error_state_fops_group2); if (ldv_retval_34 == 0) { ldv_state_variable_162 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51581; case 1: ; if (ldv_state_variable_162 == 2) { i915_error_state_release(i915_error_state_fops_group1, i915_error_state_fops_group2); ldv_state_variable_162 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51581; case 2: ; if (ldv_state_variable_162 == 2) { i915_error_state_write(i915_error_state_fops_group2, (char const *)ldvarg286, ldvarg285, ldvarg284); ldv_state_variable_162 = 2; } else { } if (ldv_state_variable_162 == 1) { i915_error_state_write(i915_error_state_fops_group2, (char const *)ldvarg286, ldvarg285, ldvarg284); ldv_state_variable_162 = 1; } else { } goto ldv_51581; case 3: ; if (ldv_state_variable_162 == 2) { i915_error_state_read(i915_error_state_fops_group2, ldvarg283, ldvarg282, ldvarg281); ldv_state_variable_162 = 2; } else { } goto ldv_51581; case 4: ; if (ldv_state_variable_162 == 2) { default_llseek(i915_error_state_fops_group2, ldvarg280, ldvarg279); ldv_state_variable_162 = 2; } else { } goto ldv_51581; default: ldv_stop(); } ldv_51581: ; return; } } void ldv_main_exported_144(void) { char *ldvarg442 ; void *tmp ; loff_t ldvarg437 ; loff_t *ldvarg443 ; void *tmp___0 ; char *ldvarg439 ; void *tmp___1 ; size_t ldvarg438 ; int ldvarg436 ; loff_t *ldvarg440 ; void *tmp___2 ; size_t ldvarg441 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg442 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg443 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg439 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg440 = (loff_t *)tmp___2; ldv_memset((void *)(& ldvarg437), 0, 8UL); ldv_memset((void *)(& ldvarg438), 0, 8UL); ldv_memset((void *)(& ldvarg436), 0, 4UL); ldv_memset((void *)(& ldvarg441), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); 
switch (tmp___3) { case 0: ; if (ldv_state_variable_144 == 1) { ldv_retval_42 = i915_cache_sharing_fops_open(i915_cache_sharing_fops_group1, i915_cache_sharing_fops_group2); if (ldv_retval_42 == 0) { ldv_state_variable_144 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51599; case 1: ; if (ldv_state_variable_144 == 2) { simple_attr_release(i915_cache_sharing_fops_group1, i915_cache_sharing_fops_group2); ldv_state_variable_144 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51599; case 2: ; if (ldv_state_variable_144 == 2) { simple_attr_write(i915_cache_sharing_fops_group2, (char const *)ldvarg442, ldvarg441, ldvarg443); ldv_state_variable_144 = 2; } else { } if (ldv_state_variable_144 == 1) { simple_attr_write(i915_cache_sharing_fops_group2, (char const *)ldvarg442, ldvarg441, ldvarg443); ldv_state_variable_144 = 1; } else { } goto ldv_51599; case 3: ; if (ldv_state_variable_144 == 2) { simple_attr_read(i915_cache_sharing_fops_group2, ldvarg439, ldvarg438, ldvarg440); ldv_state_variable_144 = 2; } else { } goto ldv_51599; case 4: ; if (ldv_state_variable_144 == 2) { generic_file_llseek(i915_cache_sharing_fops_group2, ldvarg437, ldvarg436); ldv_state_variable_144 = 2; } else { } goto ldv_51599; default: ldv_stop(); } ldv_51599: ; return; } } void ldv_main_exported_157(void) { loff_t ldvarg83 ; int ldvarg82 ; char *ldvarg89 ; void *tmp ; char *ldvarg86 ; void *tmp___0 ; loff_t *ldvarg87 ; void *tmp___1 ; size_t ldvarg88 ; loff_t *ldvarg84 ; void *tmp___2 ; size_t ldvarg85 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg89 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg86 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg87 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg84 = (loff_t *)tmp___2; ldv_memset((void *)(& ldvarg83), 0, 8UL); ldv_memset((void *)(& ldvarg82), 0, 4UL); ldv_memset((void *)(& ldvarg88), 0, 8UL); ldv_memset((void *)(& ldvarg85), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if 
(ldv_state_variable_157 == 1) { ldv_retval_3 = i915_displayport_test_active_open(i915_displayport_test_active_fops_group1, i915_displayport_test_active_fops_group2); if (ldv_retval_3 == 0) { ldv_state_variable_157 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51617; case 1: ; if (ldv_state_variable_157 == 2) { single_release(i915_displayport_test_active_fops_group1, i915_displayport_test_active_fops_group2); ldv_state_variable_157 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51617; case 2: ; if (ldv_state_variable_157 == 1) { i915_displayport_test_active_write(i915_displayport_test_active_fops_group2, (char const *)ldvarg89, ldvarg88, ldvarg87); ldv_state_variable_157 = 1; } else { } if (ldv_state_variable_157 == 2) { i915_displayport_test_active_write(i915_displayport_test_active_fops_group2, (char const *)ldvarg89, ldvarg88, ldvarg87); ldv_state_variable_157 = 2; } else { } goto ldv_51617; case 3: ; if (ldv_state_variable_157 == 2) { seq_read(i915_displayport_test_active_fops_group2, ldvarg86, ldvarg85, ldvarg84); ldv_state_variable_157 = 2; } else { } goto ldv_51617; case 4: ; if (ldv_state_variable_157 == 2) { seq_lseek(i915_displayport_test_active_fops_group2, ldvarg83, ldvarg82); ldv_state_variable_157 = 2; } else { } goto ldv_51617; default: ldv_stop(); } ldv_51617: ; return; } } void ldv_main_exported_159(void) { size_t ldvarg214 ; char *ldvarg215 ; void *tmp ; loff_t *ldvarg213 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg215 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg213 = (loff_t *)tmp___0; ldv_memset((void *)(& ldvarg214), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_159 == 1) { ldv_retval_32 = i915_pipe_crc_open(i915_pipe_crc_fops_group1, i915_pipe_crc_fops_group2); if (ldv_retval_32 == 0) { ldv_state_variable_159 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51630; case 1: ; if (ldv_state_variable_159 == 2) { 
i915_pipe_crc_release(i915_pipe_crc_fops_group1, i915_pipe_crc_fops_group2); ldv_state_variable_159 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51630; case 2: ; if (ldv_state_variable_159 == 2) { i915_pipe_crc_read(i915_pipe_crc_fops_group2, ldvarg215, ldvarg214, ldvarg213); ldv_state_variable_159 = 2; } else { } goto ldv_51630; default: ldv_stop(); } ldv_51630: ; return; } } void ldv_main_exported_147(void) { loff_t ldvarg148 ; loff_t *ldvarg149 ; void *tmp ; size_t ldvarg153 ; size_t ldvarg150 ; loff_t *ldvarg152 ; void *tmp___0 ; char *ldvarg151 ; void *tmp___1 ; char *ldvarg154 ; void *tmp___2 ; int ldvarg147 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg149 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg152 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg151 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg154 = (char *)tmp___2; ldv_memset((void *)(& ldvarg148), 0, 8UL); ldv_memset((void *)(& ldvarg153), 0, 8UL); ldv_memset((void *)(& ldvarg150), 0, 8UL); ldv_memset((void *)(& ldvarg147), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_147 == 1) { ldv_retval_6 = i915_drop_caches_fops_open(i915_drop_caches_fops_group1, i915_drop_caches_fops_group2); if (ldv_retval_6 == 0) { ldv_state_variable_147 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51646; case 1: ; if (ldv_state_variable_147 == 2) { simple_attr_release(i915_drop_caches_fops_group1, i915_drop_caches_fops_group2); ldv_state_variable_147 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51646; case 2: ; if (ldv_state_variable_147 == 1) { simple_attr_write(i915_drop_caches_fops_group2, (char const *)ldvarg154, ldvarg153, ldvarg152); ldv_state_variable_147 = 1; } else { } if (ldv_state_variable_147 == 2) { simple_attr_write(i915_drop_caches_fops_group2, (char const *)ldvarg154, ldvarg153, ldvarg152); ldv_state_variable_147 = 2; } else { } goto ldv_51646; case 3: ; if (ldv_state_variable_147 == 2) { 
simple_attr_read(i915_drop_caches_fops_group2, ldvarg151, ldvarg150, ldvarg149); ldv_state_variable_147 = 2; } else { } goto ldv_51646; case 4: ; if (ldv_state_variable_147 == 2) { generic_file_llseek(i915_drop_caches_fops_group2, ldvarg148, ldvarg147); ldv_state_variable_147 = 2; } else { } goto ldv_51646; default: ldv_stop(); } ldv_51646: ; return; } } void ldv_main_exported_149(void) { loff_t *ldvarg356 ; void *tmp ; char *ldvarg355 ; void *tmp___0 ; size_t ldvarg357 ; int ldvarg351 ; char *ldvarg358 ; void *tmp___1 ; size_t ldvarg354 ; loff_t *ldvarg353 ; void *tmp___2 ; loff_t ldvarg352 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg356 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg355 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg358 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg353 = (loff_t *)tmp___2; ldv_memset((void *)(& ldvarg357), 0, 8UL); ldv_memset((void *)(& ldvarg351), 0, 4UL); ldv_memset((void *)(& ldvarg354), 0, 8UL); ldv_memset((void *)(& ldvarg352), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_149 == 1) { ldv_retval_38 = i915_ring_missed_irq_fops_open(i915_ring_missed_irq_fops_group1, i915_ring_missed_irq_fops_group2); if (ldv_retval_38 == 0) { ldv_state_variable_149 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51664; case 1: ; if (ldv_state_variable_149 == 2) { simple_attr_release(i915_ring_missed_irq_fops_group1, i915_ring_missed_irq_fops_group2); ldv_state_variable_149 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51664; case 2: ; if (ldv_state_variable_149 == 2) { simple_attr_write(i915_ring_missed_irq_fops_group2, (char const *)ldvarg358, ldvarg357, ldvarg356); ldv_state_variable_149 = 2; } else { } if (ldv_state_variable_149 == 1) { simple_attr_write(i915_ring_missed_irq_fops_group2, (char const *)ldvarg358, ldvarg357, ldvarg356); ldv_state_variable_149 = 1; } else { } goto ldv_51664; case 3: ; if (ldv_state_variable_149 == 2) { 
simple_attr_read(i915_ring_missed_irq_fops_group2, ldvarg355, ldvarg354, ldvarg353); ldv_state_variable_149 = 2; } else { } goto ldv_51664; case 4: ; if (ldv_state_variable_149 == 2) { generic_file_llseek(i915_ring_missed_irq_fops_group2, ldvarg352, ldvarg351); ldv_state_variable_149 = 2; } else { } goto ldv_51664; default: ldv_stop(); } ldv_51664: ; return; } } void ldv_main_exported_161(void) { size_t ldvarg34 ; loff_t *ldvarg36 ; void *tmp ; size_t ldvarg37 ; loff_t *ldvarg33 ; void *tmp___0 ; char *ldvarg38 ; void *tmp___1 ; int ldvarg31 ; loff_t ldvarg32 ; char *ldvarg35 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg36 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg33 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg38 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg35 = (char *)tmp___2; ldv_memset((void *)(& ldvarg34), 0, 8UL); ldv_memset((void *)(& ldvarg37), 0, 8UL); ldv_memset((void *)(& ldvarg31), 0, 4UL); ldv_memset((void *)(& ldvarg32), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_161 == 1) { ldv_retval_0 = i915_next_seqno_fops_open(i915_next_seqno_fops_group1, i915_next_seqno_fops_group2); if (ldv_retval_0 == 0) { ldv_state_variable_161 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51682; case 1: ; if (ldv_state_variable_161 == 2) { simple_attr_release(i915_next_seqno_fops_group1, i915_next_seqno_fops_group2); ldv_state_variable_161 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51682; case 2: ; if (ldv_state_variable_161 == 1) { simple_attr_write(i915_next_seqno_fops_group2, (char const *)ldvarg38, ldvarg37, ldvarg36); ldv_state_variable_161 = 1; } else { } if (ldv_state_variable_161 == 2) { simple_attr_write(i915_next_seqno_fops_group2, (char const *)ldvarg38, ldvarg37, ldvarg36); ldv_state_variable_161 = 2; } else { } goto ldv_51682; case 3: ; if (ldv_state_variable_161 == 2) { simple_attr_read(i915_next_seqno_fops_group2, ldvarg35, 
ldvarg34, ldvarg33); ldv_state_variable_161 = 2; } else { } goto ldv_51682; case 4: ; if (ldv_state_variable_161 == 2) { generic_file_llseek(i915_next_seqno_fops_group2, ldvarg32, ldvarg31); ldv_state_variable_161 = 2; } else { } goto ldv_51682; default: ldv_stop(); } ldv_51682: ; return; } } void ldv_main_exported_156(void) { char *ldvarg479 ; void *tmp ; int ldvarg476 ; loff_t *ldvarg480 ; void *tmp___0 ; loff_t ldvarg477 ; size_t ldvarg478 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg479 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg480 = (loff_t *)tmp___0; ldv_memset((void *)(& ldvarg476), 0, 4UL); ldv_memset((void *)(& ldvarg477), 0, 8UL); ldv_memset((void *)(& ldvarg478), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_156 == 1) { ldv_retval_43 = i915_displayport_test_data_open(i915_displayport_test_data_fops_group1, i915_displayport_test_data_fops_group2); if (ldv_retval_43 == 0) { ldv_state_variable_156 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51697; case 1: ; if (ldv_state_variable_156 == 2) { single_release(i915_displayport_test_data_fops_group1, i915_displayport_test_data_fops_group2); ldv_state_variable_156 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51697; case 2: ; if (ldv_state_variable_156 == 2) { seq_read(i915_displayport_test_data_fops_group2, ldvarg479, ldvarg478, ldvarg480); ldv_state_variable_156 = 2; } else { } goto ldv_51697; case 3: ; if (ldv_state_variable_156 == 2) { seq_lseek(i915_displayport_test_data_fops_group2, ldvarg477, ldvarg476); ldv_state_variable_156 = 2; } else { } goto ldv_51697; default: ldv_stop(); } ldv_51697: ; return; } } void ldv_main_exported_160(void) { int ldvarg216 ; char *ldvarg223 ; void *tmp ; size_t ldvarg222 ; loff_t *ldvarg218 ; void *tmp___0 ; char *ldvarg220 ; void *tmp___1 ; size_t ldvarg219 ; loff_t *ldvarg221 ; void *tmp___2 ; loff_t ldvarg217 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg223 = (char *)tmp; tmp___0 = 
ldv_init_zalloc(8UL); ldvarg218 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg220 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg221 = (loff_t *)tmp___2; ldv_memset((void *)(& ldvarg216), 0, 4UL); ldv_memset((void *)(& ldvarg222), 0, 8UL); ldv_memset((void *)(& ldvarg219), 0, 8UL); ldv_memset((void *)(& ldvarg217), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_160 == 1) { ldv_retval_33 = i915_fbc_fc_fops_open(i915_fbc_fc_fops_group1, i915_fbc_fc_fops_group2); if (ldv_retval_33 == 0) { ldv_state_variable_160 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51714; case 1: ; if (ldv_state_variable_160 == 2) { simple_attr_release(i915_fbc_fc_fops_group1, i915_fbc_fc_fops_group2); ldv_state_variable_160 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51714; case 2: ; if (ldv_state_variable_160 == 2) { simple_attr_write(i915_fbc_fc_fops_group2, (char const *)ldvarg223, ldvarg222, ldvarg221); ldv_state_variable_160 = 2; } else { } if (ldv_state_variable_160 == 1) { simple_attr_write(i915_fbc_fc_fops_group2, (char const *)ldvarg223, ldvarg222, ldvarg221); ldv_state_variable_160 = 1; } else { } goto ldv_51714; case 3: ; if (ldv_state_variable_160 == 2) { simple_attr_read(i915_fbc_fc_fops_group2, ldvarg220, ldvarg219, ldvarg218); ldv_state_variable_160 = 2; } else { } goto ldv_51714; case 4: ; if (ldv_state_variable_160 == 2) { generic_file_llseek(i915_fbc_fc_fops_group2, ldvarg217, ldvarg216); ldv_state_variable_160 = 2; } else { } goto ldv_51714; default: ldv_stop(); } ldv_51714: ; return; } } void ldv_main_exported_146(void) { size_t ldvarg547 ; loff_t ldvarg546 ; char *ldvarg551 ; void *tmp ; loff_t *ldvarg549 ; void *tmp___0 ; loff_t *ldvarg552 ; void *tmp___1 ; size_t ldvarg550 ; int ldvarg545 ; char *ldvarg548 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg551 = (char *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg549 = (loff_t *)tmp___0; tmp___1 = 
ldv_init_zalloc(8UL); ldvarg552 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg548 = (char *)tmp___2; ldv_memset((void *)(& ldvarg547), 0, 8UL); ldv_memset((void *)(& ldvarg546), 0, 8UL); ldv_memset((void *)(& ldvarg550), 0, 8UL); ldv_memset((void *)(& ldvarg545), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_146 == 1) { ldv_retval_49 = i915_max_freq_fops_open(i915_max_freq_fops_group1, i915_max_freq_fops_group2); if (ldv_retval_49 == 0) { ldv_state_variable_146 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51732; case 1: ; if (ldv_state_variable_146 == 2) { simple_attr_release(i915_max_freq_fops_group1, i915_max_freq_fops_group2); ldv_state_variable_146 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51732; case 2: ; if (ldv_state_variable_146 == 2) { simple_attr_write(i915_max_freq_fops_group2, (char const *)ldvarg551, ldvarg550, ldvarg552); ldv_state_variable_146 = 2; } else { } if (ldv_state_variable_146 == 1) { simple_attr_write(i915_max_freq_fops_group2, (char const *)ldvarg551, ldvarg550, ldvarg552); ldv_state_variable_146 = 1; } else { } goto ldv_51732; case 3: ; if (ldv_state_variable_146 == 2) { simple_attr_read(i915_max_freq_fops_group2, ldvarg548, ldvarg547, ldvarg549); ldv_state_variable_146 = 2; } else { } goto ldv_51732; case 4: ; if (ldv_state_variable_146 == 2) { generic_file_llseek(i915_max_freq_fops_group2, ldvarg546, ldvarg545); ldv_state_variable_146 = 2; } else { } goto ldv_51732; default: ldv_stop(); } ldv_51732: ; return; } } void ldv_main_exported_153(void) { loff_t *ldvarg114 ; void *tmp ; char *ldvarg113 ; void *tmp___0 ; size_t ldvarg112 ; loff_t ldvarg110 ; loff_t *ldvarg111 ; void *tmp___1 ; int ldvarg109 ; size_t ldvarg115 ; char *ldvarg116 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg114 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg113 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg111 = (loff_t *)tmp___1; tmp___2 = 
ldv_init_zalloc(1UL); ldvarg116 = (char *)tmp___2; ldv_memset((void *)(& ldvarg112), 0, 8UL); ldv_memset((void *)(& ldvarg110), 0, 8UL); ldv_memset((void *)(& ldvarg109), 0, 4UL); ldv_memset((void *)(& ldvarg115), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_153 == 1) { ldv_retval_4 = spr_wm_latency_open(i915_spr_wm_latency_fops_group1, i915_spr_wm_latency_fops_group2); if (ldv_retval_4 == 0) { ldv_state_variable_153 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51750; case 1: ; if (ldv_state_variable_153 == 2) { single_release(i915_spr_wm_latency_fops_group1, i915_spr_wm_latency_fops_group2); ldv_state_variable_153 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51750; case 2: ; if (ldv_state_variable_153 == 1) { spr_wm_latency_write(i915_spr_wm_latency_fops_group2, (char const *)ldvarg116, ldvarg115, ldvarg114); ldv_state_variable_153 = 1; } else { } if (ldv_state_variable_153 == 2) { spr_wm_latency_write(i915_spr_wm_latency_fops_group2, (char const *)ldvarg116, ldvarg115, ldvarg114); ldv_state_variable_153 = 2; } else { } goto ldv_51750; case 3: ; if (ldv_state_variable_153 == 2) { seq_read(i915_spr_wm_latency_fops_group2, ldvarg113, ldvarg112, ldvarg111); ldv_state_variable_153 = 2; } else { } goto ldv_51750; case 4: ; if (ldv_state_variable_153 == 2) { seq_lseek(i915_spr_wm_latency_fops_group2, ldvarg110, ldvarg109); ldv_state_variable_153 = 2; } else { } goto ldv_51750; default: ldv_stop(); } ldv_51750: ; return; } } void ldv_main_exported_145(void) { size_t ldvarg169 ; int ldvarg166 ; loff_t *ldvarg168 ; void *tmp ; loff_t *ldvarg171 ; void *tmp___0 ; char *ldvarg170 ; void *tmp___1 ; size_t ldvarg172 ; char *ldvarg173 ; void *tmp___2 ; loff_t ldvarg167 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg168 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg171 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg170 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg173 = (char 
*)tmp___2; ldv_memset((void *)(& ldvarg169), 0, 8UL); ldv_memset((void *)(& ldvarg166), 0, 4UL); ldv_memset((void *)(& ldvarg172), 0, 8UL); ldv_memset((void *)(& ldvarg167), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_145 == 1) { ldv_retval_28 = i915_min_freq_fops_open(i915_min_freq_fops_group1, i915_min_freq_fops_group2); if (ldv_retval_28 == 0) { ldv_state_variable_145 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51768; case 1: ; if (ldv_state_variable_145 == 2) { simple_attr_release(i915_min_freq_fops_group1, i915_min_freq_fops_group2); ldv_state_variable_145 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51768; case 2: ; if (ldv_state_variable_145 == 2) { simple_attr_write(i915_min_freq_fops_group2, (char const *)ldvarg173, ldvarg172, ldvarg171); ldv_state_variable_145 = 2; } else { } if (ldv_state_variable_145 == 1) { simple_attr_write(i915_min_freq_fops_group2, (char const *)ldvarg173, ldvarg172, ldvarg171); ldv_state_variable_145 = 1; } else { } goto ldv_51768; case 3: ; if (ldv_state_variable_145 == 2) { simple_attr_read(i915_min_freq_fops_group2, ldvarg170, ldvarg169, ldvarg168); ldv_state_variable_145 = 2; } else { } goto ldv_51768; case 4: ; if (ldv_state_variable_145 == 2) { generic_file_llseek(i915_min_freq_fops_group2, ldvarg167, ldvarg166); ldv_state_variable_145 = 2; } else { } goto ldv_51768; default: ldv_stop(); } ldv_51768: ; return; } } void ldv_main_exported_151(void) { int ldvarg52 ; size_t ldvarg55 ; loff_t ldvarg53 ; size_t ldvarg58 ; loff_t *ldvarg54 ; void *tmp ; loff_t *ldvarg57 ; void *tmp___0 ; char *ldvarg56 ; void *tmp___1 ; char *ldvarg59 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg54 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg57 = (loff_t *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg56 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg59 = (char *)tmp___2; ldv_memset((void *)(& ldvarg52), 0, 4UL); ldv_memset((void *)(& 
ldvarg55), 0, 8UL); ldv_memset((void *)(& ldvarg53), 0, 8UL); ldv_memset((void *)(& ldvarg58), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_151 == 1) { ldv_retval_1 = i915_wedged_fops_open(i915_wedged_fops_group1, i915_wedged_fops_group2); if (ldv_retval_1 == 0) { ldv_state_variable_151 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51786; case 1: ; if (ldv_state_variable_151 == 2) { simple_attr_release(i915_wedged_fops_group1, i915_wedged_fops_group2); ldv_state_variable_151 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51786; case 2: ; if (ldv_state_variable_151 == 1) { simple_attr_write(i915_wedged_fops_group2, (char const *)ldvarg59, ldvarg58, ldvarg57); ldv_state_variable_151 = 1; } else { } if (ldv_state_variable_151 == 2) { simple_attr_write(i915_wedged_fops_group2, (char const *)ldvarg59, ldvarg58, ldvarg57); ldv_state_variable_151 = 2; } else { } goto ldv_51786; case 3: ; if (ldv_state_variable_151 == 2) { simple_attr_read(i915_wedged_fops_group2, ldvarg56, ldvarg55, ldvarg54); ldv_state_variable_151 = 2; } else { } goto ldv_51786; case 4: ; if (ldv_state_variable_151 == 2) { generic_file_llseek(i915_wedged_fops_group2, ldvarg53, ldvarg52); ldv_state_variable_151 = 2; } else { } goto ldv_51786; default: ldv_stop(); } ldv_51786: ; return; } } void ldv_main_exported_148(void) { loff_t ldvarg66 ; int ldvarg65 ; char *ldvarg69 ; void *tmp ; char *ldvarg72 ; void *tmp___0 ; size_t ldvarg68 ; size_t ldvarg71 ; loff_t *ldvarg67 ; void *tmp___1 ; loff_t *ldvarg70 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg69 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg72 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg67 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg70 = (loff_t *)tmp___2; ldv_memset((void *)(& ldvarg66), 0, 8UL); ldv_memset((void *)(& ldvarg65), 0, 4UL); ldv_memset((void *)(& ldvarg68), 0, 8UL); ldv_memset((void *)(& ldvarg71), 0, 8UL); tmp___3 
= __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_148 == 1) { ldv_retval_2 = i915_ring_test_irq_fops_open(i915_ring_test_irq_fops_group1, i915_ring_test_irq_fops_group2); if (ldv_retval_2 == 0) { ldv_state_variable_148 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_51804; case 1: ; if (ldv_state_variable_148 == 2) { simple_attr_release(i915_ring_test_irq_fops_group1, i915_ring_test_irq_fops_group2); ldv_state_variable_148 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51804; case 2: ; if (ldv_state_variable_148 == 1) { simple_attr_write(i915_ring_test_irq_fops_group2, (char const *)ldvarg72, ldvarg71, ldvarg70); ldv_state_variable_148 = 1; } else { } if (ldv_state_variable_148 == 2) { simple_attr_write(i915_ring_test_irq_fops_group2, (char const *)ldvarg72, ldvarg71, ldvarg70); ldv_state_variable_148 = 2; } else { } goto ldv_51804; case 3: ; if (ldv_state_variable_148 == 2) { simple_attr_read(i915_ring_test_irq_fops_group2, ldvarg69, ldvarg68, ldvarg67); ldv_state_variable_148 = 2; } else { } goto ldv_51804; case 4: ; if (ldv_state_variable_148 == 2) { generic_file_llseek(i915_ring_test_irq_fops_group2, ldvarg66, ldvarg65); ldv_state_variable_148 = 2; } else { } goto ldv_51804; default: ldv_stop(); } ldv_51804: ; return; } } void ldv_main_exported_150(void) { loff_t *ldvarg499 ; void *tmp ; char *ldvarg498 ; void *tmp___0 ; int ldvarg495 ; size_t ldvarg500 ; loff_t ldvarg496 ; size_t ldvarg497 ; loff_t *ldvarg502 ; void *tmp___1 ; char *ldvarg501 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg499 = (loff_t *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg498 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg502 = (loff_t *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg501 = (char *)tmp___2; ldv_memset((void *)(& ldvarg495), 0, 4UL); ldv_memset((void *)(& ldvarg500), 0, 8UL); ldv_memset((void *)(& ldvarg496), 0, 8UL); ldv_memset((void *)(& ldvarg497), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); 
/*
 * NOTE(review): this file is CIL/LDV-generated verification-harness output
 * (a flattened copy of drivers/gpu/drm/i915/i915_cmd_parser.c plus LDV
 * environment models).  This edit only re-wraps the generated single-line
 * stream and adds comments; every code token is unchanged.
 *
 * Tail of the generated LDV state machine driving the i915_ring_stop_fops
 * file operations (the enclosing function begins before this chunk).
 * ldv_state_variable_150 appears to encode 1 = closed, 2 = opened (open
 * moves 1 -> 2, release moves 2 -> 1) -- inferred from the transitions
 * below; ref_cnt counts open handles.
 */
switch (tmp___3) {
case 0: ;
/* open callback: only legal from state 1; on success enter state 2 */
if (ldv_state_variable_150 == 1) {
  ldv_retval_44 = i915_ring_stop_fops_open(i915_ring_stop_fops_group1, i915_ring_stop_fops_group2);
  if (ldv_retval_44 == 0) {
    ldv_state_variable_150 = 2;
    ref_cnt = ref_cnt + 1;
  } else {
  }
} else {
}
goto ldv_51822;
case 1: ;
/* release callback: only legal from state 2; drop the handle reference */
if (ldv_state_variable_150 == 2) {
  simple_attr_release(i915_ring_stop_fops_group1, i915_ring_stop_fops_group2);
  ldv_state_variable_150 = 1;
  ref_cnt = ref_cnt - 1;
} else {
}
goto ldv_51822;
case 2: ;
/* write callback: modelled as callable from either state; state unchanged */
if (ldv_state_variable_150 == 2) {
  simple_attr_write(i915_ring_stop_fops_group2, (char const *)ldvarg501, ldvarg500, ldvarg502);
  ldv_state_variable_150 = 2;
} else {
}
if (ldv_state_variable_150 == 1) {
  simple_attr_write(i915_ring_stop_fops_group2, (char const *)ldvarg501, ldvarg500, ldvarg502);
  ldv_state_variable_150 = 1;
} else {
}
goto ldv_51822;
case 3: ;
/* read callback: only modelled while the attribute file is open (state 2) */
if (ldv_state_variable_150 == 2) {
  simple_attr_read(i915_ring_stop_fops_group2, ldvarg498, ldvarg497, ldvarg499);
  ldv_state_variable_150 = 2;
} else {
}
goto ldv_51822;
case 4: ;
/* llseek callback: only modelled while open (state 2) */
if (ldv_state_variable_150 == 2) {
  generic_file_llseek(i915_ring_stop_fops_group2, ldvarg496, ldvarg495);
  ldv_state_variable_150 = 2;
} else {
}
goto ldv_51822;
default:
/* impossible nondeterministic choice: cut this exploration path */
ldv_stop();
}
ldv_51822: ;
return;
}
}
/*
 * LDV model wrappers for the workqueue API.  Each wrapper forwards to the
 * real kernel function and additionally informs the harness about the work
 * item (activate_work_18 / call_and_disable_*_18) -- presumably so queued
 * handlers can later be invoked by the verifier's environment model.
 */
bool ldv_queue_work_on_139(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                           struct work_struct *ldv_func_arg3 )
{
  ldv_func_ret_type ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  ldv_func_res = tmp;
  /* register the queued work with the LDV work model (state 2) */
  activate_work_18(ldv_func_arg3, 2);
  return (ldv_func_res);
  }
}
/* Model of queue_delayed_work_on(): registers the embedded work_struct. */
bool ldv_queue_delayed_work_on_140(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                                   struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 )
{
  ldv_func_ret_type___0 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
  ldv_func_res = tmp;
  activate_work_18(& ldv_func_arg3->work, 2);
  return (ldv_func_res);
  }
}
/* Same model as ldv_queue_work_on_139 under a distinct generated name. */
bool ldv_queue_work_on_141(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                           struct work_struct *ldv_func_arg3 )
{
  ldv_func_ret_type___1 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  ldv_func_res = tmp;
  activate_work_18(ldv_func_arg3, 2);
  return (ldv_func_res);
  }
}
/* Model of flush_workqueue(): run and disable every pending modelled work. */
void ldv_flush_workqueue_142(struct workqueue_struct *ldv_func_arg1 )
{
  {
  flush_workqueue(ldv_func_arg1);
  call_and_disable_all_18(2);
  return;
  }
}
bool ldv_queue_delayed_work_on_143(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 ,
                                   struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 )
{
  ldv_func_ret_type___2 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
  ldv_func_res = tmp;
  activate_work_18(& ldv_func_arg3->work, 2);
  return (ldv_func_res);
  }
}
/*
 * Models of flush_delayed_work(): flush, then run and disable the modelled
 * work item.  CIL stamps out one copy per call site (_144.._149).
 */
bool ldv_flush_delayed_work_144(struct delayed_work *ldv_func_arg1 )
{
  ldv_func_ret_type___3 ldv_func_res ;
  bool tmp ;
  {
  tmp = flush_delayed_work(ldv_func_arg1);
  ldv_func_res = tmp;
  call_and_disable_work_18(& ldv_func_arg1->work);
  return (ldv_func_res);
  }
}
bool ldv_flush_delayed_work_145(struct delayed_work *ldv_func_arg1 )
{
  ldv_func_ret_type___4 ldv_func_res ;
  bool tmp ;
  {
  tmp = flush_delayed_work(ldv_func_arg1);
  ldv_func_res = tmp;
  call_and_disable_work_18(& ldv_func_arg1->work);
  return (ldv_func_res);
  }
}
bool ldv_flush_delayed_work_146(struct delayed_work *ldv_func_arg1 )
{
  ldv_func_ret_type___5 ldv_func_res ;
  bool tmp ;
  {
  tmp = flush_delayed_work(ldv_func_arg1);
  ldv_func_res = tmp;
  call_and_disable_work_18(& ldv_func_arg1->work);
  return (ldv_func_res);
  }
}
bool ldv_flush_delayed_work_147(struct delayed_work *ldv_func_arg1 )
{
  ldv_func_ret_type___6 ldv_func_res ;
  bool tmp ;
  {
  tmp = flush_delayed_work(ldv_func_arg1);
  ldv_func_res = tmp;
  call_and_disable_work_18(& ldv_func_arg1->work);
  return (ldv_func_res);
  }
}
/* ldv_flush_delayed_work_148 continues on the next chunk line */
bool ldv_flush_delayed_work_148(struct delayed_work *ldv_func_arg1 )
{
  ldv_func_ret_type___7 ldv_func_res ;
  bool tmp ;
  {
  tmp =
/* tail of ldv_flush_delayed_work_148 (header on the previous chunk line) */
flush_delayed_work(ldv_func_arg1);
ldv_func_res = tmp;
call_and_disable_work_18(& ldv_func_arg1->work);
return (ldv_func_res);
}
}
/* LDV model of flush_delayed_work(): flush, then run/disable the modelled work. */
bool ldv_flush_delayed_work_149(struct delayed_work *ldv_func_arg1 )
{
  ldv_func_ret_type___8 ldv_func_res ;
  bool tmp ;
  {
  tmp = flush_delayed_work(ldv_func_arg1);
  ldv_func_res = tmp;
  call_and_disable_work_18(& ldv_func_arg1->work);
  return (ldv_func_res);
  }
}
/* Forward declarations of LDV helpers modelling __builtin_expect / IS_ERR. */
__inline static long ldv__builtin_expect(long exp , long c ) ;
bool ldv_is_err(void const *ptr ) ;
void *ldv_err_ptr(long error ) ;
long ldv_ptr_err(void const *ptr ) ;
/*
 * CIL-flattened copies of the <linux/list.h> hlist primitives.
 * An hlist_node is "unhashed" when its pprev back-pointer is NULL.
 */
/* Initialize a node as unhashed (both links NULL). */
__inline static void INIT_HLIST_NODE(struct hlist_node *h )
{
  {
  h->next = (struct hlist_node *)0;
  h->pprev = (struct hlist_node **)0;
  return;
  }
}
/* Non-zero iff h is not currently on any list (pprev == NULL). */
__inline static int hlist_unhashed(struct hlist_node const *h )
{
  {
  return ((unsigned long )h->pprev == (unsigned long )((struct hlist_node **/* const */)0));
  }
}
/* Non-zero iff the list headed at h has no nodes. */
__inline static int hlist_empty(struct hlist_head const *h )
{
  {
  return ((unsigned long )h->first == (unsigned long )((struct hlist_node */* const */)0));
  }
}
/* Unlink n from its list; n's own links are left stale (internal helper). */
__inline static void __hlist_del(struct hlist_node *n )
{
  struct hlist_node *next ;
  struct hlist_node **pprev ;
  {
  next = n->next;
  pprev = n->pprev;
  *pprev = next;
  if ((unsigned long )next != (unsigned long )((struct hlist_node *)0)) {
    next->pprev = pprev;
  } else {
  }
  return;
  }
}
/* Unlink n if it is hashed, then reinitialize it so it reads as unhashed. */
__inline static void hlist_del_init(struct hlist_node *n )
{
  int tmp ;
  {
  tmp = hlist_unhashed((struct hlist_node const *)n);
  if (tmp == 0) {
    __hlist_del(n);
    INIT_HLIST_NODE(n);
  } else {
  }
  return;
  }
}
/* Insert n at the front of the list headed at h. */
__inline static void hlist_add_head(struct hlist_node *n , struct hlist_head *h )
{
  struct hlist_node *first ;
  {
  first = h->first;
  n->next = first;
  if ((unsigned long )first != (unsigned long )((struct hlist_node *)0)) {
    first->pprev = & n->next;
  } else {
  }
  h->first = n;
  n->pprev = & h->first;
  return;
  }
}
__inline static void *ERR_PTR(long error ) ;
__inline static long PTR_ERR(void const *ptr ) ;
__inline static bool IS_ERR(void const *ptr ) ;
/* declaration continues on the next chunk line */
bool ldv_queue_work_on_165(int ldv_func_arg1
, struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_167(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_166(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_169(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_168(struct workqueue_struct *ldv_func_arg1 ) ; extern void kvfree(void const * ) ; __inline static struct page *sg_page___0(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_26144: ; goto ldv_26144; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_26145: ; goto ldv_26145; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } extern bool __sg_page_iter_next(struct sg_page_iter * ) ; extern void __sg_page_iter_start(struct sg_page_iter * , struct scatterlist * , unsigned int , unsigned long ) ; __inline static struct page *sg_page_iter_page(struct sg_page_iter *piter ) { struct page *tmp ; { tmp = sg_page___0(piter->sg); return ((struct page *)-24189255811072L + ((unsigned long )(((long )tmp + 24189255811072L) / 64L) + (unsigned long )piter->sg_pgoffset)); } } extern void *__vmalloc(unsigned long , gfp_t , pgprot_t ) ; extern void *vmap(struct page 
** , unsigned int , unsigned long , pgprot_t ) ; extern void vunmap(void const * ) ; __inline static void *drm_malloc_ab(size_t nmemb , size_t size ) { void *tmp ; pgprot_t __constr_expr_0___0 ; void *tmp___0 ; { if (size != 0UL && 0xffffffffffffffffUL / size < nmemb) { return ((void *)0); } else { } if (size * nmemb <= 4096UL) { tmp = kmalloc(nmemb * size, 208U); return (tmp); } else { } __constr_expr_0___0.pgprot = 0x8000000000000163UL; tmp___0 = __vmalloc(size * nmemb, 210U, __constr_expr_0___0); return (tmp___0); } } __inline static void drm_free_large(void *ptr ) { { kvfree((void const *)ptr); return; } } extern void drm_clflush_virt_range(void * , unsigned long ) ; __inline static u32 hash_32(u32 val , unsigned int bits ) { u32 hash ; { hash = val * 2654404609U; return (hash >> (int )(32U - bits)); } } __inline static void __hash_init(struct hlist_head *ht , unsigned int sz ) { unsigned int i ; { i = 0U; goto ldv_40449; ldv_40448: (ht + (unsigned long )i)->first = (struct hlist_node *)0; i = i + 1U; ldv_40449: ; if (i < sz) { goto ldv_40448; } else { } return; } } __inline static bool __hash_empty(struct hlist_head *ht , unsigned int sz ) { unsigned int i ; int tmp ; { i = 0U; goto ldv_40460; ldv_40459: tmp = hlist_empty((struct hlist_head const *)ht + (unsigned long )i); if (tmp == 0) { return (0); } else { } i = i + 1U; ldv_40460: ; if (i < sz) { goto ldv_40459; } else { } return (1); } } __inline static void hash_del(struct hlist_node *node ) { { hlist_del_init(node); return; } } int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj , int *needs_clflush ) ; __inline static void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj ) { long tmp ; { tmp = ldv__builtin_expect(obj->pages_pin_count == 0, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2778), "i" (12UL)); ldv_47345: ; goto ldv_47345; } else { } obj->pages_pin_count = obj->pages_pin_count - 1; return; } } int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj , bool write ) ; int i915_cmd_parser_get_version(void) ; int i915_cmd_parser_init_ring(struct intel_engine_cs *ring ) ; void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring ) ; bool i915_needs_cmd_parser(struct intel_engine_cs *ring ) ; int i915_parse_cmds(struct intel_engine_cs *ring , struct drm_i915_gem_object *batch_obj , struct drm_i915_gem_object *shadow_batch_obj , u32 batch_start_offset , u32 batch_len , bool is_master ) ; static struct drm_i915_cmd_descriptor const common_cmds[12U] = { {3U, {0U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {5U, {16777216U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {33U, {25165824U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {41943040U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {58720256U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {92274688U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {4U, {184549377U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {4U, {276824065U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {8U, {285212673U, 4286578688U}, {255U}, {1U, 8388604U, 2U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {24U, {301989889U, 4286578688U}, {255U}, {1U, 8388604U, 0U}, {{0U, 4194304U, 0U, 0U, 
0U}}}, {24U, {343932928U, 4286578688U}, {255U}, {1U, 8388604U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {2U, {411041792U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}}; static struct drm_i915_cmd_descriptor const render_cmds[20U] = { {3U, {33554432U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {5U, {67108864U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {100663296U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {109051904U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {4U, {167772162U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {117440512U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {4U, {201326592U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {209715200U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {268435457U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {4U, {293601280U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {327155712U, 4286578688U}, {1023U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {16U, {335544320U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{1U, 1U, 0U, 0U, 0U}}}, {16U, {452984832U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {3U, {1745551360U, 4294901760U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {1761869824U, 4294901760U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {1879048192U, 4294901760U}, 
{65535U}, {0U, 0U, 0U}, {{2U, 24U, 0U, 0U, 0U}}}, {2U, {1896087552U, 4294901760U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {1896153088U, 4294901760U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2031550464U, 4294901760U}, {511U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {2046820355U, 4294901760U}, {255U}, {0U, 0U, 0U}, {{1U, 8388864U, 0U, 0U, 0U}, {1U, 18874368U, 0U, 1U, 49152U}}}}; static struct drm_i915_cmd_descriptor const hsw_render_cmds[18U] = { {3U, {8388608U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {50331648U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {75497472U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {117440512U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {125829120U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {32U, {150994944U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {4U, {159383552U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {4U, {352321536U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {360710144U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {369098752U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {377487360U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2017001472U, 4294901760U}, {2047U}, {0U, 
0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2017067008U, 4294901760U}, {2047U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2017656832U, 4294901760U}, {511U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2017722368U, 4294901760U}, {511U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2017787904U, 4294901760U}, {511U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2017853440U, 4294901760U}, {511U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {2017918976U, 4294901760U}, {511U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}}; static struct drm_i915_cmd_descriptor const video_cmds[7U] = { {5U, {67108864U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {117440512U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {268435457U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {4U, {293601280U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {318767105U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 256U, 0U, 0U, 0U}, {1U, 4U, 0U, 0U, 49152U}, {0U, 2097152U, 0U, 0U, 49152U}}}, {16U, {452984832U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {3U, {1744830464U, 4294901760U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}}; static struct drm_i915_cmd_descriptor const vecs_cmds[6U] = { {5U, {67108864U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {3U, {117440512U, 4286578688U}, {1U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, 
{268435457U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {4U, {293601280U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {318767105U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 256U, 0U, 0U, 0U}, {1U, 4U, 0U, 0U, 49152U}, {0U, 2097152U, 0U, 0U, 49152U}}}, {16U, {452984832U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}}; static struct drm_i915_cmd_descriptor const blt_cmds[6U] = { {4U, {167772162U, 4286578688U}, {255U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {268435457U, 4286578688U}, {1023U}, {0U, 0U, 0U}, {{0U, 4194304U, 0U, 0U, 0U}}}, {4U, {293601280U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {16U, {318767105U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 256U, 0U, 0U, 0U}, {1U, 4U, 0U, 0U, 49152U}, {0U, 2097152U, 0U, 0U, 49152U}}}, {2U, {1342177280U, 4290772992U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {2U, {1354760192U, 4290772992U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}}; static struct drm_i915_cmd_descriptor const hsw_blt_cmds[2U] = { {32U, {150994944U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}, {4U, {159383552U, 4286578688U}, {63U}, {0U, 0U, 0U}, {{0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U, 0U}}}}; static struct drm_i915_cmd_table const gen7_render_cmds[2U] = { {(struct drm_i915_cmd_descriptor const *)(& common_cmds), 12}, {(struct drm_i915_cmd_descriptor const *)(& render_cmds), 20}}; static struct drm_i915_cmd_table const hsw_render_ring_cmds[3U] = { {(struct drm_i915_cmd_descriptor const *)(& common_cmds), 12}, {(struct drm_i915_cmd_descriptor const *)(& render_cmds), 20}, {(struct drm_i915_cmd_descriptor const *)(& hsw_render_cmds), 18}}; static struct 
drm_i915_cmd_table const gen7_video_cmds[2U] = { {(struct drm_i915_cmd_descriptor const *)(& common_cmds), 12}, {(struct drm_i915_cmd_descriptor const *)(& video_cmds), 7}}; static struct drm_i915_cmd_table const hsw_vebox_cmds[2U] = { {(struct drm_i915_cmd_descriptor const *)(& common_cmds), 12}, {(struct drm_i915_cmd_descriptor const *)(& vecs_cmds), 6}}; static struct drm_i915_cmd_table const gen7_blt_cmds[2U] = { {(struct drm_i915_cmd_descriptor const *)(& common_cmds), 12}, {(struct drm_i915_cmd_descriptor const *)(& blt_cmds), 6}}; static struct drm_i915_cmd_table const hsw_blt_ring_cmds[3U] = { {(struct drm_i915_cmd_descriptor const *)(& common_cmds), 12}, {(struct drm_i915_cmd_descriptor const *)(& blt_cmds), 6}, {(struct drm_i915_cmd_descriptor const *)(& hsw_blt_cmds), 2}}; static struct drm_i915_reg_descriptor const gen7_render_regs[60U] = { {8848U, 0U, 0U}, {8852U, 0U, 0U}, {8960U, 0U, 0U}, {8964U, 0U, 0U}, {8968U, 0U, 0U}, {8972U, 0U, 0U}, {8976U, 0U, 0U}, {8980U, 0U, 0U}, {8984U, 0U, 0U}, {8988U, 0U, 0U}, {8992U, 0U, 0U}, {8996U, 0U, 0U}, {9000U, 0U, 0U}, {9004U, 0U, 0U}, {9008U, 0U, 0U}, {9012U, 0U, 0U}, {9016U, 0U, 0U}, {9020U, 0U, 0U}, {9024U, 0U, 0U}, {9028U, 0U, 0U}, {9032U, 0U, 0U}, {9036U, 0U, 0U}, {9040U, 0U, 0U}, {9044U, 0U, 0U}, {9056U, 0U, 0U}, {9216U, 0U, 0U}, {9220U, 0U, 0U}, {9224U, 0U, 0U}, {9228U, 0U, 0U}, {9248U, 0U, 0U}, {9264U, 0U, 0U}, {9268U, 0U, 0U}, {9272U, 0U, 0U}, {9276U, 0U, 0U}, {9280U, 0U, 0U}, {20992U, 0U, 0U}, {20996U, 0U, 0U}, {21000U, 0U, 0U}, {21004U, 0U, 0U}, {21008U, 0U, 0U}, {21012U, 0U, 0U}, {21016U, 0U, 0U}, {21020U, 0U, 0U}, {21056U, 0U, 0U}, {21060U, 0U, 0U}, {21064U, 0U, 0U}, {21068U, 0U, 0U}, {21072U, 0U, 0U}, {21076U, 0U, 0U}, {21080U, 0U, 0U}, {21084U, 0U, 0U}, {21120U, 0U, 0U}, {21124U, 0U, 0U}, {21128U, 0U, 0U}, {21132U, 0U, 0U}, {45072U, 0U, 0U}, {45088U, 0U, 0U}, {45092U, 0U, 0U}, {45112U, 4160749567U, 0U}, {58524U, 4290772927U, 0U}}; static struct drm_i915_reg_descriptor const gen7_blt_regs[1U] = { 
{139776U, 0U, 0U}}; static struct drm_i915_reg_descriptor const ivb_master_regs[5U] = { {41352U, 0U, 0U}, {278608U, 0U, 0U}, {458856U, 0U, 0U}, {462952U, 0U, 0U}, {467048U, 0U, 0U}}; static struct drm_i915_reg_descriptor const hsw_master_regs[2U] = { {41352U, 0U, 0U}, {278608U, 0U, 0U}}; static u32 gen7_render_get_cmd_length_mask(u32 cmd_header ) { u32 client ; u32 subclient ; long tmp ; { client = cmd_header >> 29; subclient = (cmd_header & 402653184U) >> 27; if (client == 0U) { return (63U); } else if (client == 3U) { if (subclient == 2U) { return (65535U); } else { return (255U); } } else { } tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen7_render_get_cmd_length_mask", "CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header); } else { } return (0U); } } static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header ) { u32 client ; u32 subclient ; u32 op ; long tmp ; { client = cmd_header >> 29; subclient = (cmd_header & 402653184U) >> 27; op = (cmd_header & 117440512U) >> 24; if (client == 0U) { return (63U); } else if (client == 3U) { if (subclient == 2U) { if (op == 6U) { return (65535U); } else { return (4095U); } } else { return (255U); } } else { } tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen7_bsd_get_cmd_length_mask", "CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header); } else { } return (0U); } } static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header ) { u32 client ; long tmp ; { client = cmd_header >> 29; if (client == 0U) { return (63U); } else if (client == 2U) { return (255U); } else { } tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen7_blt_get_cmd_length_mask", "CMD: Abnormal blt cmd length! 
0x%08X\n", cmd_header); } else { } return (0U); } } static bool validate_cmds_sorted(struct intel_engine_cs *ring , struct drm_i915_cmd_table const *cmd_tables , int cmd_table_count ) { int i ; bool ret ; struct drm_i915_cmd_table const *table ; u32 previous ; int j ; struct drm_i915_cmd_descriptor const *desc ; u32 curr ; { ret = 1; if ((unsigned long )cmd_tables == (unsigned long )((struct drm_i915_cmd_table const *)0) || cmd_table_count == 0) { return (1); } else { } i = 0; goto ldv_48059; ldv_48058: table = cmd_tables + (unsigned long )i; previous = 0U; j = 0; goto ldv_48056; ldv_48055: desc = table->table + (unsigned long )i; curr = (unsigned int )desc->cmd.value & (unsigned int )desc->cmd.mask; if (curr < previous) { drm_err("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n", (unsigned int )ring->id, i, j, curr, previous); ret = 0; } else { } previous = curr; j = j + 1; ldv_48056: ; if ((int )table->count > j) { goto ldv_48055; } else { } i = i + 1; ldv_48059: ; if (i < cmd_table_count) { goto ldv_48058; } else { } return (ret); } } static bool check_sorted(int ring_id , struct drm_i915_reg_descriptor const *reg_table , int reg_count ) { int i ; u32 previous ; bool ret ; u32 curr ; { previous = 0U; ret = 1; i = 0; goto ldv_48071; ldv_48070: curr = (reg_table + (unsigned long )i)->addr; if (curr < previous) { drm_err("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", ring_id, i, curr, previous); ret = 0; } else { } previous = curr; i = i + 1; ldv_48071: ; if (i < reg_count) { goto ldv_48070; } else { } return (ret); } } static bool validate_regs_sorted(struct intel_engine_cs *ring ) { bool tmp ; bool tmp___0 ; int tmp___1 ; { tmp = check_sorted((int )ring->id, ring->reg_table, ring->reg_count); if ((int )tmp) { tmp___0 = check_sorted((int )ring->id, ring->master_reg_table, ring->master_reg_count); if ((int )tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } return ((bool )tmp___1); } } static int 
init_hash_table(struct intel_engine_cs *ring , struct drm_i915_cmd_table const *cmd_tables , int cmd_table_count ) { int i ; int j ; struct drm_i915_cmd_table const *table ; struct drm_i915_cmd_descriptor const *desc ; struct cmd_node *desc_node ; void *tmp ; u32 tmp___0 ; { __hash_init((struct hlist_head *)(& ring->cmd_hash), 512U); i = 0; goto ldv_48371; ldv_48370: table = cmd_tables + (unsigned long )i; j = 0; goto ldv_48368; ldv_48367: desc = table->table + (unsigned long )j; tmp = kmalloc(24UL, 208U); desc_node = (struct cmd_node *)tmp; if ((unsigned long )desc_node == (unsigned long )((struct cmd_node *)0)) { return (-12); } else { } desc_node->desc = desc; tmp___0 = hash_32((unsigned int )desc->cmd.value & 4286578688U, 9U); hlist_add_head(& desc_node->node, (struct hlist_head *)(& ring->cmd_hash) + (u64 )tmp___0); j = j + 1; ldv_48368: ; if ((int )table->count > j) { goto ldv_48367; } else { } i = i + 1; ldv_48371: ; if (i < cmd_table_count) { goto ldv_48370; } else { } return (0); } } static void fini_hash_table(struct intel_engine_cs *ring ) { struct hlist_node *tmp ; struct cmd_node *desc_node ; int i ; struct hlist_node *____ptr ; struct hlist_node const *__mptr ; struct cmd_node *tmp___0 ; struct hlist_node *____ptr___0 ; struct hlist_node const *__mptr___0 ; struct cmd_node *tmp___1 ; { i = 0; desc_node = (struct cmd_node *)0; goto ldv_48394; ldv_48393: ____ptr = ((struct hlist_head *)(& ring->cmd_hash) + (unsigned long )i)->first; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___0 = (struct cmd_node *)__mptr + 0xfffffffffffffff8UL; } else { tmp___0 = (struct cmd_node *)0; } desc_node = tmp___0; goto ldv_48391; ldv_48390: hash_del(& desc_node->node); kfree((void const *)desc_node); ____ptr___0 = tmp; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___1 = (struct cmd_node *)__mptr___0 + 
0xfffffffffffffff8UL; } else { tmp___1 = (struct cmd_node *)0; } desc_node = tmp___1; ldv_48391: ; if ((unsigned long )desc_node != (unsigned long )((struct cmd_node *)0)) { tmp = desc_node->node.next; goto ldv_48390; } else { } i = i + 1; ldv_48394: ; if ((unsigned long )desc_node == (unsigned long )((struct cmd_node *)0) && (unsigned int )i <= 511U) { goto ldv_48393; } else { } return; } } int i915_cmd_parser_init_ring(struct intel_engine_cs *ring ) { struct drm_i915_cmd_table const *cmd_tables ; int cmd_table_count ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; bool tmp ; int tmp___0 ; long tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; int __ret_warn_on ; bool tmp___5 ; int tmp___6 ; long tmp___7 ; { __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) != 7U) { return (0); } else { } switch ((unsigned int )ring->id) { case 0U: __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { cmd_tables = (struct drm_i915_cmd_table const *)(& hsw_render_ring_cmds); cmd_table_count = 3; } else { cmd_tables = (struct drm_i915_cmd_table const *)(& gen7_render_cmds); cmd_table_count = 2; } ring->reg_table = (struct drm_i915_reg_descriptor const *)(& gen7_render_regs); ring->reg_count = 60; __p___1 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { ring->master_reg_table = (struct drm_i915_reg_descriptor const *)(& hsw_master_regs); ring->master_reg_count = 2; } else { ring->master_reg_table = (struct drm_i915_reg_descriptor const *)(& ivb_master_regs); ring->master_reg_count = 5; } ring->get_cmd_length_mask = & gen7_render_get_cmd_length_mask; goto ldv_48431; case 1U: cmd_tables = (struct drm_i915_cmd_table const *)(& gen7_video_cmds); cmd_table_count = 2; 
ring->get_cmd_length_mask = & gen7_bsd_get_cmd_length_mask; goto ldv_48431; case 2U: __p___2 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { cmd_tables = (struct drm_i915_cmd_table const *)(& hsw_blt_ring_cmds); cmd_table_count = 3; } else { cmd_tables = (struct drm_i915_cmd_table const *)(& gen7_blt_cmds); cmd_table_count = 2; } ring->reg_table = (struct drm_i915_reg_descriptor const *)(& gen7_blt_regs); ring->reg_count = 1; __p___3 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { ring->master_reg_table = (struct drm_i915_reg_descriptor const *)(& hsw_master_regs); ring->master_reg_count = 2; } else { ring->master_reg_table = (struct drm_i915_reg_descriptor const *)(& ivb_master_regs); ring->master_reg_count = 5; } ring->get_cmd_length_mask = & gen7_blt_get_cmd_length_mask; goto ldv_48431; case 3U: cmd_tables = (struct drm_i915_cmd_table const *)(& hsw_vebox_cmds); cmd_table_count = 2; ring->get_cmd_length_mask = & gen7_bsd_get_cmd_length_mask; goto ldv_48431; default: drm_err("CMD: cmd_parser_init with unknown ring: %d\n", (unsigned int )ring->id); __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_cmd_parser.c"), "i" (754), "i" (12UL)); ldv_48462: ; goto ldv_48462; } ldv_48431: tmp = validate_cmds_sorted(ring, cmd_tables, cmd_table_count); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } tmp___1 = ldv__builtin_expect((long )tmp___0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_cmd_parser.c"), "i" (757), "i" (12UL)); ldv_48463: ; goto ldv_48463; } else { } tmp___2 = validate_regs_sorted(ring); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } tmp___4 = ldv__builtin_expect((long )tmp___3, 0L); if (tmp___4 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_cmd_parser.c"), "i" (758), "i" (12UL)); ldv_48464: ; goto ldv_48464; } else { } tmp___5 = __hash_empty((struct hlist_head *)(& ring->cmd_hash), 512U); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } __ret_warn_on = tmp___6; tmp___7 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_cmd_parser.c", 760, "WARN_ON(!hash_empty(ring->cmd_hash))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = init_hash_table(ring, cmd_tables, cmd_table_count); if (ret != 0) { drm_err("CMD: cmd_parser_init failed!\n"); fini_hash_table(ring); return (ret); } else { } ring->needs_cmd_parser = 1; return (0); } } void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring ) { { if (! 
/* tail of i915_cmd_parser_fini_ring (header on the previous chunk line):
 * nothing to tear down unless the cmd parser was initialised for this ring */
ring->needs_cmd_parser) {
return;
} else {
}
fini_hash_table(ring);
return;
}
}
/*
 * Look up the descriptor for cmd_header in the ring's command hash table.
 * Hashes the header's client/opcode field (mask 4286578688U == 0xFF800000)
 * into one of 512 buckets, then walks the bucket's hlist comparing
 * (header & desc->cmd.mask) against the descriptor's canonical
 * (value & mask).  Returns NULL when no descriptor matches.  The
 * `__mptr + 0xfffffffffffffff8UL` arithmetic is CIL's expansion of
 * container_of() -- presumably cmd_node's hlist_node sits at offset 8;
 * confirm against the struct layout.
 */
static struct drm_i915_cmd_descriptor const *find_cmd_in_table(struct intel_engine_cs *ring , u32 cmd_header )
{
  struct cmd_node *desc_node ;
  struct hlist_node *____ptr ;
  u32 tmp ;
  struct hlist_node const *__mptr ;
  struct cmd_node *tmp___0 ;
  struct drm_i915_cmd_descriptor const *desc ;
  u32 masked_cmd ;
  u32 masked_value ;
  struct hlist_node *____ptr___0 ;
  struct hlist_node const *__mptr___0 ;
  struct cmd_node *tmp___1 ;
  {
  /* 9-bit bucket index over the opcode bits */
  tmp = hash_32(cmd_header & 4286578688U, 9U);
  ____ptr = ((struct hlist_head *)(& ring->cmd_hash) + (u64 )tmp)->first;
  if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) {
    __mptr = (struct hlist_node const *)____ptr;
    tmp___0 = (struct cmd_node *)__mptr + 0xfffffffffffffff8UL;
  } else {
    tmp___0 = (struct cmd_node *)0;
  }
  desc_node = tmp___0;
  goto ldv_49041;
  ldv_49040:
  desc = desc_node->desc;
  masked_cmd = (unsigned int )desc->cmd.mask & cmd_header;
  masked_value = (unsigned int )desc->cmd.value & (unsigned int )desc->cmd.mask;
  if (masked_cmd == masked_value) {
    return (desc);
  } else {
  }
  /* advance to the next cmd_node in this bucket */
  ____ptr___0 = desc_node->node.next;
  if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) {
    __mptr___0 = (struct hlist_node const *)____ptr___0;
    tmp___1 = (struct cmd_node *)__mptr___0 + 0xfffffffffffffff8UL;
  } else {
    tmp___1 = (struct cmd_node *)0;
  }
  desc_node = tmp___1;
  ldv_49041: ;
  if ((unsigned long )desc_node != (unsigned long )((struct cmd_node *)0)) {
    goto ldv_49040;
  } else {
  }
  return ((struct drm_i915_cmd_descriptor const *)0);
  }
}
/*
 * Resolve cmd_header to a descriptor: first try the per-ring hash table,
 * otherwise build a default "skip" descriptor whose length mask comes from
 * the ring's get_cmd_length_mask hook (function continues on the next
 * chunk line).
 */
static struct drm_i915_cmd_descriptor const *find_cmd(struct intel_engine_cs *ring , u32 cmd_header ,
                                                      struct drm_i915_cmd_descriptor *default_desc )
{
  struct drm_i915_cmd_descriptor const *desc ;
  u32 mask ;
  long tmp ;
  {
  desc = find_cmd_in_table(ring, cmd_header);
  if ((unsigned long )desc != (unsigned long )((struct drm_i915_cmd_descriptor const *)0)) {
    return (desc);
  } else {
  }
  mask =
/* Tail of find_cmd(): no hash-table hit, so ask the ring's callback for a
 * length mask; a zero mask means the header is unrecognisable. */
(*(ring->get_cmd_length_mask))(cmd_header); if (mask == 0U) { return ((struct drm_i915_cmd_descriptor const *)0); } else { } tmp = ldv__builtin_expect((unsigned long )default_desc == (unsigned long )((struct drm_i915_cmd_descriptor *)0), 0L); if (tmp != 0L) { /* CIL-expanded BUG(): caller must always supply a default descriptor */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_cmd_parser.c"), "i" (832), "i" (12UL)); ldv_49050: ; goto ldv_49050; } else { } default_desc->flags = 2U; /* presumably CMD_DESC_SKIP — TODO confirm against i915_cmd_parser.h */ default_desc->length.mask = mask; return ((struct drm_i915_cmd_descriptor const *)default_desc); } }
/*
 * find_reg() - linear scan of a register white-list table for addr.
 * Returns the matching descriptor, or NULL when the table is NULL or no
 * entry matches.
 */
static struct drm_i915_reg_descriptor const *find_reg(struct drm_i915_reg_descriptor const *table , int count , u32 addr ) { int i ; { if ((unsigned long )table != (unsigned long )((struct drm_i915_reg_descriptor const *)0)) { i = 0; goto ldv_49058; ldv_49057: ; if ((unsigned int )(table + (unsigned long )i)->addr == addr) { return (table + (unsigned long )i); } else { } i = i + 1; ldv_49058: ; if (i < count) { goto ldv_49057; } else { } } else { } return ((struct drm_i915_reg_descriptor const *)0); } }
/*
 * vmap_batch() - map the pages of a GEM object covering [start, start+len)
 * into one contiguous kernel virtual range. Page maths assumes 4 KiB pages
 * (>> 12, + 4095U round-up). Returns NULL on failure; the temporary pages[]
 * array is always released via the "finish" label (body continues on the
 * next physical line).
 */
static u32 *vmap_batch(struct drm_i915_gem_object *obj , unsigned int start , unsigned int len ) { int i ; void *addr ; struct sg_page_iter sg_iter ; int first_page ; int last_page ; int npages ; struct page **pages ; void *tmp ; long tmp___0 ; int tmp___1 ; bool tmp___2 ; pgprot_t __constr_expr_0___0 ; long tmp___3 ; { addr = (void *)0; first_page = (int )(start >> 12); last_page = (int )(((len + start) + 4095U) >> 12); npages = last_page - first_page; tmp = drm_malloc_ab((size_t )npages, 8UL); pages = (struct page **)tmp; if ((unsigned long )pages == (unsigned long )((struct page **)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0
!= 0L) { drm_ut_debug_printk("vmap_batch", "Failed to get space for pages\n"); } else { } goto finish; } else { } i = 0; __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, (unsigned long )first_page); goto ldv_49076; ldv_49075: tmp___1 = i; i = i + 1; *(pages + (unsigned long )tmp___1) = sg_page_iter_page(& sg_iter); if (i == npages) { goto ldv_49074; } else { } ldv_49076: tmp___2 = __sg_page_iter_next(& sg_iter); if ((int )tmp___2) { goto ldv_49075; } else { } ldv_49074: __constr_expr_0___0.pgprot = 0x8000000000000163UL; addr = vmap(pages, (unsigned int )i, 0UL, __constr_expr_0___0); if ((unsigned long )addr == (unsigned long )((void *)0)) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("vmap_batch", "Failed to vmap pages\n"); } else { } goto finish; } else { } finish: ; if ((unsigned long )pages != (unsigned long )((struct page **)0)) { drm_free_large((void *)pages); } else { } return ((u32 *)addr); } } static u32 *copy_batch(struct drm_i915_gem_object *dest_obj , struct drm_i915_gem_object *src_obj , u32 batch_start_offset , u32 batch_len ) { int needs_clflush ; void *src_base ; void *src ; void *dst ; int ret ; void *tmp ; void *tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; long tmp___3 ; void *tmp___4 ; u32 *tmp___5 ; long tmp___6 ; long tmp___7 ; u32 *tmp___8 ; long tmp___9 ; void *tmp___10 ; void *tmp___11 ; { needs_clflush = 0; dst = (void *)0; if ((size_t )batch_len > dest_obj->base.size || (size_t )(batch_len + batch_start_offset) > src_obj->base.size) { tmp = ERR_PTR(-7L); return ((u32 *)tmp); } else { } __ret_warn_on = dest_obj->pages_pin_count == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_cmd_parser.c", 906, 
"WARN_ON(dest_obj->pages_pin_count == 0)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { tmp___0 = ERR_PTR(-19L); return ((u32 *)tmp___0); } else { } ret = i915_gem_obj_prepare_shmem_read(src_obj, & needs_clflush); if (ret != 0) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("copy_batch", "CMD: failed to prepare shadow batch\n"); } else { } tmp___4 = ERR_PTR((long )ret); return ((u32 *)tmp___4); } else { } tmp___5 = vmap_batch(src_obj, batch_start_offset, batch_len); src_base = (void *)tmp___5; if ((unsigned long )src_base == (unsigned long )((void *)0)) { tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("copy_batch", "CMD: Failed to vmap batch\n"); } else { } ret = -12; goto unpin_src; } else { } ret = i915_gem_object_set_to_cpu_domain(dest_obj, 1); if (ret != 0) { tmp___7 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("copy_batch", "CMD: Failed to set shadow batch to CPU\n"); } else { } goto unmap_src; } else { } tmp___8 = vmap_batch(dest_obj, 0U, batch_len); dst = (void *)tmp___8; if ((unsigned long )dst == (unsigned long )((void *)0)) { tmp___9 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("copy_batch", "CMD: Failed to vmap shadow batch\n"); } else { } ret = -12; goto unmap_src; } else { } src = src_base + ((unsigned long )batch_start_offset & 4095UL); if (needs_clflush != 0) { drm_clflush_virt_range(src, (unsigned long )batch_len); } else { } memcpy(dst, (void const *)src, (size_t )batch_len); unmap_src: vunmap((void const *)src_base); unpin_src: i915_gem_object_unpin_pages(src_obj); if (ret != 0) { tmp___10 = ERR_PTR((long )ret); tmp___11 = tmp___10; } else { tmp___11 = dst; } return ((u32 *)tmp___11); } } bool i915_needs_cmd_parser(struct intel_engine_cs *ring ) { { if (! 
/* Tail of i915_needs_cmd_parser(): parsing happens only when the ring set it
 * up, PPGTT is enabled, and i915.enable_cmd_parser == 1. */
ring->needs_cmd_parser) { return (0); } else { } if (i915.enable_ppgtt == 0) { return (0); } else { } return (i915.enable_cmd_parser == 1); } }
/*
 * check_cmd() - validate one decoded command against its descriptor.
 * Descriptor flag bits tested below (numeric values as emitted by CIL):
 *   4U  - command is unconditionally rejected;
 *   32U - command is allowed only when is_master is set;
 *   8U  - command accesses registers, so each register operand must appear
 *         in the ring's white-list (that loop starts at the end of this
 *         line and continues on the next physical line).
 * *oacontrol_set is updated further down so the caller can verify OACONTROL
 * was cleared again before the batch ended. Returns 0 (false) to reject.
 */
static bool check_cmd(struct intel_engine_cs const *ring , struct drm_i915_cmd_descriptor const *desc , u32 const *cmd , u32 length , bool const is_master , bool *oacontrol_set ) { long tmp ; long tmp___0 ; u32 step ; u32 offset ; u32 reg_addr ; struct drm_i915_reg_descriptor const *reg ; struct drm_i915_reg_descriptor const *tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; int i ; u32 dword ; u32 offset___0 ; u32 condition ; long tmp___6 ; { if (((unsigned int )desc->flags & 4U) != 0U) { /* rejected outright */ tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("check_cmd", "CMD: Rejected command: 0x%08X\n", *cmd); } else { } return (0); } else { } if (((unsigned int )desc->flags & 32U) != 0U && ! ((_Bool )is_master)) { /* master-only command from a non-master client */ tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("check_cmd", "CMD: Rejected master-only command: 0x%08X\n", *cmd); } else { } return (0); } else { } if (((unsigned int )desc->flags & 8U) != 0U) { /* register-access check: step 0 means a single register operand */ step = (unsigned int )desc->reg.step != 0U ?
desc->reg.step : (unsigned int const )length; offset = desc->reg.offset; goto ldv_49111; ldv_49110: reg_addr = (unsigned int )*(cmd + (unsigned long )offset) & (unsigned int )desc->reg.mask; tmp___1 = find_reg(ring->reg_table, ring->reg_count, reg_addr); reg = tmp___1; if ((unsigned long )reg == (unsigned long )((struct drm_i915_reg_descriptor const *)0) && (int )is_master) { reg = find_reg(ring->master_reg_table, ring->master_reg_count, reg_addr); } else { } if ((unsigned long )reg == (unsigned long )((struct drm_i915_reg_descriptor const *)0)) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("check_cmd", "CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n", reg_addr, *cmd, (unsigned int )ring->id); } else { } return (0); } else { } if (reg_addr == 9056U) { if ((unsigned int )desc->cmd.value == 343932928U) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("check_cmd", "CMD: Rejected LRM to OACONTROL\n"); } else { } return (0); } else { } if ((unsigned int )desc->cmd.value == 285212673U) { *oacontrol_set = (unsigned int )*(cmd + (unsigned long )(offset + 1U)) != 0U; } else { } } else { } if ((unsigned int )reg->mask != 0U) { if ((unsigned int )desc->cmd.value == 343932928U) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("check_cmd", "CMD: Rejected LRM to masked register 0x%08X\n", reg_addr); } else { } return (0); } else { } if ((unsigned int )desc->cmd.value == 285212673U && (offset + 2U > length || ((unsigned int )*(cmd + (unsigned long )(offset + 1U)) & (unsigned int )reg->mask) != (unsigned int )reg->value)) { tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("check_cmd", "CMD: Rejected LRI to masked register 0x%08X\n", reg_addr); } else { } return (0); } else { } } else { } offset = offset + step; ldv_49111: ; if (offset < length) { goto 
ldv_49110; } else { } } else { } if (((unsigned int )desc->flags & 16U) != 0U) { i = 0; goto ldv_49120; ldv_49119: ; if ((unsigned int )desc->bits[i].mask == 0U) { goto ldv_49115; } else { } if ((unsigned int )desc->bits[i].condition_mask != 0U) { offset___0 = desc->bits[i].condition_offset; condition = (unsigned int )*(cmd + (unsigned long )offset___0) & (unsigned int )desc->bits[i].condition_mask; if (condition == 0U) { goto ldv_49118; } else { } } else { } dword = (unsigned int )*(cmd + (unsigned long )desc->bits[i].offset) & (unsigned int )desc->bits[i].mask; if ((u32 )desc->bits[i].expected != dword) { tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("check_cmd", "CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n", *cmd, desc->bits[i].mask, desc->bits[i].expected, dword, (unsigned int )ring->id); } else { } return (0); } else { } ldv_49118: i = i + 1; ldv_49120: ; if (i <= 2) { goto ldv_49119; } else { } ldv_49115: ; } else { } return (1); } } int i915_parse_cmds(struct intel_engine_cs *ring , struct drm_i915_gem_object *batch_obj , struct drm_i915_gem_object *shadow_batch_obj , u32 batch_start_offset , u32 batch_len , bool is_master ) { u32 *cmd ; u32 *batch_base ; u32 *batch_end ; struct drm_i915_cmd_descriptor default_desc ; bool oacontrol_set ; int ret ; long tmp ; long tmp___0 ; bool tmp___1 ; struct drm_i915_cmd_descriptor const *desc ; u32 length ; long tmp___2 ; long tmp___3 ; bool tmp___4 ; int tmp___5 ; long tmp___6 ; long tmp___7 ; { default_desc.flags = 0U; default_desc.cmd.value = 0U; default_desc.cmd.mask = 0U; default_desc.length.fixed = 0U; default_desc.reg.offset = 0U; default_desc.reg.mask = 0U; default_desc.reg.step = 0U; default_desc.bits[0].offset = 0U; default_desc.bits[0].mask = 0U; default_desc.bits[0].expected = 0U; default_desc.bits[0].condition_offset = 0U; default_desc.bits[0].condition_mask = 0U; default_desc.bits[1].offset = 0U; 
default_desc.bits[1].mask = 0U; default_desc.bits[1].expected = 0U; default_desc.bits[1].condition_offset = 0U; default_desc.bits[1].condition_mask = 0U; default_desc.bits[2].offset = 0U; default_desc.bits[2].mask = 0U; default_desc.bits[2].expected = 0U; default_desc.bits[2].condition_offset = 0U; default_desc.bits[2].condition_mask = 0U; oacontrol_set = 0; ret = 0; batch_base = copy_batch(shadow_batch_obj, batch_obj, batch_start_offset, batch_len); tmp___1 = IS_ERR((void const *)batch_base); if ((int )tmp___1) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_parse_cmds", "CMD: Failed to copy batch\n"); } else { } tmp___0 = PTR_ERR((void const *)batch_base); return ((int )tmp___0); } else { } batch_end = batch_base + (unsigned long )(batch_len / 4U); cmd = batch_base; goto ldv_49140; ldv_49139: ; if (*cmd == 83886080U) { goto ldv_49138; } else { } desc = find_cmd(ring, *cmd, & default_desc); if ((unsigned long )desc == (unsigned long )((struct drm_i915_cmd_descriptor const *)0)) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("i915_parse_cmds", "CMD: Unrecognized command: 0x%08X\n", *cmd); } else { } ret = -22; goto ldv_49138; } else { } if ((unsigned int )desc->cmd.value == 411041792U) { ret = -13; goto ldv_49138; } else { } if ((int )desc->flags & 1) { length = desc->length.fixed; } else { length = (*cmd & (u32 )desc->length.mask) + 2U; } if (((long )batch_end - (long )cmd) / 4L < (long )length) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("i915_parse_cmds", "CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n", *cmd, length, ((long )batch_end - (long )cmd) / 4L); } else { } ret = -22; goto ldv_49138; } else { } tmp___4 = check_cmd((struct intel_engine_cs const *)ring, desc, (u32 const *)cmd, length, (int )is_master, & oacontrol_set); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 
1; } if (tmp___5) { ret = -22; goto ldv_49138; } else { } cmd = cmd + (unsigned long )length; ldv_49140: ; if ((unsigned long )cmd < (unsigned long )batch_end) { goto ldv_49139; } else { } ldv_49138: ; if ((int )oacontrol_set) { tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_parse_cmds", "CMD: batch set OACONTROL but did not clear it\n"); } else { } ret = -22; } else { } if ((unsigned long )cmd >= (unsigned long )batch_end) { tmp___7 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("i915_parse_cmds", "CMD: Got to the end of the buffer w/o a BBE cmd!\n"); } else { } ret = -22; } else { } vunmap((void const *)batch_base); return (ret); } } int i915_cmd_parser_get_version(void) { { return (3); } } __inline static void *ERR_PTR(long error ) { void *tmp ; { tmp = ldv_err_ptr(error); return (tmp); } } __inline static long PTR_ERR(void const *ptr ) { long tmp ; { tmp = ldv_ptr_err(ptr); return (tmp); } } __inline static bool IS_ERR(void const *ptr ) { bool tmp ; { tmp = ldv_is_err(ptr); return (tmp); } } bool ldv_queue_work_on_165(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_166(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_167(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool 
tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_168(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_169(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static int fls(int x ) { int r ; { __asm__ ("bsrl %1,%0": "=r" (r): "rm" (x), "0" (-1)); return (r + 1); } } __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, head); return; } } extern void list_del(struct list_head * ) ; __inline static void list_move_tail(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add_tail(list, head); return; } } __inline static void *ERR_PTR(long error ) ; bool ldv_queue_work_on_179(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_181(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_180(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_183(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_182(struct workqueue_struct *ldv_func_arg1 ) ; __inline static int kref_sub___0(struct kref *kref , unsigned int count , void 
(*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___0(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___0(kref, 1U, release); return (tmp); } } void i915_gem_batch_pool_init(struct drm_device *dev , struct i915_gem_batch_pool *pool ) ; void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool ) ; struct drm_i915_gem_object *i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool , size_t size ) ; __inline static void drm_gem_object_unreference___0(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___0(& obj->refcount, & drm_gem_object_free); } else { } return; } } struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev , size_t size ) ; __inline static void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )obj->pages == (unsigned long )((struct sg_table *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2773), "i" (12UL)); ldv_47341: ; goto ldv_47341; } else { } obj->pages_pin_count = obj->pages_pin_count + 1; return; } } void i915_gem_batch_pool_init(struct drm_device *dev , struct i915_gem_batch_pool *pool ) { int n ; { pool->dev = dev; n = 0; goto ldv_47985; ldv_47984: 
/* Tail of i915_gem_batch_pool_init(): initialise all 4 size-bucketed cache
 * lists (loop bound "n <= 3U"). */
INIT_LIST_HEAD((struct list_head *)(& pool->cache_list) + (unsigned long )n); n = n + 1; ldv_47985: ; if ((unsigned int )n <= 3U) { goto ldv_47984; } else { } return; } }
/*
 * i915_gem_batch_pool_fini() - release every object still cached in the pool.
 * WARNs (but carries on) when dev->struct_mutex is not held, then for each of
 * the 4 cache lists repeatedly unlinks the head object and drops its GEM
 * reference until list_empty() reports the list drained.
 * NOTE(review): "+ 0xfffffffffffffe78UL" is CIL's container_of() (offset
 * -392) from batch_pool_link back to struct drm_i915_gem_object — confirm
 * offset against the struct layout.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool ) { int n ; int __ret_warn_on ; int tmp ; long tmp___0 ; struct drm_i915_gem_object *obj ; struct list_head const *__mptr ; int tmp___1 ; { tmp = mutex_is_locked(& (pool->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_batch_pool.c", 68, "WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); n = 0; goto ldv_48002; ldv_48001: ; goto ldv_47999; ldv_47998: /* pop the head of cache list n and release it */ __mptr = (struct list_head const *)((struct list_head *)(& pool->cache_list) + (unsigned long )n)->next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffe78UL; list_del(& obj->batch_pool_link); drm_gem_object_unreference___0(& obj->base); ldv_47999: tmp___1 = list_empty((struct list_head const *)(& pool->cache_list) + (unsigned long )n); if (tmp___1 == 0) { goto ldv_47998; } else { } n = n + 1; ldv_48002: ; if ((unsigned int )n <= 3U) { goto ldv_48001; } else { } return; } }
/*
 * i915_gem_batch_pool_get() - fetch (or allocate) a cached batch object of
 * at least `size` bytes; only the declarations and the lock-held WARN check
 * appear on this line, the body continues on the next physical line (bucket
 * index there is fls(size >> 12) - 1, clamped to 3).
 */
struct drm_i915_gem_object *i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool , size_t size ) { struct drm_i915_gem_object *obj ; struct drm_i915_gem_object *tmp ; struct drm_i915_gem_object *next ; struct list_head *list ; int n ; int __ret_warn_on ; int tmp___0 ; long tmp___1 ; int tmp___2 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int ret ; void *tmp___3 ; void *tmp___4 ; { obj = (struct drm_i915_gem_object *)0; tmp___0 = mutex_is_locked(& (pool->dev)->struct_mutex);
__ret_warn_on = tmp___0 == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_batch_pool.c", 105, "WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = fls((int )(size >> 12)); n = tmp___2 + -1; if ((unsigned int )n > 3U) { n = 3; } else { } list = (struct list_head *)(& pool->cache_list) + (unsigned long )n; __mptr = (struct list_head const *)list->next; tmp = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffe78UL; __mptr___0 = (struct list_head const *)tmp->batch_pool_link.next; next = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffe78UL; goto ldv_48028; ldv_48027: ; if ((unsigned int )*((unsigned char *)tmp + 408UL) != 0U) { goto ldv_48025; } else { } if ((unsigned int )*((unsigned char *)tmp + 409UL) == 32U) { list_del(& tmp->batch_pool_link); drm_gem_object_unreference___0(& tmp->base); goto ldv_48026; } else { } if (tmp->base.size >= size) { obj = tmp; goto ldv_48025; } else { } ldv_48026: tmp = next; __mptr___1 = (struct list_head const *)next->batch_pool_link.next; next = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffe78UL; ldv_48028: ; if ((unsigned long )(& tmp->batch_pool_link) != (unsigned long )list) { goto ldv_48027; } else { } ldv_48025: ; if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { obj = i915_gem_alloc_object(pool->dev, size); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___3 = ERR_PTR(-12L); return ((struct drm_i915_gem_object *)tmp___3); } else { } ret = i915_gem_object_get_pages(obj); if (ret != 0) { tmp___4 = ERR_PTR((long )ret); return ((struct drm_i915_gem_object *)tmp___4); } else { } obj->madv = 1U; } else { } 
list_move_tail(& obj->batch_pool_link, list); i915_gem_object_pin_pages(obj); return (obj); } } bool ldv_queue_work_on_179(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_180(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_181(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_182(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_183(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_is_err_or_null(void const *ptr ) ; __inline static unsigned long arch_local_save_flags___3(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi 
= __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static bool IS_ERR_OR_NULL(void const *ptr ) ; __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static bool static_key_false___1(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int rcu_read_lock_sched_held___1(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { 
tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___3(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } bool ldv_queue_work_on_193(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_195(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_194(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_197(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_196(struct workqueue_struct *ldv_func_arg1 ) ; extern void iowrite32(u32 , void * ) ; extern void *idr_find_slowpath(struct idr * , int ) ; extern int idr_alloc(struct idr * , void * , int , int , gfp_t ) ; extern void idr_remove(struct idr * , int ) ; extern void idr_destroy(struct idr * ) ; extern void idr_init(struct idr * ) ; __inline static void *idr_find(struct idr *idr , int id ) { struct idr_layer *hint ; struct idr_layer *________p1 ; struct idr_layer *_________p1 ; union __anonunion___u_168 __u ; int tmp ; struct idr_layer *________p1___0 ; struct idr_layer *_________p1___0 ; union __anonunion___u_170 __u___0 ; int tmp___0 ; void *tmp___1 ; { __read_once_size((void const volatile *)(& idr->hint), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); hint = ________p1; if ((unsigned long )hint != (unsigned long )((struct idr_layer *)0) && (id & -256) == hint->prefix) { __read_once_size((void const volatile *)(& hint->ary) + ((unsigned long )id & 255UL), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___0 = 
debug_lockdep_rcu_enabled(); return ((void *)________p1___0); } else { } tmp___1 = idr_find_slowpath(idr, id); return (tmp___1); } } __inline static void kref_init(struct kref *kref ) { { atomic_set(& kref->refcount, 1); return; } } __inline static void kref_get___0(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub___1(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___1(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___1(kref, 1U, release); return (tmp); } } extern bool capable(int ) ; __inline static unsigned int intel_ring_flag(struct intel_engine_cs *ring ) { { return ((unsigned int )(1 << (int )ring->id)); } } int intel_ring_begin(struct intel_engine_cs *ring , int num_dwords ) ; __inline static void intel_ring_emit(struct intel_engine_cs *ring , u32 data ) { struct intel_ringbuffer *ringbuf ; { ringbuf = ring->buffer; iowrite32(data, 
ringbuf->virtual_start + (unsigned long )ringbuf->tail); ringbuf->tail = ringbuf->tail + 4U; return; } } __inline static void intel_ring_advance(struct intel_engine_cs *ring ) { struct intel_ringbuffer *ringbuf ; { ringbuf = ring->buffer; ringbuf->tail = ringbuf->tail & (u32 )(ringbuf->size + -1); return; } } void intel_lr_context_free(struct intel_context *ctx ) ; void intel_lr_context_reset(struct drm_device *dev , struct intel_context *ctx ) ; void i915_ppgtt_release(struct kref *kref ) ; struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev , struct drm_i915_file_private *fpriv ) ; __inline static void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt ) { { if ((unsigned long )ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { kref_put___1(& ppgtt->ref, & i915_ppgtt_release); } else { } return; } } __inline static void drm_gem_object_unreference___1(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___1(& obj->refcount, & drm_gem_object_free); } else { } return; } } struct tracepoint __tracepoint_i915_context_create ; __inline static void trace_i915_context_create(struct intel_context *ctx ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_428 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_430 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___1(& __tracepoint_i915_context_create.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_context_create.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___1(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 740, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46896: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_context * ))it_func))(__data, ctx); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46896; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_context_create.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___1(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 740, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_context_free ; __inline static void trace_i915_context_free(struct intel_context *ctx ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_432 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_434 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___1(& __tracepoint_i915_context_free.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_context_free.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___1(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 745, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46947: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_context * ))it_func))(__data, ctx); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46947; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_context_free.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___1(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 745, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_switch_mm ; __inline static void trace_switch_mm(struct intel_engine_cs *ring , struct intel_context *to ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_436 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_438 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___1(& __tracepoint_switch_mm.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_switch_mm.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___1(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 775, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_47000: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_engine_cs * , struct intel_context * ))it_func))(__data, ring, to); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_47000; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_switch_mm.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___1(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 775, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } int i915_gem_object_pin(struct drm_i915_gem_object *obj , struct i915_address_space *vm , uint32_t alignment , uint64_t flags ) ; void i915_vma_move_to_active(struct i915_vma *vma , struct intel_engine_cs *ring ) ; int i915_gem_l3_remap(struct intel_engine_cs *ring , int slice ) ; int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj , bool write ) ; int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj , enum i915_cache_level cache_level ) ; __inline static int i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj , uint32_t alignment , unsigned int flags ) { int tmp ; { tmp = i915_gem_object_pin(obj, & ((struct drm_i915_private *)(obj->base.dev)->dev_private)->gtt.base, alignment, (uint64_t )(flags | 4U)); return (tmp); } } void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) ; __inline static void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj ) { { i915_gem_object_ggtt_unpin_view(obj, & i915_ggtt_view_normal); return; } } int i915_gem_context_init(struct drm_device *dev ) ; void i915_gem_context_fini(struct drm_device *dev ) ; void i915_gem_context_reset(struct drm_device *dev ) ; int i915_gem_context_open(struct drm_device *dev , struct drm_file *file ) ; int i915_gem_context_enable(struct drm_i915_private *dev_priv ) ; void i915_gem_context_close(struct drm_device *dev , struct drm_file *file ) ; int i915_switch_context(struct intel_engine_cs *ring , struct intel_context *to ) ; struct intel_context *i915_gem_context_get(struct drm_i915_file_private *file_priv , u32 id ) ; void i915_gem_context_free(struct kref 
*ctx_ref ) ; struct drm_i915_gem_object *i915_gem_alloc_context_obj(struct drm_device *dev , size_t size ) ; __inline static void i915_gem_context_reference(struct intel_context *ctx ) { { kref_get___0(& ctx->ref); return; } } __inline static void i915_gem_context_unreference(struct intel_context *ctx ) { { kref_put___1(& ctx->ref, & i915_gem_context_free); return; } } int i915_gem_context_create_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_context_destroy_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_context_getparam_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_context_setparam_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; static size_t get_context_alignment(struct drm_device *dev ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { return (65536UL); } else { } return (4096UL); } } static int get_context_size(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; u32 reg ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); switch ((int )__p->info.gen) { case 6: reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8608L, 1); ret = (int )(((((reg >> 18) & 63U) + ((reg >> 6) & 63U)) + (reg & 63U)) * 64U); goto ldv_47999; case 7: reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8616L, 1); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { ret = 69632; } else { ret = (int )((((reg >> 9) & 127U) + (reg & 63U)) * 64U); } goto ldv_47999; case 8: ret = 73728; goto ldv_47999; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c"), "i" (130), "i" (12UL)); ldv_48009: ; goto ldv_48009; } ldv_47999: ; return (ret); } } void i915_gem_context_free(struct kref *ctx_ref ) { struct intel_context *ctx ; struct kref const *__mptr ; { __mptr = (struct kref const *)ctx_ref; ctx = (struct intel_context *)__mptr; trace_i915_context_free(ctx); if (i915.enable_execlists != 0) { intel_lr_context_free(ctx); } else { } i915_ppgtt_put(ctx->ppgtt); if ((unsigned long )ctx->legacy_hw_ctx.rcs_state != (unsigned long )((struct drm_i915_gem_object *)0)) { drm_gem_object_unreference___1(& (ctx->legacy_hw_ctx.rcs_state)->base); } else { } list_del(& ctx->link); kfree((void const *)ctx); return; } } struct drm_i915_gem_object *i915_gem_alloc_context_obj(struct drm_device *dev , size_t size ) { struct drm_i915_gem_object *obj ; int ret ; void *tmp ; void *tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { obj = i915_gem_object_create_stolen(dev, (u32 )size); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { obj = i915_gem_alloc_object(dev, size); } else { } if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { tmp = ERR_PTR(-12L); return ((struct drm_i915_gem_object *)tmp); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { ret = i915_gem_object_set_cache_level(obj, 2); __ret_warn_on = ret != 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c", 177, "WARN_ON(ret)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { drm_gem_object_unreference___1(& obj->base); tmp___0 = ERR_PTR((long )ret); return ((struct drm_i915_gem_object *)tmp___0); } else { } } else { } } else { } return (obj); } } static struct intel_context *__create_hw_context(struct drm_device *dev , struct drm_i915_file_private *file_priv ) { struct drm_i915_private *dev_priv ; struct intel_context *ctx ; int ret ; void *tmp ; void *tmp___0 ; struct drm_i915_gem_object *obj ; struct drm_i915_gem_object *tmp___1 ; long tmp___2 ; bool tmp___3 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp___4 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; void *tmp___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = kzalloc(224UL, 208U); ctx = (struct intel_context *)tmp; if ((unsigned long )ctx == (unsigned long )((struct intel_context *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct intel_context *)tmp___0); } else { } kref_init(& ctx->ref); list_add_tail(& ctx->link, & dev_priv->context_list); if (dev_priv->hw_context_size != 0U) { tmp___1 = i915_gem_alloc_context_obj(dev, (size_t )dev_priv->hw_context_size); obj = tmp___1; tmp___3 = IS_ERR((void const *)obj); if ((int )tmp___3) { tmp___2 = PTR_ERR((void const *)obj); ret = (int )tmp___2; goto err_out; } else { } ctx->legacy_hw_ctx.rcs_state = obj; } else { } if ((unsigned long )file_priv != (unsigned long )((struct drm_i915_file_private *)0)) { ret = idr_alloc(& file_priv->context_idr, (void *)ctx, 0, 0, 208U); if (ret < 0) { goto err_out; } else { } } else { ret = 0; } ctx->file_priv = file_priv; ctx->user_handle = ret; __p___3 = to_i915((struct drm_device const 
*)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)dev); if (((int )__p___4->info.device_id & 240) != 32) { _L: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { tmp___4 = 1; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { tmp___4 = 1; } else { tmp___4 = 0; } } ctx->remap_slice = (unsigned int )((uint8_t )(1 << tmp___4)) + 255U; } else { ctx->remap_slice = 3U; } } ctx->hang_stats.ban_period_seconds = 12UL; return (ctx); err_out: i915_gem_context_unreference(ctx); tmp___5 = ERR_PTR((long )ret); return ((struct intel_context *)tmp___5); } } static struct intel_context *i915_gem_create_context(struct drm_device *dev , struct drm_i915_file_private *file_priv ) { bool is_global_default_ctx ; struct intel_context *ctx ; int ret ; int tmp ; long tmp___0 ; bool tmp___1 ; size_t tmp___2 ; long tmp___3 ; struct i915_hw_ppgtt *ppgtt ; struct i915_hw_ppgtt *tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; bool tmp___8 ; void *tmp___9 ; { is_global_default_ctx = (unsigned long )file_priv == (unsigned long )((struct drm_i915_file_private *)0); ret = 0; tmp = mutex_is_locked(& dev->struct_mutex); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c"), "i" (249), "i" (12UL)); ldv_48076: ; goto ldv_48076; } else { } ctx = __create_hw_context(dev, file_priv); tmp___1 = IS_ERR((void const *)ctx); if ((int )tmp___1) { return (ctx); } else { } if ((int 
)is_global_default_ctx && (unsigned long )ctx->legacy_hw_ctx.rcs_state != (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___2 = get_context_alignment(dev); ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, (uint32_t )tmp___2, 0U); if (ret != 0) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("i915_gem_create_context", "Couldn\'t pin %d\n", ret); } else { } goto err_destroy; } else { } } else { } if (i915.enable_ppgtt == 2) { tmp___4 = i915_ppgtt_create(dev, file_priv); ppgtt = tmp___4; tmp___8 = IS_ERR_OR_NULL((void const *)ppgtt); if ((int )tmp___8) { tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { tmp___5 = PTR_ERR((void const *)ppgtt); drm_ut_debug_printk("i915_gem_create_context", "PPGTT setup failed (%ld)\n", tmp___5); } else { } tmp___7 = PTR_ERR((void const *)ppgtt); ret = (int )tmp___7; goto err_unpin; } else { } ctx->ppgtt = ppgtt; } else { } trace_i915_context_create(ctx); return (ctx); err_unpin: ; if ((int )is_global_default_ctx && (unsigned long )ctx->legacy_hw_ctx.rcs_state != (unsigned long )((struct drm_i915_gem_object *)0)) { i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); } else { } err_destroy: i915_gem_context_unreference(ctx); tmp___9 = ERR_PTR((long )ret); return ((struct intel_context *)tmp___9); } } void i915_gem_context_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; struct intel_context *ctx ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct intel_engine_cs *ring ; struct intel_context *lctx ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (i915.enable_execlists != 0) { __mptr = (struct list_head const *)dev_priv->context_list.next; ctx = (struct intel_context *)__mptr + 0xffffffffffffff30UL; goto ldv_48092; ldv_48091: intel_lr_context_reset(dev, ctx); __mptr___0 = (struct list_head const *)ctx->link.next; ctx = (struct intel_context *)__mptr___0 + 
0xffffffffffffff30UL; ldv_48092: ; if ((unsigned long )(& ctx->link) != (unsigned long )(& dev_priv->context_list)) { goto ldv_48091; } else { } return; } else { } i = 0; goto ldv_48097; ldv_48096: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; lctx = ring->last_context; if ((unsigned long )lctx != (unsigned long )((struct intel_context *)0)) { if ((unsigned long )lctx->legacy_hw_ctx.rcs_state != (unsigned long )((struct drm_i915_gem_object *)0) && i == 0) { i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state); } else { } i915_gem_context_unreference(lctx); ring->last_context = (struct intel_context *)0; } else { } i = i + 1; ldv_48097: ; if (i <= 4) { goto ldv_48096; } else { } return; } } int i915_gem_context_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_context *ctx ; int i ; int __ret_warn_on ; long tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; struct drm_i915_private *__p ; long tmp___3 ; long tmp___4 ; bool tmp___5 ; struct intel_engine_cs *ring ; long tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __ret_warn_on = (unsigned long )dev_priv->ring[0].default_context != (unsigned long )((struct intel_context *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c", 333, "WARN_ON(dev_priv->ring[RCS].default_context)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (0); } else { } if (i915.enable_execlists != 0) { dev_priv->hw_context_size = 0U; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { tmp___1 = get_context_size(dev); dev_priv->hw_context_size = (uint32_t )(((tmp___1 + -1) | 4095) + 1); if 
(dev_priv->hw_context_size > 1048576U) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("i915_gem_context_init", "Disabling HW Contexts; invalid size %d\n", dev_priv->hw_context_size); } else { } dev_priv->hw_context_size = 0U; } else { } } else { } } ctx = i915_gem_create_context(dev, (struct drm_i915_file_private *)0); tmp___5 = IS_ERR((void const *)ctx); if ((int )tmp___5) { tmp___3 = PTR_ERR((void const *)ctx); drm_err("Failed to create default global context (error %ld)\n", tmp___3); tmp___4 = PTR_ERR((void const *)ctx); return ((int )tmp___4); } else { } i = 0; goto ldv_48116; ldv_48115: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; ring->default_context = ctx; i = i + 1; ldv_48116: ; if (i <= 4) { goto ldv_48115; } else { } tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_gem_context_init", "%s context support initialized\n", i915.enable_execlists == 0 ? (dev_priv->hw_context_size != 0U ? 
(char *)"HW" : (char *)"fake") : (char *)"LR"); } else { } return (0); } } void i915_gem_context_fini(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_context *dctx ; int i ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; long tmp___0 ; struct intel_engine_cs *ring ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dctx = dev_priv->ring[0].default_context; if ((unsigned long )dctx->legacy_hw_ctx.rcs_state != (unsigned long )((struct drm_i915_gem_object *)0)) { intel_gpu_reset(dev); __ret_warn_on = (unsigned long )dev_priv->ring[0].last_context == (unsigned long )((struct intel_context *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c", 387, "WARN_ON(!dev_priv->ring[RCS].last_context)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned long )dev_priv->ring[0].last_context == (unsigned long )dctx) { __ret_warn_on___0 = (unsigned int )*((unsigned char *)dctx->legacy_hw_ctx.rcs_state + 408UL) != 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c", 390, "WARN_ON(dctx->legacy_hw_ctx.rcs_state->active)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); i915_gem_context_unreference(dctx); dev_priv->ring[0].last_context = (struct intel_context *)0; } else { } i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); } else { } i = 0; goto ldv_48130; ldv_48129: ring = (struct intel_engine_cs *)(& dev_priv->ring) + 
(unsigned long )i; if ((unsigned long )ring->last_context != (unsigned long )((struct intel_context *)0)) { i915_gem_context_unreference(ring->last_context); } else { } ring->default_context = (struct intel_context *)0; ring->last_context = (struct intel_context *)0; i = i + 1; ldv_48130: ; if (i <= 4) { goto ldv_48129; } else { } i915_gem_context_unreference(dctx); return; } } int i915_gem_context_enable(struct drm_i915_private *dev_priv ) { struct intel_engine_cs *ring ; int ret ; int i ; long tmp ; bool tmp___0 ; bool tmp___1 ; { tmp = ldv__builtin_expect((unsigned long )dev_priv->ring[0].default_context == (unsigned long )((struct intel_context *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c"), "i" (417), "i" (12UL)); ldv_48138: ; goto ldv_48138; } else { } if (i915.enable_execlists != 0) { i = 0; goto ldv_48140; ldv_48139: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { if ((unsigned long )ring->init_context != (unsigned long )((int (*)(struct intel_engine_cs * , struct intel_context * ))0)) { ret = (*(ring->init_context))(ring, ring->default_context); if (ret != 0) { drm_err("ring init context: %d\n", ret); return (ret); } else { } } else { } } else { } i = i + 1; ldv_48140: ; if (i <= 4) { goto ldv_48139; } else { } } else { i = 0; goto ldv_48143; ldv_48142: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___1 = intel_ring_initialized(ring); if ((int )tmp___1) { ret = i915_switch_context(ring, ring->default_context); if (ret != 0) { return (ret); } else { } } else { } i = i + 1; ldv_48143: ; if (i <= 4) 
{ goto ldv_48142; } else { } } return (0); } } static int context_idr_cleanup(int id , void *p , void *data ) { struct intel_context *ctx ; { ctx = (struct intel_context *)p; i915_gem_context_unreference(ctx); return (0); } } int i915_gem_context_open(struct drm_device *dev , struct drm_file *file ) { struct drm_i915_file_private *file_priv ; struct intel_context *ctx ; long tmp ; bool tmp___0 ; { file_priv = (struct drm_i915_file_private *)file->driver_priv; idr_init(& file_priv->context_idr); mutex_lock_nested(& dev->struct_mutex, 0U); ctx = i915_gem_create_context(dev, file_priv); mutex_unlock(& dev->struct_mutex); tmp___0 = IS_ERR((void const *)ctx); if ((int )tmp___0) { idr_destroy(& file_priv->context_idr); tmp = PTR_ERR((void const *)ctx); return ((int )tmp); } else { } return (0); } } void i915_gem_context_close(struct drm_device *dev , struct drm_file *file ) { struct drm_i915_file_private *file_priv ; { file_priv = (struct drm_i915_file_private *)file->driver_priv; idr_for_each(& file_priv->context_idr, & context_idr_cleanup, (void *)0); idr_destroy(& file_priv->context_idr); return; } } struct intel_context *i915_gem_context_get(struct drm_i915_file_private *file_priv , u32 id ) { struct intel_context *ctx ; void *tmp ; void *tmp___0 ; { tmp = idr_find(& file_priv->context_idr, (int )id); ctx = (struct intel_context *)tmp; if ((unsigned long )ctx == (unsigned long )((struct intel_context *)0)) { tmp___0 = ERR_PTR(-2L); return ((struct intel_context *)tmp___0); } else { } return (ctx); } } extern void __compiletime_assert_563(void) ; __inline static int mi_set_context(struct intel_engine_cs *ring , struct intel_context *new_context , u32 hw_flags ) { u32 flags ; int num_rings ; struct drm_i915_private *__p ; unsigned int tmp___0 ; int tmp___1 ; bool tmp___2 ; int len ; int i ; int ret ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct intel_engine_cs 
*signaller ; int _a ; struct drm_i915_private *tmp___3 ; bool tmp___4 ; struct drm_i915_private *__p___4 ; unsigned long tmp___5 ; struct intel_engine_cs *signaller___0 ; bool __cond___8 ; bool __cond___9 ; bool __cond___10 ; struct drm_i915_private *tmp___6 ; bool tmp___7 ; struct drm_i915_private *__p___5 ; { flags = hw_flags | 256U; tmp___2 = i915_semaphore_is_enabled(ring->dev); if ((int )tmp___2) { __p = to_i915((struct drm_device const *)ring->dev); tmp___0 = __arch_hweight32((unsigned int )__p->info.ring_mask); tmp___1 = (int const )(tmp___0 - 1U); } else { tmp___1 = 0; } num_rings = tmp___1; __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { ret = (*(ring->flush))(ring, 62U, 0U); if (ret != 0) { return (ret); } else { } } else { } __p___1 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 7U) { flags = flags | 12U; } else { } } else { } len = 4; __p___3 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 6U) { len = (num_rings != 0 ? 
(num_rings + 1) * 4 : 2) + len; } else { } ret = intel_ring_begin(ring, len); if (ret != 0) { return (ret); } else { } __p___4 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 6U) { intel_ring_emit(ring, 67108864U); if (num_rings != 0) { intel_ring_emit(ring, (u32 )((num_rings * 2 + -1) | 285212672)); i = 0; goto ldv_48429; ldv_48428: tmp___3 = to_i915((struct drm_device const *)ring->dev); signaller = (struct intel_engine_cs *)(& tmp___3->ring) + (unsigned long )i; tmp___4 = intel_ring_initialized(signaller); if ((int )tmp___4) { if ((unsigned long )signaller == (unsigned long )ring) { goto ldv_48412; } else { } intel_ring_emit(ring, signaller->mmio_base + 80U); _a = 1; intel_ring_emit(ring, (u32 )((_a << 16) | _a)); } else { } ldv_48412: i = i + 1; ldv_48429: ; if (i <= 4) { goto ldv_48428; } else { } } else { } } else { } intel_ring_emit(ring, 0U); intel_ring_emit(ring, 201326592U); tmp___5 = i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state); intel_ring_emit(ring, (u32 )tmp___5 | flags); intel_ring_emit(ring, 0U); __p___5 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 6U) { if (num_rings != 0) { intel_ring_emit(ring, (u32 )((num_rings * 2 + -1) | 285212672)); i = 0; goto ldv_48453; ldv_48452: tmp___6 = to_i915((struct drm_device const *)ring->dev); signaller___0 = (struct intel_engine_cs *)(& tmp___6->ring) + (unsigned long )i; tmp___7 = intel_ring_initialized(signaller___0); if ((int )tmp___7) { if ((unsigned long )signaller___0 == (unsigned long )ring) { goto ldv_48438; } else { } intel_ring_emit(ring, signaller___0->mmio_base + 80U); __cond___8 = 0; if ((int )__cond___8) { __compiletime_assert_563(); } else { } __cond___9 = 0; if ((int )__cond___9) { __compiletime_assert_563(); } else { } __cond___10 = 0; if ((int )__cond___10) { __compiletime_assert_563(); } else { } intel_ring_emit(ring, (u32 )65536); } else { } ldv_48438: i = i + 
1; ldv_48453: ; if (i <= 4) { goto ldv_48452; } else { } } else { } intel_ring_emit(ring, 67108865U); } else { } intel_ring_advance(ring); return (ret); } } __inline static bool should_skip_switch(struct intel_engine_cs *ring , struct intel_context *from , struct intel_context *to ) { unsigned int tmp ; { if ((unsigned int )to->remap_slice != 0U) { return (0); } else { } if ((unsigned long )to->ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0) && (unsigned long )from == (unsigned long )to) { tmp = intel_ring_flag(ring); if (((unsigned long )tmp & (to->ppgtt)->pd_dirty_rings) == 0UL) { return (1); } else { } } else { } return (0); } } static bool needs_pd_load_pre(struct intel_engine_cs *ring , struct intel_context *to ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; if ((unsigned long )to->ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0)) { return (0); } else { } __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 7U) { return (1); } else { } if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) != (unsigned long )ring) { return (1); } else { } return (0); } } static bool needs_pd_load_post(struct intel_engine_cs *ring , struct intel_context *to , u32 hw_flags ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; if ((unsigned long )to->ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0)) { return (0); } else { } __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) != 8U) { return (0); } else { } if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) != (unsigned long )ring) { return (0); } else { } if ((int )hw_flags & 1) { return (1); } else { } return (0); } } static int do_switch(struct intel_engine_cs *ring , struct intel_context *to ) { struct 
drm_i915_private *dev_priv ; struct intel_context *from ; u32 hw_flags ; bool uninitialized ; int ret ; int i ; long tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; size_t tmp___4 ; unsigned int tmp___5 ; bool tmp___6 ; unsigned int tmp___7 ; unsigned int tmp___8 ; int __ret_warn_on ; bool tmp___9 ; bool tmp___10 ; int tmp___11 ; long tmp___12 ; bool tmp___13 ; long tmp___14 ; struct i915_vma *tmp___15 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; from = ring->last_context; hw_flags = 0U; uninitialized = 0; if ((unsigned long )from != (unsigned long )((struct intel_context *)0) && (unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) == (unsigned long )ring) { tmp = ldv__builtin_expect((unsigned long )from->legacy_hw_ctx.rcs_state == (unsigned long )((struct drm_i915_gem_object *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c"), "i" (636), "i" (12UL)); ldv_48493: ; goto ldv_48493; } else { } tmp___0 = i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } tmp___2 = ldv__builtin_expect((long )tmp___1, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c"), "i" (637), "i" (12UL)); ldv_48494: ; goto ldv_48494; } else { } } else { } tmp___3 = should_skip_switch(ring, from, to); if ((int 
)tmp___3) { return (0); } else { } if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) == (unsigned long )ring) { tmp___4 = get_context_alignment(ring->dev); ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, (uint32_t )tmp___4, 0U); if (ret != 0) { return (ret); } else { } } else { } from = ring->last_context; tmp___6 = needs_pd_load_pre(ring, to); if ((int )tmp___6) { trace_switch_mm(ring, to); ret = (*((to->ppgtt)->switch_mm))(to->ppgtt, ring); if (ret != 0) { goto unpin_out; } else { } tmp___5 = intel_ring_flag(ring); (to->ppgtt)->pd_dirty_rings = (to->ppgtt)->pd_dirty_rings & (unsigned long )(~ tmp___5); } else { } if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) != (unsigned long )ring) { if ((unsigned long )from != (unsigned long )((struct intel_context *)0)) { i915_gem_context_unreference(from); } else { } goto done; } else { } ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, 0); if (ret != 0) { goto unpin_out; } else { } if (! 
to->legacy_hw_ctx.initialized) { hw_flags = hw_flags | 1U; } else if ((unsigned long )to->ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { tmp___8 = intel_ring_flag(ring); if (((unsigned long )tmp___8 & (to->ppgtt)->pd_dirty_rings) != 0UL) { hw_flags = hw_flags | 2U; tmp___7 = intel_ring_flag(ring); (to->ppgtt)->pd_dirty_rings = (to->ppgtt)->pd_dirty_rings & (unsigned long )(~ tmp___7); } else { } } else { } tmp___9 = needs_pd_load_pre(ring, to); if ((int )tmp___9) { tmp___10 = needs_pd_load_post(ring, to, hw_flags); if ((int )tmp___10) { tmp___11 = 1; } else { tmp___11 = 0; } } else { tmp___11 = 0; } __ret_warn_on = tmp___11; tmp___12 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___12 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c", 704, "WARN_ON(needs_pd_load_pre(ring, to) && needs_pd_load_post(ring, to, hw_flags))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = mi_set_context(ring, to, hw_flags); if (ret != 0) { goto unpin_out; } else { } tmp___13 = needs_pd_load_post(ring, to, hw_flags); if ((int )tmp___13) { trace_switch_mm(ring, to); ret = (*((to->ppgtt)->switch_mm))(to->ppgtt, ring); if (ret != 0) { drm_err("Failed to change address space on context switch\n"); goto unpin_out; } else { } } else { } i = 0; goto ldv_48502; ldv_48501: ; if ((((int )to->remap_slice >> i) & 1) == 0) { goto ldv_48499; } else { } ret = i915_gem_l3_remap(ring, i); if (ret != 0) { tmp___14 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("do_switch", "L3 remapping failed\n"); } else { } } else { to->remap_slice = (uint8_t )((int )((signed char )to->remap_slice) & ~ ((int )((signed char )(1 << i)))); } ldv_48499: i = i + 1; ldv_48502: ; if (i <= 1) { goto ldv_48501; } else { } if ((unsigned long )from != 
(unsigned long )((struct intel_context *)0)) { (from->legacy_hw_ctx.rcs_state)->base.read_domains = 16U; tmp___15 = i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state); i915_vma_move_to_active(tmp___15, ring); (from->legacy_hw_ctx.rcs_state)->dirty = 1U; i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); i915_gem_context_unreference(from); } else { } uninitialized = (bool )(! ((int )to->legacy_hw_ctx.initialized != 0)); to->legacy_hw_ctx.initialized = 1; done: i915_gem_context_reference(to); ring->last_context = to; if ((int )uninitialized) { if ((unsigned long )ring->init_context != (unsigned long )((int (*)(struct intel_engine_cs * , struct intel_context * ))0)) { ret = (*(ring->init_context))(ring, to); if (ret != 0) { drm_err("ring init context: %d\n", ret); } else { } } else { } } else { } return (0); unpin_out: ; if ((unsigned int )ring->id == 0U) { i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); } else { } return (ret); } } int i915_switch_context(struct intel_engine_cs *ring , struct intel_context *to ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; int tmp___0 ; long tmp___1 ; int tmp___2 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; __ret_warn_on = i915.enable_execlists != 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c", 804, "WARN_ON(i915.enable_execlists)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = mutex_is_locked(& (dev_priv->dev)->struct_mutex); __ret_warn_on___0 = tmp___0 == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_context.c", 805, "WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if ((unsigned long )to->legacy_hw_ctx.rcs_state == (unsigned long )((struct drm_i915_gem_object *)0)) { if ((unsigned long )ring->last_context != (unsigned long )to) { i915_gem_context_reference(to); if ((unsigned long )ring->last_context != (unsigned long )((struct intel_context *)0)) { i915_gem_context_unreference(ring->last_context); } else { } ring->last_context = to; } else { } return (0); } else { } tmp___2 = do_switch(ring, to); return (tmp___2); } } static bool contexts_enabled(struct drm_device *dev ) { struct drm_i915_private *tmp ; int tmp___0 ; { if (i915.enable_execlists != 0) { tmp___0 = 1; } else { tmp = to_i915((struct drm_device const *)dev); if (tmp->hw_context_size != 0U) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } } int i915_gem_context_create_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_context_create *args ; struct drm_i915_file_private *file_priv ; struct intel_context *ctx ; int ret ; bool tmp ; int tmp___0 ; long tmp___1 ; bool tmp___2 ; long tmp___3 ; { args = (struct drm_i915_gem_context_create *)data; file_priv = (struct drm_i915_file_private *)file->driver_priv; tmp = contexts_enabled(dev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-19); } else { } ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } ctx = i915_gem_create_context(dev, file_priv); mutex_unlock(& dev->struct_mutex); tmp___2 = IS_ERR((void const *)ctx); if ((int )tmp___2) { tmp___1 = PTR_ERR((void const *)ctx); return ((int )tmp___1); } else { } args->ctx_id = (__u32 )ctx->user_handle; 
tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("i915_gem_context_create_ioctl", "HW context %d created\n", args->ctx_id); } else { } return (0); } } int i915_gem_context_destroy_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_context_destroy *args ; struct drm_i915_file_private *file_priv ; struct intel_context *ctx ; int ret ; long tmp ; bool tmp___0 ; long tmp___1 ; { args = (struct drm_i915_gem_context_destroy *)data; file_priv = (struct drm_i915_file_private *)file->driver_priv; if (args->ctx_id == 0U) { return (-2); } else { } ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } ctx = i915_gem_context_get(file_priv, args->ctx_id); tmp___0 = IS_ERR((void const *)ctx); if ((int )tmp___0) { mutex_unlock(& dev->struct_mutex); tmp = PTR_ERR((void const *)ctx); return ((int )tmp); } else { } idr_remove(& (ctx->file_priv)->context_idr, ctx->user_handle); i915_gem_context_unreference(ctx); mutex_unlock(& dev->struct_mutex); tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_gem_context_destroy_ioctl", "HW context %d destroyed\n", args->ctx_id); } else { } return (0); } } int i915_gem_context_getparam_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_file_private *file_priv ; struct drm_i915_gem_context_param *args ; struct intel_context *ctx ; int ret ; long tmp ; bool tmp___0 ; { file_priv = (struct drm_i915_file_private *)file->driver_priv; args = (struct drm_i915_gem_context_param *)data; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } ctx = i915_gem_context_get(file_priv, args->ctx_id); tmp___0 = IS_ERR((void const *)ctx); if ((int )tmp___0) { mutex_unlock(& dev->struct_mutex); tmp = PTR_ERR((void const *)ctx); return ((int )tmp); } else { } args->size = 0U; switch (args->param) { case 1ULL: args->value = (__u64 
)ctx->hang_stats.ban_period_seconds; goto ldv_48546; default: ret = -22; goto ldv_48546; } ldv_48546: mutex_unlock(& dev->struct_mutex); return (ret); } } int i915_gem_context_setparam_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_file_private *file_priv ; struct drm_i915_gem_context_param *args ; struct intel_context *ctx ; int ret ; long tmp ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; { file_priv = (struct drm_i915_file_private *)file->driver_priv; args = (struct drm_i915_gem_context_param *)data; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } ctx = i915_gem_context_get(file_priv, args->ctx_id); tmp___0 = IS_ERR((void const *)ctx); if ((int )tmp___0) { mutex_unlock(& dev->struct_mutex); tmp = PTR_ERR((void const *)ctx); return ((int )tmp); } else { } switch (args->param) { case 1ULL: ; if (args->size != 0U) { ret = -22; } else if (args->value < (unsigned long long )ctx->hang_stats.ban_period_seconds) { tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { ret = -1; } else { ctx->hang_stats.ban_period_seconds = (unsigned long )args->value; } } else { ctx->hang_stats.ban_period_seconds = (unsigned long )args->value; } goto ldv_48558; default: ret = -22; goto ldv_48558; } ldv_48558: mutex_unlock(& dev->struct_mutex); return (ret); } } __inline static bool IS_ERR_OR_NULL(void const *ptr ) { bool tmp ; { tmp = ldv_is_err_or_null(ptr); return (tmp); } } bool ldv_queue_work_on_193(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_194(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 
ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_195(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_196(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_197(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern void __might_sleep(char const * , int , int ) ; bool ldv_queue_work_on_207(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_209(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_208(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_211(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_210(struct workqueue_struct *ldv_func_arg1 ) ; __inline static int kref_sub___2(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 
; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___2(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___2(kref, 1U, release); return (tmp); } } __inline static void *kmap(struct page *page ) { void *tmp ; { __might_sleep("include/linux/highmem.h", 58, 0); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void kunmap(struct page *page ) { { return; } } int i915_gem_render_state_init(struct intel_engine_cs *ring ) ; void i915_gem_render_state_fini(struct render_state *so ) ; int i915_gem_render_state_prepare(struct intel_engine_cs *ring , struct render_state *so ) ; __inline static void drm_gem_object_unreference___2(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___2(& obj->refcount, & drm_gem_object_free); } else { } return; } } int __i915_add_request(struct intel_engine_cs *ring , struct drm_file *file , struct drm_i915_gem_object *obj ) ; struct intel_renderstate_rodata const gen6_null_state ; struct intel_renderstate_rodata const gen7_null_state ; struct intel_renderstate_rodata const gen8_null_state ; struct intel_renderstate_rodata const gen9_null_state ; static struct intel_renderstate_rodata const *render_state_get_rodata(struct drm_device *dev , int const gen ) { { switch (gen) { case 6: ; return (& gen6_null_state); case 7: ; return (& gen7_null_state); case 8: ; return (& gen8_null_state); case 9: ; return (& gen9_null_state); } return ((struct intel_renderstate_rodata const *)0); } } static int render_state_init(struct render_state *so , 
struct drm_device *dev ) { int ret ; struct drm_i915_private *__p ; unsigned long tmp ; { __p = to_i915((struct drm_device const *)dev); so->gen = (int )__p->info.gen; so->rodata = render_state_get_rodata(dev, so->gen); if ((unsigned long )so->rodata == (unsigned long )((struct intel_renderstate_rodata const *)0)) { return (0); } else { } if ((unsigned int )(so->rodata)->batch_items * 4U > 4096U) { return (-22); } else { } so->obj = i915_gem_alloc_object(dev, 4096UL); if ((unsigned long )so->obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return (-12); } else { } ret = i915_gem_obj_ggtt_pin(so->obj, 4096U, 0U); if (ret != 0) { goto free_gem; } else { } tmp = i915_gem_obj_ggtt_offset(so->obj); so->ggtt_offset = (u64 )tmp; return (0); free_gem: drm_gem_object_unreference___2(& (so->obj)->base); return (ret); } } static int render_state_setup(struct render_state *so ) { struct intel_renderstate_rodata const *rodata ; unsigned int i ; unsigned int reloc_index ; struct page *page ; u32 *d ; int ret ; void *tmp ; u32 s ; u64 r ; unsigned int tmp___0 ; unsigned int tmp___1 ; { rodata = so->rodata; i = 0U; reloc_index = 0U; ret = i915_gem_object_set_to_cpu_domain(so->obj, 1); if (ret != 0) { return (ret); } else { } page = sg_page___0(((so->obj)->pages)->sgl); tmp = kmap(page); d = (u32 *)tmp; goto ldv_48013; ldv_48012: s = *(rodata->batch + (unsigned long )i); if (i * 4U == (unsigned int )*(rodata->reloc + (unsigned long )reloc_index)) { r = (u64 )s + so->ggtt_offset; s = (unsigned int )r; if (so->gen > 7) { if (i + 1U >= (unsigned int )rodata->batch_items || (unsigned int )*(rodata->batch + (unsigned long )(i + 1U)) != 0U) { return (-22); } else { } tmp___0 = i; i = i + 1U; *(d + (unsigned long )tmp___0) = s; s = (unsigned int )(r >> 32ULL); } else { } reloc_index = reloc_index + 1U; } else { } tmp___1 = i; i = i + 1U; *(d + (unsigned long )tmp___1) = s; ldv_48013: ; if ((unsigned int )rodata->batch_items > i) { goto ldv_48012; } else { } kunmap(page); ret = 
i915_gem_object_set_to_gtt_domain(so->obj, 0); if (ret != 0) { return (ret); } else { } if ((unsigned int )*(rodata->reloc + (unsigned long )reloc_index) != 4294967295U) { drm_err("only %d relocs resolved\n", reloc_index); return (-22); } else { } return (0); } } void i915_gem_render_state_fini(struct render_state *so ) { { i915_gem_object_ggtt_unpin(so->obj); drm_gem_object_unreference___2(& (so->obj)->base); return; } } int i915_gem_render_state_prepare(struct intel_engine_cs *ring , struct render_state *so ) { int ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; { __ret_warn_on = (unsigned int )ring->id != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_render_state.c", 136, "WARN_ON(ring->id != RCS)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-2); } else { } ret = render_state_init(so, ring->dev); if (ret != 0) { return (ret); } else { } if ((unsigned long )so->rodata == (unsigned long )((struct intel_renderstate_rodata const *)0)) { return (0); } else { } ret = render_state_setup(so); if (ret != 0) { i915_gem_render_state_fini(so); return (ret); } else { } return (0); } } int i915_gem_render_state_init(struct intel_engine_cs *ring ) { struct render_state so ; int ret ; struct i915_vma *tmp ; { ret = i915_gem_render_state_prepare(ring, & so); if (ret != 0) { return (ret); } else { } if ((unsigned long )so.rodata == (unsigned long )((struct intel_renderstate_rodata const *)0)) { return (0); } else { } ret = (*(ring->dispatch_execbuffer))(ring, so.ggtt_offset, (unsigned int )(so.rodata)->batch_items * 4U, 1U); if (ret != 0) { goto out; } else { } tmp = i915_gem_obj_to_ggtt(so.obj); i915_vma_move_to_active(tmp, ring); ret = __i915_add_request(ring, (struct 
drm_file *)0, so.obj); out: i915_gem_render_state_fini(& so); return (ret); } } bool ldv_queue_work_on_207(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_208(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_209(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_210(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_211(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_221(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_223(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_222(int ldv_func_arg1 , struct workqueue_struct 
*ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_225(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_224(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_queue_work_on_221(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_222(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_223(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_224(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_225(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void 
*ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static void *ERR_CAST(void const *ptr ) { { return ((void *)ptr); } } __inline static void atomic64_inc(atomic64_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incq %0": "=m" (v->counter): "m" (v->counter)); return; } } __inline static void atomic_long_inc(atomic_long_t *l ) { atomic64_t *v ; { v = l; atomic64_inc(v); return; } } bool ldv_queue_work_on_235(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_237(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_236(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_239(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_238(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void kref_get___1(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static void sg_assign_page(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (90), "i" (12UL)); ldv_25328: ; goto ldv_25328; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (92), "i" (12UL)); ldv_25329: ; goto ldv_25329; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (93), "i" (12UL)); ldv_25330: ; goto ldv_25330; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { sg_assign_page(sg, page); sg->offset = offset; sg->length = len; return; } } __inline static struct page *sg_page___2(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection 
__bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_25340: ; goto ldv_25340; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_25341: ; goto ldv_25341; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } __inline static void *sg_virt(struct scatterlist *sg ) { struct page *tmp ; void *tmp___0 ; { tmp = sg_page___2(sg); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )sg->offset); } } extern struct scatterlist *sg_next(struct scatterlist * ) ; extern void sg_free_table(struct sg_table * ) ; extern int sg_alloc_table(struct sg_table * , unsigned int , gfp_t ) ; __inline static struct page *sg_page_iter_page___0(struct sg_page_iter *piter ) { struct page *tmp ; { tmp = sg_page___2(piter->sg); return ((struct page *)-24189255811072L + ((unsigned long )(((long )tmp + 24189255811072L) / 64L) + (unsigned long )piter->sg_pgoffset)); } } __inline static int valid_dma_direction(int dma_direction ) { { return ((dma_direction == 0 || dma_direction == 1) || dma_direction == 2); } } __inline static void kmemcheck_mark_initialized(void *address , unsigned int n ) { { return; } } extern void debug_dma_map_sg(struct device * , struct scatterlist * , int , int , int ) ; extern void debug_dma_unmap_sg(struct device * , struct scatterlist * , int , int ) ; extern struct dma_map_ops *dma_ops ; __inline static struct dma_map_ops *get_dma_ops(struct device *dev ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )dev == (unsigned long )((struct device *)0), 0L); if (tmp != 0L || (unsigned long )dev->archdata.dma_ops == (unsigned long )((struct 
dma_map_ops *)0)) { return (dma_ops); } else { return (dev->archdata.dma_ops); } } } __inline static int dma_map_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int i ; int ents ; struct scatterlist *s ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; i = 0; s = sg; goto ldv_25920; ldv_25919: tmp___0 = sg_virt(s); kmemcheck_mark_initialized(tmp___0, s->length); i = i + 1; s = sg_next(s); ldv_25920: ; if (i < nents) { goto ldv_25919; } else { } tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (56), "i" (12UL)); ldv_25922: ; goto ldv_25922; } else { } ents = (*(ops->map_sg))(dev, sg, nents, dir, attrs); tmp___3 = ldv__builtin_expect(ents < 0, 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (58), "i" (12UL)); ldv_25923: ; goto ldv_25923; } else { } debug_dma_map_sg(dev, sg, nents, ents, (int )dir); return (ents); } } __inline static void dma_unmap_sg_attrs(struct device *dev , struct scatterlist *sg , int nents , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"include/asm-generic/dma-mapping-common.h"), "i" (70), "i" (12UL)); ldv_25932: ; goto ldv_25932; } else { } debug_dma_unmap_sg(dev, sg, nents, (int )dir); if ((unsigned long )ops->unmap_sg != (unsigned long )((void (*)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_sg))(dev, sg, nents, dir, attrs); } else { } return; } } __inline static struct file *get_file(struct file *f ) { { atomic_long_inc(& f->f_count); return (f); } } extern void drm_gem_dmabuf_release(struct dma_buf * ) ; extern void drm_gem_private_object_init(struct drm_device * , struct drm_gem_object * , size_t ) ; __inline static void drm_gem_object_reference(struct drm_gem_object *obj ) { { kref_get___1(& obj->refcount); return; } } void *i915_gem_object_alloc(struct drm_device *dev ) ; void i915_gem_object_init(struct drm_i915_gem_object *obj , struct drm_i915_gem_object_ops const *ops ) ; __inline static void get_dma_buf(struct dma_buf *dmabuf ) { { get_file(dmabuf->file); return; } } extern struct dma_buf_attachment *dma_buf_attach(struct dma_buf * , struct device * ) ; extern void dma_buf_detach(struct dma_buf * , struct dma_buf_attachment * ) ; extern struct dma_buf *dma_buf_export(struct dma_buf_export_info const * ) ; extern void dma_buf_put(struct dma_buf * ) ; extern struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment * , enum dma_data_direction ) ; extern void dma_buf_unmap_attachment(struct dma_buf_attachment * , struct sg_table * , enum dma_data_direction ) ; static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf ) { struct drm_gem_object const *__mptr ; { __mptr = (struct drm_gem_object const *)buf->priv; return ((struct drm_i915_gem_object *)__mptr); } } static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment , enum dma_data_direction dir ) { struct drm_i915_gem_object *obj ; struct drm_i915_gem_object *tmp ; struct sg_table *st ; struct scatterlist *src ; struct 
scatterlist *dst ; int ret ; int i ; void *tmp___0 ; struct page *tmp___1 ; int tmp___2 ; void *tmp___3 ; { tmp = dma_buf_to_obj(attachment->dmabuf); obj = tmp; ret = i915_mutex_lock_interruptible(obj->base.dev); if (ret != 0) { goto err; } else { } ret = i915_gem_object_get_pages(obj); if (ret != 0) { goto err_unlock; } else { } i915_gem_object_pin_pages(obj); tmp___0 = kmalloc(16UL, 208U); st = (struct sg_table *)tmp___0; if ((unsigned long )st == (unsigned long )((struct sg_table *)0)) { ret = -12; goto err_unpin; } else { } ret = sg_alloc_table(st, (obj->pages)->nents, 208U); if (ret != 0) { goto err_free; } else { } src = (obj->pages)->sgl; dst = st->sgl; i = 0; goto ldv_48245; ldv_48244: tmp___1 = sg_page___2(src); sg_set_page(dst, tmp___1, src->length, 0U); dst = sg_next(dst); src = sg_next(src); i = i + 1; ldv_48245: ; if ((unsigned int )i < (obj->pages)->nents) { goto ldv_48244; } else { } tmp___2 = dma_map_sg_attrs(attachment->dev, st->sgl, (int )st->nents, dir, (struct dma_attrs *)0); if (tmp___2 == 0) { ret = -12; goto err_free_sg; } else { } mutex_unlock(& (obj->base.dev)->struct_mutex); return (st); err_free_sg: sg_free_table(st); err_free: kfree((void const *)st); err_unpin: i915_gem_object_unpin_pages(obj); err_unlock: mutex_unlock(& (obj->base.dev)->struct_mutex); err: tmp___3 = ERR_PTR((long )ret); return ((struct sg_table *)tmp___3); } } static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment , struct sg_table *sg , enum dma_data_direction dir ) { struct drm_i915_gem_object *obj ; struct drm_i915_gem_object *tmp ; { tmp = dma_buf_to_obj(attachment->dmabuf); obj = tmp; mutex_lock_nested(& (obj->base.dev)->struct_mutex, 0U); dma_unmap_sg_attrs(attachment->dev, sg->sgl, (int )sg->nents, dir, (struct dma_attrs *)0); sg_free_table(sg); kfree((void const *)sg); i915_gem_object_unpin_pages(obj); mutex_unlock(& (obj->base.dev)->struct_mutex); return; } } static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf ) { struct 
drm_i915_gem_object *obj ; struct drm_i915_gem_object *tmp ; struct drm_device *dev ; struct sg_page_iter sg_iter ; struct page **pages ; int ret ; int i ; void *tmp___0 ; void *tmp___1 ; int tmp___2 ; bool tmp___3 ; pgprot_t __constr_expr_0___0 ; void *tmp___4 ; { tmp = dma_buf_to_obj(dma_buf); obj = tmp; dev = obj->base.dev; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { tmp___0 = ERR_PTR((long )ret); return (tmp___0); } else { } if ((unsigned long )obj->dma_buf_vmapping != (unsigned long )((void *)0)) { obj->vmapping_count = obj->vmapping_count + 1; goto out_unlock; } else { } ret = i915_gem_object_get_pages(obj); if (ret != 0) { goto err; } else { } i915_gem_object_pin_pages(obj); ret = -12; tmp___1 = drm_malloc_ab(obj->base.size >> 12, 8UL); pages = (struct page **)tmp___1; if ((unsigned long )pages == (unsigned long )((struct page **)0)) { goto err_unpin; } else { } i = 0; __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, 0UL); goto ldv_48267; ldv_48266: tmp___2 = i; i = i + 1; *(pages + (unsigned long )tmp___2) = sg_page_iter_page___0(& sg_iter); ldv_48267: tmp___3 = __sg_page_iter_next(& sg_iter); if ((int )tmp___3) { goto ldv_48266; } else { } __constr_expr_0___0.pgprot = 0x8000000000000163UL; obj->dma_buf_vmapping = vmap(pages, (unsigned int )i, 0UL, __constr_expr_0___0); drm_free_large((void *)pages); if ((unsigned long )obj->dma_buf_vmapping == (unsigned long )((void *)0)) { goto err_unpin; } else { } obj->vmapping_count = 1; out_unlock: mutex_unlock(& dev->struct_mutex); return (obj->dma_buf_vmapping); err_unpin: i915_gem_object_unpin_pages(obj); err: mutex_unlock(& dev->struct_mutex); tmp___4 = ERR_PTR((long )ret); return (tmp___4); } } static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf , void *vaddr ) { struct drm_i915_gem_object *obj ; struct drm_i915_gem_object *tmp ; struct drm_device *dev ; { tmp = dma_buf_to_obj(dma_buf); obj = tmp; dev = obj->base.dev; mutex_lock_nested(& dev->struct_mutex, 0U); 
obj->vmapping_count = obj->vmapping_count - 1; if (obj->vmapping_count == 0) { vunmap((void const *)obj->dma_buf_vmapping); obj->dma_buf_vmapping = (void *)0; i915_gem_object_unpin_pages(obj); } else { } mutex_unlock(& dev->struct_mutex); return; } } static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf , unsigned long page_num ) { { return ((void *)0); } } static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf , unsigned long page_num , void *addr ) { { return; } } static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf , unsigned long page_num ) { { return ((void *)0); } } static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf , unsigned long page_num , void *addr ) { { return; } } static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf , struct vm_area_struct *vma ) { { return (-22); } } static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf , size_t start , size_t length , enum dma_data_direction direction ) { struct drm_i915_gem_object *obj ; struct drm_i915_gem_object *tmp ; struct drm_device *dev ; int ret ; bool write ; { tmp = dma_buf_to_obj(dma_buf); obj = tmp; dev = obj->base.dev; write = (bool )((unsigned int )direction == 0U || (unsigned int )direction == 1U); ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } ret = i915_gem_object_set_to_cpu_domain(obj, (int )write); mutex_unlock(& dev->struct_mutex); return (ret); } } static struct dma_buf_ops const i915_dmabuf_ops = {0, 0, & i915_gem_map_dma_buf, & i915_gem_unmap_dma_buf, & drm_gem_dmabuf_release, & i915_gem_begin_cpu_access, 0, & i915_gem_dmabuf_kmap_atomic, & i915_gem_dmabuf_kunmap_atomic, & i915_gem_dmabuf_kmap, & i915_gem_dmabuf_kunmap, & i915_gem_dmabuf_mmap, & i915_gem_dmabuf_vmap, & i915_gem_dmabuf_vunmap}; struct dma_buf *i915_gem_prime_export(struct drm_device *dev , struct drm_gem_object *gem_obj , int flags ) { struct drm_i915_gem_object *obj ; struct drm_gem_object const *__mptr ; struct dma_buf_export_info exp_info ; int 
ret ; int tmp ; void *tmp___0 ; struct dma_buf *tmp___1 ; { __mptr = (struct drm_gem_object const *)gem_obj; obj = (struct drm_i915_gem_object *)__mptr; exp_info.exp_name = "i915"; exp_info.owner = & __this_module; exp_info.ops = 0; exp_info.size = 0UL; exp_info.flags = 0; exp_info.resv = 0; exp_info.priv = 0; exp_info.ops = & i915_dmabuf_ops; exp_info.size = gem_obj->size; exp_info.flags = flags; exp_info.priv = (void *)gem_obj; if ((unsigned long )(obj->ops)->dmabuf_export != (unsigned long )((int (*/* const */)(struct drm_i915_gem_object * ))0)) { tmp = (*((obj->ops)->dmabuf_export))(obj); ret = tmp; if (ret != 0) { tmp___0 = ERR_PTR((long )ret); return ((struct dma_buf *)tmp___0); } else { } } else { } tmp___1 = dma_buf_export((struct dma_buf_export_info const *)(& exp_info)); return (tmp___1); } } static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj ) { struct sg_table *sg ; long tmp ; bool tmp___0 ; { sg = dma_buf_map_attachment(obj->base.import_attach, 0); tmp___0 = IS_ERR((void const *)sg); if ((int )tmp___0) { tmp = PTR_ERR((void const *)sg); return ((int )tmp); } else { } obj->pages = sg; obj->has_dma_mapping = 1U; return (0); } } static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj ) { { dma_buf_unmap_attachment(obj->base.import_attach, obj->pages, 0); obj->has_dma_mapping = 0U; return; } } static struct drm_i915_gem_object_ops const i915_gem_object_dmabuf_ops = {& i915_gem_object_get_pages_dmabuf, & i915_gem_object_put_pages_dmabuf, 0, 0}; struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev , struct dma_buf *dma_buf ) { struct dma_buf_attachment *attach ; struct drm_i915_gem_object *obj ; int ret ; void *tmp ; bool tmp___0 ; void *tmp___1 ; void *tmp___2 ; { if ((unsigned long )dma_buf->ops == (unsigned long )(& i915_dmabuf_ops)) { obj = dma_buf_to_obj(dma_buf); if ((unsigned long )obj->base.dev == (unsigned long )dev) { drm_gem_object_reference(& obj->base); return (& obj->base); } else { } 
} else { } attach = dma_buf_attach(dma_buf, dev->dev); tmp___0 = IS_ERR((void const *)attach); if ((int )tmp___0) { tmp = ERR_CAST((void const *)attach); return ((struct drm_gem_object *)tmp); } else { } get_dma_buf(dma_buf); tmp___1 = i915_gem_object_alloc(dev); obj = (struct drm_i915_gem_object *)tmp___1; if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { ret = -12; goto fail_detach; } else { } drm_gem_private_object_init(dev, & obj->base, dma_buf->size); i915_gem_object_init(obj, & i915_gem_object_dmabuf_ops); obj->base.import_attach = attach; return (& obj->base); fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); tmp___2 = ERR_PTR((long )ret); return ((struct drm_gem_object *)tmp___2); } } extern int ldv_probe_141(void) ; void ldv_initialize_drm_i915_gem_object_ops_140(void) { void *tmp ; { tmp = ldv_init_zalloc(592UL); i915_gem_object_dmabuf_ops_group0 = (struct drm_i915_gem_object *)tmp; return; } } void ldv_initialize_dma_buf_ops_141(void) { void *tmp ; void *tmp___0 ; { tmp = __VERIFIER_nondet_pointer(); i915_dmabuf_ops_group0 = (struct dma_buf *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); i915_dmabuf_ops_group1 = (struct dma_buf_attachment *)tmp___0; return; } } void ldv_main_exported_141(void) { void *ldvarg446 ; void *tmp ; enum dma_data_direction ldvarg450 ; void *ldvarg447 ; void *tmp___0 ; struct sg_table *ldvarg445 ; void *tmp___1 ; unsigned long ldvarg457 ; unsigned long ldvarg449 ; size_t ldvarg452 ; enum dma_data_direction ldvarg444 ; enum dma_data_direction ldvarg453 ; unsigned long ldvarg456 ; struct vm_area_struct *ldvarg454 ; void *tmp___2 ; void *ldvarg455 ; void *tmp___3 ; size_t ldvarg451 ; unsigned long ldvarg448 ; int tmp___4 ; { tmp = ldv_init_zalloc(1UL); ldvarg446 = tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg447 = tmp___0; tmp___1 = ldv_init_zalloc(16UL); ldvarg445 = (struct sg_table *)tmp___1; tmp___2 = ldv_init_zalloc(184UL); ldvarg454 = (struct vm_area_struct *)tmp___2; tmp___3 = 
ldv_init_zalloc(1UL); ldvarg455 = tmp___3; ldv_memset((void *)(& ldvarg450), 0, 4UL); ldv_memset((void *)(& ldvarg457), 0, 8UL); ldv_memset((void *)(& ldvarg449), 0, 8UL); ldv_memset((void *)(& ldvarg452), 0, 8UL); ldv_memset((void *)(& ldvarg444), 0, 4UL); ldv_memset((void *)(& ldvarg453), 0, 4UL); ldv_memset((void *)(& ldvarg456), 0, 8UL); ldv_memset((void *)(& ldvarg451), 0, 8UL); ldv_memset((void *)(& ldvarg448), 0, 8UL); tmp___4 = __VERIFIER_nondet_int(); switch (tmp___4) { case 0: ; if (ldv_state_variable_141 == 2) { i915_gem_dmabuf_kmap_atomic(i915_dmabuf_ops_group0, ldvarg457); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_dmabuf_kmap_atomic(i915_dmabuf_ops_group0, ldvarg457); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 1: ; if (ldv_state_variable_141 == 2) { i915_gem_dmabuf_kunmap(i915_dmabuf_ops_group0, ldvarg456, ldvarg455); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_dmabuf_kunmap(i915_dmabuf_ops_group0, ldvarg456, ldvarg455); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 2: ; if (ldv_state_variable_141 == 2) { i915_gem_dmabuf_mmap(i915_dmabuf_ops_group0, ldvarg454); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_dmabuf_mmap(i915_dmabuf_ops_group0, ldvarg454); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 3: ; if (ldv_state_variable_141 == 2) { i915_gem_begin_cpu_access(i915_dmabuf_ops_group0, ldvarg452, ldvarg451, ldvarg453); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_begin_cpu_access(i915_dmabuf_ops_group0, ldvarg452, ldvarg451, ldvarg453); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 4: ; if (ldv_state_variable_141 == 2) { i915_gem_dmabuf_vmap(i915_dmabuf_ops_group0); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_dmabuf_vmap(i915_dmabuf_ops_group0); ldv_state_variable_141 = 1; } else { } goto ldv_48361; 
case 5: ; if (ldv_state_variable_141 == 2) { drm_gem_dmabuf_release(i915_dmabuf_ops_group0); ldv_state_variable_141 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48361; case 6: ; if (ldv_state_variable_141 == 2) { i915_gem_map_dma_buf(i915_dmabuf_ops_group1, ldvarg450); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_map_dma_buf(i915_dmabuf_ops_group1, ldvarg450); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 7: ; if (ldv_state_variable_141 == 2) { i915_gem_dmabuf_kmap(i915_dmabuf_ops_group0, ldvarg449); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_dmabuf_kmap(i915_dmabuf_ops_group0, ldvarg449); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 8: ; if (ldv_state_variable_141 == 2) { i915_gem_dmabuf_kunmap_atomic(i915_dmabuf_ops_group0, ldvarg448, ldvarg447); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_dmabuf_kunmap_atomic(i915_dmabuf_ops_group0, ldvarg448, ldvarg447); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 9: ; if (ldv_state_variable_141 == 2) { i915_gem_dmabuf_vunmap(i915_dmabuf_ops_group0, ldvarg446); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_dmabuf_vunmap(i915_dmabuf_ops_group0, ldvarg446); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 10: ; if (ldv_state_variable_141 == 2) { i915_gem_unmap_dma_buf(i915_dmabuf_ops_group1, ldvarg445, ldvarg444); ldv_state_variable_141 = 2; } else { } if (ldv_state_variable_141 == 1) { i915_gem_unmap_dma_buf(i915_dmabuf_ops_group1, ldvarg445, ldvarg444); ldv_state_variable_141 = 1; } else { } goto ldv_48361; case 11: ; if (ldv_state_variable_141 == 1) { ldv_probe_141(); ldv_state_variable_141 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48361; default: ldv_stop(); } ldv_48361: ; return; } } void ldv_main_exported_140(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if 
(ldv_state_variable_140 == 1) { i915_gem_object_put_pages_dmabuf(i915_gem_object_dmabuf_ops_group0); ldv_state_variable_140 = 1; } else { } goto ldv_48378; case 1: ; if (ldv_state_variable_140 == 1) { i915_gem_object_get_pages_dmabuf(i915_gem_object_dmabuf_ops_group0); ldv_state_variable_140 = 1; } else { } goto ldv_48378; default: ldv_stop(); } ldv_48378: ; return; } } bool ldv_queue_work_on_235(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_236(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_237(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_238(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_239(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long 
ldv__builtin_expect(long exp , long c ) ; __inline static void list_move(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add(list, head); return; } } __inline static unsigned long arch_local_save_flags___4(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static bool static_key_false___2(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int rcu_read_lock_sched_held___2(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { 
lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___4(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } bool ldv_queue_work_on_249(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_251(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_250(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_253(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_252(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void kref_get___2(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub___3(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___3(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___3(kref, 1U, release); return (tmp); } } extern void drm_mm_init_scan(struct drm_mm * , u64 , unsigned int , unsigned long ) ; extern void drm_mm_init_scan_with_range(struct drm_mm * , u64 , unsigned int , unsigned long , u64 , u64 ) ; extern bool drm_mm_scan_add_block(struct drm_mm_node * ) ; extern bool drm_mm_scan_remove_block(struct drm_mm_node * ) ; __inline static void drm_gem_object_reference___0(struct drm_gem_object *obj ) { { kref_get___2(& obj->refcount); return; } } __inline static void drm_gem_object_unreference___3(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___3(& obj->refcount, & drm_gem_object_free); } else { } return; } } bool intel_has_pending_fb_unpin(struct drm_device *dev ) ; struct tracepoint __tracepoint_i915_gem_evict ; __inline static void trace_i915_gem_evict(struct drm_device *dev , u32 size , u32 align , unsigned int flags ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; 
/* Continuation of trace_i915_gem_evict(): if the tracepoint's static key is
 * enabled, walk the RCU-protected funcs array under rcu_read_lock_sched and
 * invoke each probe with (data, dev, size, align, flags); the second
 * lock/deref/unlock sequence is the expanded rcu_dereference lockdep check. */
struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_356 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_358 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___2(& __tracepoint_i915_gem_evict.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_evict.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___2(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 392, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45914: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_device * , u32 , u32 , unsigned int ))it_func))(__data, dev, size, align, flags); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45914; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_evict.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___2(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 392, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_evict_everything ;
/* trace_i915_gem_evict_everything(): same expanded tracepoint pattern,
 * probes are invoked with (data, dev) only. */
__inline static void trace_i915_gem_evict_everything(struct drm_device *dev ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_360 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_362 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___2(& __tracepoint_i915_gem_evict_everything.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_evict_everything.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___2(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 407, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45974: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_device * ))it_func))(__data, dev); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45974; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_evict_everything.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___2(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 407, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_evict_vm ;
/* trace_i915_gem_evict_vm(): same expanded tracepoint pattern for the
 * per-address-space eviction event; probes get (data, vm). */
__inline static void trace_i915_gem_evict_vm(struct i915_address_space *vm ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_364 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_366 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___2(& __tracepoint_i915_gem_evict_vm.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_evict_vm.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___2(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 424, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46025: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_address_space * ))it_func))(__data, vm); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46025; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_evict_vm.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___2(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 424, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } int i915_vma_unbind(struct i915_vma *vma ) ; int i915_gem_evict_something(struct drm_device *dev , struct i915_address_space *vm , int min_size , unsigned int alignment , unsigned int cache_level , unsigned long start , unsigned long end , unsigned int flags ) ; int i915_gem_evict_vm(struct i915_address_space *vm , bool do_idle ) ; int i915_gem_evict_everything(struct drm_device *dev ) ;
/* mark_free(): scan helper — skips pinned VMAs (the byte-offset flag test at
 * +232UL is CIL's expansion of vma->pin_count), WARNs if the VMA is already on
 * an exec_list, then adds it to the unwind list and the drm_mm scan. */
static bool mark_free(struct i915_vma *vma , struct list_head *unwind ) { int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; bool tmp___2 ; { if ((unsigned int )*((unsigned char *)vma + 232UL) != 0U) { return (0); } else { } tmp = list_empty((struct list_head const *)(& vma->exec_list)); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_evict.c", 42, "WARN_ON(!list_empty(&vma->exec_list))"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return (0); } else { } list_add(& vma->exec_list, unwind); tmp___2 = drm_mm_scan_add_block(& vma->node); return (tmp___2); } }
/* i915_gem_evict_something(): evicts enough VMAs from vm to free a hole of
 * min_size/alignment — locals continue on the next chunk line. */
int i915_gem_evict_something(struct drm_device *dev , struct i915_address_space *vm , int min_size , unsigned int alignment , unsigned int cache_level , unsigned long start , unsigned long end , unsigned int flags ) { struct list_head eviction_list ; struct list_head unwind_list ; struct i915_vma *vma ; int ret ; int pass ; struct list_head const
/* Continuation of i915_gem_evict_something(): initialize a drm_mm scan (ranged
 * if start/end do not cover the whole vm), walk the inactive list (then the
 * active list unless flags bit 1 restricts to unmappable/inactive-only),
 * mark_free()ing candidates. If no hole is found, unwind the scan, and on the
 * first pass idle the GPU + retire requests and retry; otherwise return -28
 * (-ENOSPC) or -11 (-EAGAIN) if a framebuffer unpin is pending. On success,
 * move scan hits to eviction_list (taking a GEM reference each) and unbind
 * them. The 0xffffffffffffff68UL / 0xffffffffffffff48UL offsets are CIL's
 * container_of adjustments for vma->mm_list / vma->exec_list; the ud2 asm is
 * the expanded BUG_ON(drm_mm_scan_remove_block failure) at i915_gem_evict.c:140. */
*__mptr ; bool tmp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; bool tmp___0 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; bool tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; bool tmp___5 ; struct list_head const *__mptr___4 ; bool tmp___6 ; int tmp___7 ; struct drm_gem_object *obj ; struct list_head const *__mptr___5 ; int tmp___8 ; { ret = 0; pass = 0; trace_i915_gem_evict(dev, (u32 )min_size, alignment, flags); INIT_LIST_HEAD(& unwind_list); if (start != 0UL || vm->total != end) { drm_mm_init_scan_with_range(& vm->mm, (u64 )min_size, alignment, (unsigned long )cache_level, (u64 )start, (u64 )end); } else { drm_mm_init_scan(& vm->mm, (u64 )min_size, alignment, (unsigned long )cache_level); } search_again: __mptr = (struct list_head const *)vm->inactive_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff68UL; goto ldv_48005; ldv_48004: tmp = mark_free(vma, & unwind_list); if ((int )tmp) { goto found; } else { } __mptr___0 = (struct list_head const *)vma->mm_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff68UL; ldv_48005: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )(& vm->inactive_list)) { goto ldv_48004; } else { } if ((flags & 2U) != 0U) { goto none; } else { } __mptr___1 = (struct list_head const *)vm->active_list.next; vma = (struct i915_vma *)__mptr___1 + 0xffffffffffffff68UL; goto ldv_48013; ldv_48012: tmp___0 = mark_free(vma, & unwind_list); if ((int )tmp___0) { goto found; } else { } __mptr___2 = (struct list_head const *)vma->mm_list.next; vma = (struct i915_vma *)__mptr___2 + 0xffffffffffffff68UL; ldv_48013: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )(& vm->active_list)) { goto ldv_48012; } else { } none: ; goto ldv_48019; ldv_48018: __mptr___3 = (struct list_head const *)unwind_list.next; vma = (struct i915_vma *)__mptr___3 + 0xffffffffffffff48UL; tmp___1 = drm_mm_scan_remove_block(& vma->node); ret = (int )tmp___1; tmp___2 = ldv__builtin_expect(ret != 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_evict.c"), "i" (140), "i" (12UL)); ldv_48017: ; goto ldv_48017; } else { } list_del_init(& vma->exec_list); ldv_48019: tmp___3 = list_empty((struct list_head const *)(& unwind_list)); if (tmp___3 == 0) { goto ldv_48018; } else { } if ((flags & 2U) != 0U) { return (-28); } else { } tmp___4 = pass; pass = pass + 1; if (tmp___4 == 0) { ret = i915_gpu_idle(dev); if (ret != 0) { return (ret); } else { } i915_gem_retire_requests(dev); goto search_again; } else { } tmp___5 = intel_has_pending_fb_unpin(dev); return ((int )tmp___5 ? -11 : -28); found: INIT_LIST_HEAD(& eviction_list); goto ldv_48023; ldv_48024: __mptr___4 = (struct list_head const *)unwind_list.next; vma = (struct i915_vma *)__mptr___4 + 0xffffffffffffff48UL; tmp___6 = drm_mm_scan_remove_block(& vma->node); if ((int )tmp___6) { list_move(& vma->exec_list, & eviction_list); drm_gem_object_reference___0(& (vma->obj)->base); goto ldv_48023; } else { } list_del_init(& vma->exec_list); ldv_48023: tmp___7 = list_empty((struct list_head const *)(& unwind_list)); if (tmp___7 == 0) { goto ldv_48024; } else { } goto ldv_48030; ldv_48029: __mptr___5 = (struct list_head const *)eviction_list.next; vma = (struct i915_vma *)__mptr___5 + 0xffffffffffffff48UL; obj = & (vma->obj)->base; list_del_init(& vma->exec_list); if (ret == 0) { ret = i915_vma_unbind(vma); } else { } drm_gem_object_unreference___3(obj); ldv_48030: tmp___8 = list_empty((struct list_head const *)(& eviction_list)); if (tmp___8 == 0) { goto ldv_48029; } else { } return (ret); } }
/* i915_gem_evict_vm(): body begins on the next chunk line. */
int i915_gem_evict_vm(struct i915_address_space *vm , bool do_idle )
{ struct i915_vma *vma ; struct i915_vma *next ; int ret ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; int tmp___1 ; long tmp___2 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int __ret_warn_on___1 ; int tmp___3 ; long tmp___4 ; struct list_head const *__mptr___1 ; { tmp = mutex_is_locked(& (vm->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_evict.c", 221, "WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); trace_i915_gem_evict_vm(vm); if ((int )do_idle) { ret = i915_gpu_idle(vm->dev); if (ret != 0) { return (ret); } else { } i915_gem_retire_requests(vm->dev); tmp___1 = list_empty((struct list_head const *)(& vm->active_list)); __ret_warn_on___0 = tmp___1 == 0; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_evict.c", 231, "WARN_ON(!list_empty(&vm->active_list))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { } __mptr = (struct list_head const *)vm->inactive_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff68UL; __mptr___0 = (struct list_head const *)vma->mm_list.next; next = (struct i915_vma *)__mptr___0 + 0xffffffffffffff68UL; goto ldv_48052; ldv_48051: ; if ((unsigned int )*((unsigned char *)vma + 232UL) == 0U) { tmp___3 = i915_vma_unbind(vma); __ret_warn_on___1 = tmp___3 != 0; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_evict.c", 236, "WARN_ON(i915_vma_unbind(vma))"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } else { } vma = next; __mptr___1 = (struct list_head const *)next->mm_list.next; next = (struct i915_vma *)__mptr___1 + 0xffffffffffffff68UL; ldv_48052: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )(& vm->inactive_list)) { goto ldv_48051; } else { } return (0); } } int i915_gem_evict_everything(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct i915_address_space *vm ; struct i915_address_space *v ; bool lists_empty ; int ret ; struct list_head const *__mptr ; int tmp ; int tmp___0 ; int tmp___1 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; int __ret_warn_on ; int tmp___2 ; long tmp___3 ; struct list_head const *__mptr___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; lists_empty = 1; __mptr = (struct list_head const *)dev_priv->vm_list.next; vm = (struct i915_address_space *)__mptr + 0xffffffffffffff50UL; goto ldv_48067; ldv_48066: tmp = list_empty((struct list_head const *)(& vm->inactive_list)); if (tmp != 0) { tmp___0 = list_empty((struct list_head const *)(& vm->active_list)); if (tmp___0 != 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } lists_empty = (bool )tmp___1; if (! 
lists_empty) { lists_empty = 0; } else { } __mptr___0 = (struct list_head const *)vm->global_link.next; vm = (struct i915_address_space *)__mptr___0 + 0xffffffffffffff50UL; ldv_48067: ; if ((unsigned long )(& vm->global_link) != (unsigned long )(& dev_priv->vm_list)) { goto ldv_48066; } else { } if ((int )lists_empty) { return (-28); } else { } trace_i915_gem_evict_everything(dev); ret = i915_gpu_idle(dev); if (ret != 0) { return (ret); } else { } i915_gem_retire_requests(dev); __mptr___1 = (struct list_head const *)dev_priv->vm_list.next; vm = (struct i915_address_space *)__mptr___1 + 0xffffffffffffff50UL; __mptr___2 = (struct list_head const *)vm->global_link.next; v = (struct i915_address_space *)__mptr___2 + 0xffffffffffffff50UL; goto ldv_48078; ldv_48077: tmp___2 = i915_gem_evict_vm(vm, 0); __ret_warn_on = tmp___2 != 0; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_evict.c", 281, "WARN_ON(i915_gem_evict_vm(vm, false))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); vm = v; __mptr___3 = (struct list_head const *)v->global_link.next; v = (struct i915_address_space *)__mptr___3 + 0xffffffffffffff50UL; ldv_48078: ; if ((unsigned long )(& vm->global_link) != (unsigned long )(& dev_priv->vm_list)) { goto ldv_48077; } else { } return (0); } } bool ldv_queue_work_on_249(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_250(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long 
ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_251(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_252(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_253(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void __list_splice(struct list_head const *list , struct list_head *prev , struct list_head *next ) { struct list_head *first ; struct list_head *last ; { first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; return; } } __inline static void list_splice(struct list_head const *list , struct list_head *head ) { int tmp ; { tmp = list_empty(list); if (tmp == 0) { __list_splice(list, head, head->next); } else { } return; } } __inline static unsigned long arch_local_save_flags___5(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = 
ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; extern struct cpuinfo_x86 boot_cpu_data ; __inline static bool static_key_false___3(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int rcu_read_lock_sched_held___3(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___5(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 
= 1; } else { tmp___6 = 0; } } } return (tmp___6); } } bool ldv_queue_work_on_263(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_265(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_264(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_267(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_266(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void *idr_find___0(struct idr *idr , int id ) { struct idr_layer *hint ; struct idr_layer *________p1 ; struct idr_layer *_________p1 ; union __anonunion___u_168___0 __u ; int tmp ; struct idr_layer *________p1___0 ; struct idr_layer *_________p1___0 ; union __anonunion___u_170___0 __u___0 ; int tmp___0 ; void *tmp___1 ; { __read_once_size((void const volatile *)(& idr->hint), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); hint = ________p1; if ((unsigned long )hint != (unsigned long )((struct idr_layer *)0) && (id & -256) == hint->prefix) { __read_once_size((void const volatile *)(& hint->ary) + ((unsigned long )id & 255UL), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___0 = debug_lockdep_rcu_enabled(); return ((void *)________p1___0); } else { } tmp___1 = idr_find_slowpath(idr, id); return (tmp___1); } } __inline static void kref_get___3(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on 
= ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub___4(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___4(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___4(kref, 1U, release); return (tmp); } } extern int __get_user_bad(void) ; extern unsigned long copy_user_enhanced_fast_string(void * , void const * , unsigned int ) ; extern unsigned long copy_user_generic_string(void * , void const * , unsigned int ) ; extern unsigned long copy_user_generic_unrolled(void * , void const * , unsigned int ) ; __inline static unsigned long copy_user_generic(void *to , void const *from , unsigned int len ) { unsigned int ret ; { __asm__ volatile ("661:\n\tcall %P4\n662:\n.skip -((((6651f-6641f) ^ (((6651f-6641f) ^ (6652f-6642f)) & -(-((6651f-6641f) - (6652f-6642f))))) - (662b-661b)) > 0) * (((6651f-6641f) ^ (((6651f-6641f) ^ (6652f-6642f)) & -(-((6651f-6641f) - (6652f-6642f))))) - (662b-661b)), 0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 3*32+16)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n .long 661b - .\n .long 6642f - .\n .word ( 9*32+ 9)\n .byte 663b-661b\n .byte 6652f-6642f\n .byte 
663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\tcall %P5\n6651:\n\t6642:\n\tcall %P6\n6652:\n\t.popsection": "=a" (ret), "=D" (to), "=S" (from), "=d" (len): [old] "i" (& copy_user_generic_unrolled), [new1] "i" (& copy_user_generic_string), [new2] "i" (& copy_user_enhanced_fast_string), "1" (to), "2" (from), "3" (len): "memory", "rcx", "r8", "r9", "r10", "r11"); return ((unsigned long )ret); } } __inline static int __copy_from_user_nocheck(void *dst , void const *src , unsigned int size ) { int ret ; unsigned long tmp ; long tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; { ret = 0; tmp = copy_user_generic(dst, src, size); return ((int )tmp); switch (size) { case 1U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %2,%b1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorb %b1,%b1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=q" (*((u8 *)dst)): "m" (*((struct __large_struct *)src)), "i" (1), "0" (ret)); return (ret); case 2U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n 
.byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %2,%w1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=r" (*((u16 *)dst)): "m" (*((struct __large_struct *)src)), "i" (2), "0" (ret)); return (ret); case 4U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %2,%k1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorl %k1,%k1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=r" (*((u32 *)dst)): "m" (*((struct __large_struct *)src)), "i" (4), "0" (ret)); return (ret); case 8U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 
661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=r" (*((u64 *)dst)): "m" (*((struct __large_struct *)src)), "i" (8), "0" (ret)); return (ret); case 10U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=r" (*((u64 *)dst)): "m" (*((struct __large_struct *)src)), "i" (10), "0" (ret)); tmp___0 = ldv__builtin_expect(ret != 0, 0L); if (tmp___0 != 0L) { return (ret); } else { } __asm__ 
volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %2,%w1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=r" (*((u16 *)dst + 8U)): "m" (*((struct __large_struct *)src + 8U)), "i" (2), "0" (ret)); return (ret); case 16U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=r" (*((u64 
*)dst)): "m" (*((struct __large_struct *)src)), "i" (16), "0" (ret)); tmp___1 = ldv__builtin_expect(ret != 0, 0L); if (tmp___1 != 0L) { return (ret); } else { } __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret), "=r" (*((u64 *)dst + 8U)): "m" (*((struct __large_struct *)src + 8U)), "i" (8), "0" (ret)); return (ret); default: tmp___2 = copy_user_generic(dst, src, size); return ((int )tmp___2); } } } __inline static int __copy_to_user_nocheck(void *dst , void const *src , unsigned int size ) { int ret ; unsigned long tmp ; long tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; { ret = 0; tmp = copy_user_generic(dst, src, size); return ((int )tmp); switch (size) { case 1U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip 
-(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "iq" (*((u8 *)src)), "m" (*((struct __large_struct *)dst)), "i" (1), "0" (ret)); return (ret); case 2U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "ir" (*((u16 *)src)), "m" (*((struct __large_struct *)dst)), "i" (2), "0" (ret)); return (ret); case 4U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 
0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "ir" (*((u32 *)src)), "m" (*((struct __large_struct *)dst)), "i" (4), "0" (ret)); return (ret); case 8U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "er" (*((u64 *)src)), "m" (*((struct __large_struct *)dst)), "i" (8), "0" (ret)); return (ret); case 10U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 
663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "er" (*((u64 *)src)), "m" (*((struct __large_struct *)dst)), "i" (10), "0" (ret)); tmp___0 = ldv__builtin_expect(ret != 0, 0L); if (tmp___0 != 0L) { return (ret); } else { } __asm__ volatile ("": : : "memory"); __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "ir" (*((u16 *)src + 4UL)), "m" (*((struct __large_struct *)dst + 4U)), "i" (2), "0" (ret)); return (ret); case 16U: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * 
((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "er" (*((u64 *)src)), "m" (*((struct __large_struct *)dst)), "i" (16), "0" (ret)); tmp___1 = ldv__builtin_expect(ret != 0, 0L); if (tmp___1 != 0L) { return (ret); } else { } __asm__ volatile ("": : : "memory"); __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (ret): "er" (*((u64 *)src + 
/* NOTE(review): tail of __copy_to_user_nocheck() — the definition begins
 * before this chunk. 8-byte fixed-size case, then copy_user_generic()
 * fallback for arbitrary sizes. */
1UL)), "m" (*((struct __large_struct *)dst + 1U)), "i" (8), "0" (ret)); return (ret); default: tmp___2 = copy_user_generic(dst, src, size); return ((int )tmp___2); } } }
/* Checked copy to user space; may fault and sleep (__might_fault). */
__inline static int __copy_to_user(void *dst , void const *src , unsigned int size ) { int tmp ; { __might_fault("./arch/x86/include/asm/uaccess_64.h", 147); tmp = __copy_to_user_nocheck(dst, src, size); return (tmp); } }
/* Copy from user space without the might-fault check (atomic context). */
__inline static int __copy_from_user_inatomic(void *dst , void const *src , unsigned int size ) { int tmp ; { tmp = __copy_from_user_nocheck(dst, src, size); return (tmp); } }
/* Copy to user space without the might-fault check (atomic context). */
__inline static int __copy_to_user_inatomic(void *dst , void const *src , unsigned int size ) { int tmp ; { tmp = __copy_to_user_nocheck(dst, src, size); return (tmp); } }
/* Fault in the user range [uaddr, uaddr + size): read one byte per 4 KiB
 * page (CIL-expanded __get_user asm with SMAP alternatives and an
 * exception-table fixup) so a later pagefault-disabled copy cannot fault.
 * Returns 0 on success, or the fixup error (-14, i.e. -EFAULT) from the
 * first probe that faults. The switch(1UL) always takes case 1UL; the
 * other cases are dead expansion residue of the size-generic macro. */
__inline static int fault_in_multipages_readable(char const *uaddr , int size ) { char volatile c ; int ret ; char const *end ; long tmp ; int __gu_err ; unsigned long __gu_val ; int tmp___0 ; int __gu_err___0 ; unsigned long __gu_val___0 ; int tmp___1 ; { ret = 0; end = uaddr + ((unsigned long )size + 0xffffffffffffffffUL); tmp = ldv__builtin_expect(size == 0, 0L); if (tmp != 0L) { return (ret); } else { } goto ldv_34231; ldv_34230: __gu_err = 0; switch (1UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %2,%b1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorb %b1,%b1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=q" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34224; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %2,%w1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=r" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34224; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %2,%k1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorl %k1,%k1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=r" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34224; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=r" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34224; default: tmp___0 = __get_user_bad(); __gu_val = (unsigned long )tmp___0; } ldv_34224: c = (char )__gu_val; ret = __gu_err; if (ret != 0) { return (ret); } else { }
/* advance the probe by one page (4096 bytes) */
uaddr = uaddr + 4096UL; ldv_34231: ; if ((unsigned long )uaddr <= (unsigned long )end) { goto ldv_34230; } else { }
/* If the last byte lies on the same page as uaddr ended up on, probe it
 * too (same expanded __get_user pattern, reading from 'end'). */
if ((((unsigned long )uaddr ^ (unsigned long )end) & 0xfffffffffffff000UL) == 0UL) { __gu_err___0 = 0; switch (1UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %2,%b1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorb %b1,%b1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=q" (__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34236; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %2,%w1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=r" (__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34236; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %2,%k1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorl %k1,%k1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=r" (__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34236; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=r"
/* NOTE(review): tail of fault_in_multipages_readable() — final probe of the
 * byte at 'end' (same page as the loop cursor), then return. */
(__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34236; default: tmp___1 = __get_user_bad(); __gu_val___0 = (unsigned long )tmp___1; } ldv_34236: c = (char )__gu_val___0; ret = __gu_err___0; } else { } return (ret); } }
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring ) ;
/* Return ring->outstanding_lazy_request; BUG (CIL-expanded ud2 + endless
 * goto loop) if it is NULL — callers must have allocated a request first. */
__inline static struct drm_i915_gem_request *intel_ring_get_request(struct intel_engine_cs *ring ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )ring->outstanding_lazy_request == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/intel_ringbuffer.h"), "i" (450), "i" (12UL)); ldv_40718: ; goto ldv_40718; } else { } return (ring->outstanding_lazy_request); } }
int intel_lr_context_deferred_create(struct intel_context *ctx , struct intel_engine_cs *ring ) ;
/* Map 'offset' within an io_mapping for atomic-context access: raises the
 * preempt count and disables pagefaults; caller must unmap promptly. */
__inline static void *io_mapping_map_atomic_wc(struct io_mapping *mapping , unsigned long offset ) { { __preempt_count_add(1); __asm__ volatile ("": : : "memory"); pagefault_disable(); return ((void *)mapping + offset); } }
extern void intel_gtt_chipset_flush(void) ;
/* Take a reference on a GEM object (kref_get on obj->refcount). */
__inline static void drm_gem_object_reference___1(struct drm_gem_object *obj ) { { kref_get___3(& obj->refcount); return; } }
/* Drop a GEM object reference; NULL-safe; drm_gem_object_free on last put. */
__inline static void drm_gem_object_unreference___4(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___4(& obj->refcount, & drm_gem_object_free); } else { } return; } }
/* NULL-safe kref_get on a request; returns req unchanged. */
__inline static struct drm_i915_gem_request *i915_gem_request_reference___0(struct drm_i915_gem_request *req ) { { if ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0)) { kref_get___3(& req->ref); } else { } return (req); } }
/* Drop a request reference; WARNs if dev->struct_mutex is not held. */
__inline static void
i915_gem_request_unreference(struct drm_i915_gem_request *req ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& ((req->ring)->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2216, "WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); kref_put___4(& req->ref, & i915_gem_request_free); return; } }
/* *pdst = src with reference bookkeeping: reference the new request (if
 * any), unreference the old one (if any), then store. */
__inline static void i915_gem_request_assign(struct drm_i915_gem_request **pdst , struct drm_i915_gem_request *src ) { { if ((unsigned long )src != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_reference___0(src); } else { } if ((unsigned long )*pdst != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_unreference(*pdst); } else { } *pdst = src; return; } }
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , enum fb_op_origin origin ) ;
struct tracepoint __tracepoint_i915_gem_object_change_domain ;
/* CIL-expanded tracepoint stub: when the static key is enabled, walk the
 * registered probe functions under rcu_sched and invoke each with
 * (obj, old_read, old_write). The trailing second rcu-dereference block
 * appears to be dead macro-expansion residue — it reads funcs and returns. */
__inline static void trace_i915_gem_object_change_domain(struct drm_i915_gem_object *obj , u32 old_read , u32 old_write ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_332 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_334 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___3(& __tracepoint_i915_gem_object_change_domain.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_change_domain.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___3(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 279, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45561: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * , u32 , u32 ))it_func))(__data, obj, old_read, old_write); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45561; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_change_domain.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___3(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 279, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } }
struct tracepoint __tracepoint_i915_gem_ring_dispatch ;
/* CIL-expanded tracepoint stub for ring dispatch; same structure as the
 * change-domain stub above, probes invoked with (req, flags). */
__inline static void trace_i915_gem_ring_dispatch(struct drm_i915_gem_request *req , u32 flags ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_372 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_374 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___3(& __tracepoint_i915_gem_ring_dispatch.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_ring_dispatch.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___3(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 475, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46139: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_request * , u32 ))it_func))(__data, req, flags); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46139; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_ring_dispatch.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___3(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 475, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } }
void i915_gem_execbuffer_move_to_active(struct list_head *vmas , struct intel_engine_cs *ring ) ;
void i915_gem_execbuffer_retire_commands(struct drm_device *dev , struct drm_file *file , struct intel_engine_cs *ring , struct drm_i915_gem_object *obj ) ;
int i915_gem_ringbuffer_submission(struct drm_device *dev , struct drm_file *file , struct intel_engine_cs *ring , struct intel_context *ctx , struct drm_i915_gem_execbuffer2 *args , struct list_head *vmas , struct drm_i915_gem_object *batch_obj , u64 exec_start , u32 dispatch_flags ) ;
int i915_gem_execbuffer(struct drm_device *dev , void *data , struct drm_file *file ) ;
int i915_gem_execbuffer2(struct drm_device *dev , void *data , struct drm_file *file ) ;
int i915_vma_bind(struct i915_vma *vma , enum i915_cache_level cache_level , u32 flags ) ;
/* Return the n-th backing page of obj by walking its scatter-gather table.
 * Uses the cached cursor (obj->get_page.sg / .last) so sequential lookups
 * are O(1); rewinds when n is behind the cursor; follows sg chain links
 * (page_link bit 0). WARNs and returns NULL when n is past the object. */
__inline static struct page *i915_gem_object_get_page___0(struct drm_i915_gem_object *obj , int n ) { int __ret_warn_on ; long tmp ; long tmp___0 ; struct scatterlist *tmp___1 ; int tmp___2 ; long tmp___3 ; int tmp___4 ; struct page *tmp___5 ; { __ret_warn_on = (size_t )n >= obj->base.size >> 12; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2754, "WARN_ON(n >= obj->base.size >> PAGE_SHIFT)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return ((struct page *)0); } else { } if (obj->get_page.last > n) { obj->get_page.sg = (obj->pages)->sgl; obj->get_page.last = 0; } else { } goto ldv_47336; ldv_47335: tmp___1 = obj->get_page.sg; obj->get_page.sg = obj->get_page.sg + 1; tmp___2 = __sg_page_count(tmp___1); obj->get_page.last = obj->get_page.last + tmp___2; tmp___3 = ldv__builtin_expect((long )((int )(obj->get_page.sg)->page_link) & 1L, 0L); if (tmp___3 != 0L) { obj->get_page.sg = (struct scatterlist *)((obj->get_page.sg)->page_link & 0xfffffffffffffffcUL); } else { } ldv_47336: tmp___4 = __sg_page_count(obj->get_page.sg); if (obj->get_page.last + tmp___4 <= n) { goto ldv_47335; } else { } tmp___5 = sg_page___2(obj->get_page.sg); return ((struct page *)-24189255811072L + ((unsigned long )(((long )tmp___5 + 24189255811072L) / 64L) + (unsigned long )(n - obj->get_page.last))); } }
int i915_gem_object_sync(struct drm_i915_gem_object *obj , struct intel_engine_cs *to ) ;
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj ) ;
int i915_gem_object_put_fence(struct drm_i915_gem_object *obj ) ;
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj ) ;
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj ) ;
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring ) ;
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj , bool force ) ;
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o , struct i915_address_space *vm ) ;
struct i915_vma *i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj , struct i915_address_space *vm ) ;
/* Take a reference on a logical GPU context. */
__inline static void i915_gem_context_reference___0(struct intel_context *ctx ) { { kref_get___3(& ctx->ref); return; } }
/* Drop a context reference; freed via i915_gem_context_free on last put. */
__inline static void i915_gem_context_unreference___0(struct intel_context *ctx ) { { kref_put___4(& ctx->ref, & i915_gem_context_free); return; } }
/* Flush chipset GTT writes; only needed on GEN <= 5 hardware. */
__inline static void i915_gem_chipset_flush(struct drm_device *dev ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { intel_gtt_chipset_flush(); } else { } return; } }
/* Convert a u64 ioctl field into a user-space pointer. */
__inline static void *to_user_ptr(u64 address ) { { return ((void
/* NOTE(review): tail of to_user_ptr() — definition starts on the previous
 * line. */
*)address); } }
/* Allocate the execbuffer VMA lookup structure. With I915_EXEC_HANDLE_LUT
 * (flags bit 4096) try a flat lookup table sized by buffer_count; on
 * fallback (or without the flag) allocate a hash table whose bucket count
 * is the largest power of two <= 256 not exceeding 2*buffer_count.
 * eb->and < 0 encodes LUT mode (-buffer_count), >= 0 is the hash mask. */
static struct eb_vmas *eb_create(struct drm_i915_gem_execbuffer2 *args ) { struct eb_vmas *eb ; unsigned int size ; void *tmp ; unsigned int size___0 ; unsigned int count ; void *tmp___0 ; { eb = (struct eb_vmas *)0; if ((args->flags & 4096ULL) != 0ULL) { size = args->buffer_count; size = size * 8U; size = size + 24U; tmp = kmalloc((size_t )size, 529104U); eb = (struct eb_vmas *)tmp; } else { } if ((unsigned long )eb == (unsigned long )((struct eb_vmas *)0)) { size___0 = args->buffer_count; count = 256U; goto ldv_47992; ldv_47991: count = count >> 1; ldv_47992: ; if (size___0 * 2U < count) { goto ldv_47991; } else { } tmp___0 = kzalloc(((unsigned long )count + 3UL) * 8UL, 524496U); eb = (struct eb_vmas *)tmp___0; if ((unsigned long )eb == (unsigned long )((struct eb_vmas *)0)) { return (eb); } else { } eb->and = (int )(count - 1U); } else { eb->and = (int )(- args->buffer_count); } INIT_LIST_HEAD(& eb->vmas); return (eb); } }
/* Zero the hash buckets for reuse; no-op in LUT mode (eb->and < 0). */
static void eb_reset(struct eb_vmas *eb ) { { if (eb->and >= 0) { memset((void *)(& eb->__annonCompField85.buckets), 0, (unsigned long )(eb->and + 1) * 8UL); } else { } return; } }
/* Resolve every exec-object handle to a GEM object (under file->table_lock),
 * rejecting unknown handles (-2/-ENOENT) and duplicates (-22/-EINVAL); then
 * look up or create a VMA for each object in 'vm' and index it either by
 * position (LUT mode) or by handle hash. On any failure, drop the object
 * references taken so far and return the error. */
static int eb_lookup_vmas(struct eb_vmas *eb , struct drm_i915_gem_exec_object2 *exec , struct drm_i915_gem_execbuffer2 const *args , struct i915_address_space *vm , struct drm_file *file ) { struct drm_i915_gem_object *obj ; struct list_head objects ; int i ; int ret ; struct drm_gem_object const *__mptr ; void *tmp ; long tmp___0 ; long tmp___1 ; int tmp___2 ; struct i915_vma *vma ; struct list_head const *__mptr___0 ; long tmp___3 ; long tmp___4 ; bool tmp___5 ; uint32_t handle ; int tmp___6 ; struct list_head const *__mptr___1 ; int tmp___7 ; { INIT_LIST_HEAD(& objects); spin_lock(& file->table_lock); i = 0; goto ldv_48013; ldv_48012: tmp = idr_find___0(& file->object_idr, (int )(exec + (unsigned long )i)->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { spin_unlock(& file->table_lock); tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("eb_lookup_vmas", "Invalid object handle %d at index %d\n", (exec + (unsigned long )i)->handle, i); } else { } ret = -2; goto err; } else { } tmp___2 = list_empty((struct list_head const *)(& obj->obj_exec_link)); if (tmp___2 == 0) { spin_unlock(& file->table_lock); tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("eb_lookup_vmas", "Object %p [handle %d, index %d] appears more than once in object list\n", obj, (exec + (unsigned long )i)->handle, i); } else { } ret = -22; goto err; } else { } drm_gem_object_reference___1(& obj->base); list_add_tail(& obj->obj_exec_link, & objects); i = i + 1; ldv_48013: ; if ((unsigned int )i < (unsigned int )args->buffer_count) { goto ldv_48012; } else { } spin_unlock(& file->table_lock); i = 0; goto ldv_48020; ldv_48019: __mptr___0 = (struct list_head const *)objects.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffe88UL; vma = i915_gem_obj_lookup_or_create_vma(obj, vm); tmp___5 = IS_ERR((void const *)vma); if ((int )tmp___5) { tmp___3 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("eb_lookup_vmas", "Failed to lookup VMA\n"); } else { } tmp___4 = PTR_ERR((void const *)vma); ret = (int )tmp___4; goto err; } else { } list_add_tail(& vma->exec_list, & eb->vmas); list_del_init(& obj->obj_exec_link); vma->exec_entry = exec + (unsigned long )i; if (eb->and < 0) { eb->__annonCompField85.lut[i] = vma; } else { handle = ((unsigned long long )args->flags & 4096ULL) != 0ULL ? (__u32 )i : (exec + (unsigned long )i)->handle; vma->exec_handle = (unsigned long )handle; hlist_add_head(& vma->exec_node, (struct hlist_head *)(& eb->__annonCompField85.buckets) + (unsigned long )((uint32_t )eb->and & handle)); } i = i + 1; ldv_48020: tmp___6 = list_empty((struct list_head const *)(& objects)); if (tmp___6 == 0) { goto ldv_48019; } else { } return (0); err: ; goto ldv_48025; ldv_48024: __mptr___1 = (struct list_head const *)objects.next; obj = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffe88UL; list_del_init(& obj->obj_exec_link); drm_gem_object_unreference___4(& obj->base); ldv_48025: tmp___7 = list_empty((struct list_head const *)(& objects)); if (tmp___7 == 0) { goto ldv_48024; } else { } return (ret); } }
/* Handle -> VMA lookup: direct LUT indexing when eb->and < 0 (bounds-checked
 * against -eb->and), otherwise walk the matching hash bucket. Returns NULL
 * when not found. */
static struct i915_vma *eb_get_vma(struct eb_vmas *eb , unsigned long handle ) { struct hlist_head *head ; struct hlist_node *node ; struct i915_vma *vma ; struct hlist_node const *__mptr ; { if (eb->and < 0) { if ((unsigned long )(- eb->and) <= handle) { return ((struct i915_vma *)0); } else { } return (eb->__annonCompField85.lut[handle]); } else { head = (struct hlist_head *)(& eb->__annonCompField85.buckets) + ((unsigned long )eb->and & handle); node = head->first; goto ldv_48037; ldv_48036: __mptr = (struct hlist_node const *)node; vma = (struct i915_vma *)__mptr + 0xffffffffffffff38UL; if (vma->exec_handle == handle) { return (vma); } else { } node = node->next; ldv_48037: ; if ((unsigned long )node != (unsigned long )((struct hlist_node *)0)) { goto ldv_48036; } else { } return ((struct i915_vma *)0); } } }
/* Undo a reservation on one VMA: unpin the fence when the internal
 * HAS_FENCE flag (bit 30, 1073741824ULL) is set, drop pin_count when the
 * HAS_PIN flag (bit 31 and above mask) is set, then clear both flags. */
static void i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma ) { struct drm_i915_gem_exec_object2 *entry ; struct drm_i915_gem_object *obj ; bool tmp ; int tmp___0 ; { obj = vma->obj; tmp = drm_mm_node_allocated(& vma->node); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } entry = vma->exec_entry; if ((entry->flags & 1073741824ULL) != 0ULL) { i915_gem_object_unpin_fence(obj); } else { } if ((entry->flags & 0xffffffff80000000ULL) != 0ULL) { vma->pin_count = (unsigned char )((int )vma->pin_count - 1); } else { } entry->flags = entry->flags & 1073741823ULL; return; } }
/* Tear down the lookup structure: unreserve and unreference every VMA on
 * eb->vmas, then free eb itself. */
static void eb_destroy(struct eb_vmas *eb ) { struct i915_vma *vma ; struct list_head const *__mptr ; int tmp ; { goto ldv_48051; ldv_48050: __mptr = (struct list_head const *)eb->vmas.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff48UL; list_del_init(& vma->exec_list); i915_gem_execbuffer_unreserve_vma(vma); drm_gem_object_unreference___4(& (vma->obj)->base); ldv_48051: tmp = list_empty((struct list_head const *)(& eb->vmas)); if (tmp == 0) { goto ldv_48050; } else { } kfree((void const *)eb); return; } }
/* True when the relocation may be patched through a CPU kmap instead of the
 * GTT: LLC platform, object already in CPU write domain, or a packed
 * obj bitfield (byte offset 410 — presumably cache_level != NONE; the exact
 * field cannot be confirmed from this chunk). */
__inline static int use_cpu_reloc(struct drm_i915_gem_object *obj ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)obj->base.dev); return (((unsigned int )*((unsigned char *)__p + 46UL) != 0U || obj->base.write_domain == 1U) || (unsigned int )*((unsigned char *)obj + 410UL) != 0U); } }
/* Patch one relocation through kmap_atomic after moving the object to the
 * CPU write domain. On GEN8+ the relocation is 64-bit: also write the upper
 * dword, remapping when the +4 offset crosses a page boundary. */
static int relocate_entry_cpu(struct drm_i915_gem_object *obj , struct drm_i915_gem_relocation_entry *reloc , uint64_t target_offset ) { struct drm_device *dev ; uint32_t page_offset___0 ; uint64_t delta ; char *vaddr ; int ret ; struct page *tmp ; void *tmp___0 ; struct page *tmp___1 ; void *tmp___2 ; struct drm_i915_private *__p ; { dev = obj->base.dev; page_offset___0 = (uint32_t )reloc->offset & 4095U; delta = (uint64_t )reloc->delta + target_offset; ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret != 0) { return (ret); } else { } tmp = i915_gem_object_get_page___0(obj, (int )(reloc->offset >> 12)); tmp___0 = kmap_atomic(tmp); vaddr = (char *)tmp___0; *((uint32_t *)vaddr + (unsigned long )page_offset___0) = (unsigned int )delta; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { page_offset___0 = (page_offset___0 + 4U) & 4095U; if (page_offset___0 == 0U) { __kunmap_atomic((void *)vaddr); tmp___1 = i915_gem_object_get_page___0(obj, (int )((reloc->offset + 4ULL) >> 12)); tmp___2 = kmap_atomic(tmp___1); vaddr = (char *)tmp___2; } else { } *((uint32_t *)vaddr + (unsigned long )page_offset___0) = (unsigned int )(delta >> 32ULL); } else { } __kunmap_atomic((void *)vaddr); return (0); } }
/* Patch one relocation through the mappable GTT aperture: move the object
 * to the GTT write domain, release its fence, then iowrite32 via an atomic
 * WC io-mapping; GEN8+ also writes the upper dword, remapping on page
 * crossings. */
static int relocate_entry_gtt(struct drm_i915_gem_object *obj , struct drm_i915_gem_relocation_entry *reloc , uint64_t target_offset ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint64_t delta ; uint64_t offset ; void *reloc_page ; int ret ; unsigned long tmp ; struct drm_i915_private *__p ; { dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; delta = (uint64_t )reloc->delta + target_offset; ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret != 0) { return (ret); } else { } ret = i915_gem_object_put_fence(obj); if (ret != 0) { return (ret); } else { } tmp = i915_gem_obj_ggtt_offset(obj); offset = (uint64_t )tmp; offset = reloc->offset + offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, (unsigned long )offset & 0xfffffffffffff000UL); iowrite32((unsigned int )delta, reloc_page + ((unsigned long )offset & 4095UL)); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { offset = offset + 4ULL; if (((unsigned long )offset & 4095UL) == 0UL) { __kunmap_atomic(reloc_page); reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, (unsigned long )offset); } else { } iowrite32((unsigned int )(delta >> 32ULL), reloc_page + ((unsigned long )offset & 4095UL)); } else { } __kunmap_atomic(reloc_page); return (0); } }
/* Write one dword with cache-line flushes before and after so the value is
 * visible to the GPU without relying on coherency. */
static void clflush_write32(void *addr , uint32_t value ) { { drm_clflush_virt_range(addr, 4UL); *((uint32_t *)addr) = value; drm_clflush_virt_range(addr, 4UL); return; } }
/* Patch one relocation via kmap + explicit clflush (for non-coherent,
 * non-GTT-mappable objects); note delta sign-extends (int)reloc->delta.
 * Body continues on the following lines. */
static int relocate_entry_clflush(struct drm_i915_gem_object *obj , struct drm_i915_gem_relocation_entry *reloc , uint64_t target_offset ) { struct drm_device *dev ; uint32_t
/* NOTE(review): continuation of relocate_entry_clflush() — declaration is
 * on the previous line. GTT-domain move, clflush_write32 of the low dword,
 * and on GEN8+ the high dword with page-crossing remap. */
page_offset___0 ; uint64_t delta ; char *vaddr ; int ret ; struct page *tmp ; void *tmp___0 ; struct page *tmp___1 ; void *tmp___2 ; struct drm_i915_private *__p ; { dev = obj->base.dev; page_offset___0 = (uint32_t )reloc->offset & 4095U; delta = (uint64_t )((int )reloc->delta) + target_offset; ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret != 0) { return (ret); } else { } tmp = i915_gem_object_get_page___0(obj, (int )(reloc->offset >> 12)); tmp___0 = kmap_atomic(tmp); vaddr = (char *)tmp___0; clflush_write32((void *)vaddr + (unsigned long )page_offset___0, (unsigned int )delta); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { page_offset___0 = (page_offset___0 + 4U) & 4095U; if (page_offset___0 == 0U) { __kunmap_atomic((void *)vaddr); tmp___1 = i915_gem_object_get_page___0(obj, (int )((reloc->offset + 4ULL) >> 12)); tmp___2 = kmap_atomic(tmp___1); vaddr = (char *)tmp___2; } else { } clflush_write32((void *)vaddr + (unsigned long )page_offset___0, (unsigned int )(delta >> 32ULL)); } else { } __kunmap_atomic((void *)vaddr); return (0); } }
/* Validate and apply a single relocation entry:
 *  - look up the target VMA by handle (-2/-ENOENT when missing);
 *  - GEN6 workaround: rebind the target when the write domain is the
 *    instruction domain (16U), WARNing once on bind failure;
 *  - reject multiple write domains, non-GPU domains, out-of-bounds or
 *    unaligned offsets (-22/-EINVAL);
 *  - skip the write when presumed_offset already matches;
 *  - return -14/-EFAULT when the object must be faulted in but pagefaults
 *    are disabled;
 *  - dispatch to the CPU, GTT or clflush patch path, else WARN once and
 *    return -19/-ENODEV; on success record the new presumed_offset. */
static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj , struct eb_vmas *eb , struct drm_i915_gem_relocation_entry *reloc ) { struct drm_device *dev ; struct drm_gem_object *target_obj ; struct drm_i915_gem_object *target_i915_obj ; struct i915_vma *target_vma ; uint64_t target_offset ; int ret ; long tmp ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; struct drm_i915_private *__p ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; struct drm_i915_private *__p___0 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; struct task_struct *tmp___13 ; bool __warned___0 ; int __ret_warn_once___0 ; int __ret_warn_on___0 ; long tmp___14 ; long tmp___15 ; long tmp___16 ; int tmp___17 ; int tmp___18 ; { dev = obj->base.dev; target_vma = eb_get_vma(eb, (unsigned long )reloc->target_handle); tmp = ldv__builtin_expect((unsigned long )target_vma == (unsigned long )((struct i915_vma *)0), 0L); if (tmp != 0L) { return (-2); } else { } target_i915_obj = target_vma->obj; target_obj = & (target_vma->obj)->base; target_offset = target_vma->node.start; __p = to_i915((struct drm_device const *)dev); tmp___4 = ldv__builtin_expect((long )((unsigned int )((unsigned char )__p->info.gen) == 6U && reloc->write_domain == 16U), 0L); if (tmp___4 != 0L) { ret = i915_vma_bind(target_vma, (enum i915_cache_level )target_i915_obj->cache_level, 4U); __ret_warn_once = ret != 0; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_execbuffer.c", 407, "Unexpected failure to bind target VMA!"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } tmp___3 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___3 != 0L) { return (ret); } else { } } else { } tmp___6 = ldv__builtin_expect((reloc->write_domain & (reloc->write_domain - 1U)) != 0U, 0L); if (tmp___6 != 0L) { tmp___5 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer_relocate_entry", "reloc with multiple write domains: obj %p target %d offset %d read %08x write %08x", obj, reloc->target_handle, (int )reloc->offset, reloc->read_domains, reloc->write_domain); } else { } return (-22); } else { } tmp___8 = ldv__builtin_expect(((reloc->write_domain | reloc->read_domains) & 4294967233U) != 0U, 0L); if (tmp___8 != 0L) { tmp___7 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer_relocate_entry", "reloc with read/write non-GPU domains: obj %p target %d offset %d read %08x write %08x", obj, reloc->target_handle, (int )reloc->offset, reloc->read_domains, reloc->write_domain); } else { } return (-22); } else { } target_obj->pending_read_domains = target_obj->pending_read_domains | reloc->read_domains; target_obj->pending_write_domain = target_obj->pending_write_domain | reloc->write_domain; if (reloc->presumed_offset == target_offset) { return (0); } else { } __p___0 = to_i915((struct drm_device const *)dev); tmp___10 = ldv__builtin_expect(reloc->offset > (unsigned long long )(obj->base.size - ((unsigned int )((unsigned char )__p___0->info.gen) > 7U ? 8UL : 4UL)), 0L); if (tmp___10 != 0L) { tmp___9 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer_relocate_entry", "Relocation beyond object bounds: obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, (int )reloc->offset, (int )obj->base.size); } else { } return (-22); } else { } tmp___12 = ldv__builtin_expect((reloc->offset & 3ULL) != 0ULL, 0L); if (tmp___12 != 0L) { tmp___11 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer_relocate_entry", "Relocation not 4-byte aligned: obj %p target %d offset %d.\n", obj, reloc->target_handle, (int )reloc->offset); } else { } return (-22); } else { } if ((unsigned int )*((unsigned char *)obj + 408UL) != 0U) { tmp___13 = get_current(); if (tmp___13->pagefault_disabled != 0) { return (-14); } else { } } else { } tmp___18 = use_cpu_reloc(obj); if (tmp___18 != 0) { ret = relocate_entry_cpu(obj, reloc, target_offset); } else if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { ret = relocate_entry_gtt(obj, reloc, target_offset); } else { tmp___17 = constant_test_bit(19L, (unsigned long const volatile *)(& boot_cpu_data.x86_capability)); if (tmp___17 != 0) { ret = relocate_entry_clflush(obj, reloc, target_offset); } else { __ret_warn_once___0 = 1; tmp___16 = ldv__builtin_expect(__ret_warn_once___0 != 0, 0L); if (tmp___16 != 0L) { __ret_warn_on___0 = ! __warned___0; tmp___14 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___14 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_execbuffer.c", 472, "Impossible case in relocation handling\n"); } else { } tmp___15 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___15 != 0L) { __warned___0 = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once___0 != 0, 0L); ret = -19; } } if (ret != 0) { return (ret); } else { } reloc->presumed_offset = target_offset; return (0); } }
/* Process the userspace relocation array for one VMA in stack batches of 16
 * entries (32 bytes each) copied with __copy_from_user_inatomic; the body
 * continues past the end of this chunk. */
static int i915_gem_execbuffer_relocate_vma(struct i915_vma *vma , struct eb_vmas *eb ) { struct drm_i915_gem_relocation_entry stack_reloc[16U] ; struct drm_i915_gem_relocation_entry *user_relocs ; struct drm_i915_gem_exec_object2 *entry ; int remain ; int ret ; void *tmp ; struct drm_i915_gem_relocation_entry *r ; int count ; int tmp___0 ; u64 offset ; int tmp___1 ; { entry = vma->exec_entry; tmp = to_user_ptr(entry->relocs_ptr); user_relocs = (struct drm_i915_gem_relocation_entry *)tmp; remain = (int )entry->relocation_count; goto ldv_48168; ldv_48167: r = (struct drm_i915_gem_relocation_entry *)(& stack_reloc); count = remain; if ((unsigned int )count > 16U) { count = 16; } else { } remain = remain - count; tmp___0 = __copy_from_user_inatomic((void *)r, (void const *)user_relocs, (unsigned int )((unsigned long )count) * 32U); if (tmp___0 != 0) { return (-14); } else { } ldv_48165: offset = r->presumed_offset; ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); if (ret != 0) { return (ret); } else { } if (r->presumed_offset != offset) { tmp___1 =
__copy_to_user_inatomic((void *)(& user_relocs->presumed_offset), (void const *)(& r->presumed_offset), 8U); if (tmp___1 != 0) { return (-14); } else { } } else { } user_relocs = user_relocs + 1; r = r + 1; count = count - 1; if (count != 0) { goto ldv_48165; } else { } ldv_48168: ; if (remain != 0) { goto ldv_48167; } else { } return (0); } } static int i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma , struct eb_vmas *eb , struct drm_i915_gem_relocation_entry *relocs ) { struct drm_i915_gem_exec_object2 const *entry ; int i ; int ret ; { entry = (struct drm_i915_gem_exec_object2 const *)vma->exec_entry; i = 0; goto ldv_48179; ldv_48178: ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, relocs + (unsigned long )i); if (ret != 0) { return (ret); } else { } i = i + 1; ldv_48179: ; if ((unsigned int )i < (unsigned int )entry->relocation_count) { goto ldv_48178; } else { } return (0); } } static int i915_gem_execbuffer_relocate(struct eb_vmas *eb ) { struct i915_vma *vma ; int ret ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ret = 0; pagefault_disable(); __mptr = (struct list_head const *)eb->vmas.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff48UL; goto ldv_48192; ldv_48191: ret = i915_gem_execbuffer_relocate_vma(vma, eb); if (ret != 0) { goto ldv_48190; } else { } __mptr___0 = (struct list_head const *)vma->exec_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff48UL; ldv_48192: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )(& eb->vmas)) { goto ldv_48191; } else { } ldv_48190: pagefault_enable(); return (ret); } } static bool only_mappable_for_reloc(unsigned int flags ) { { return ((flags & 536870913U) == 536870912U); } } static int i915_gem_execbuffer_reserve_vma(struct i915_vma *vma , struct intel_engine_cs *ring , bool *need_reloc ) { struct drm_i915_gem_object *obj ; struct drm_i915_gem_exec_object2 *entry ; uint64_t flags ; int ret ; bool tmp ; int tmp___0 ; bool tmp___1 ; bool 
tmp___2 ; { obj = vma->obj; entry = vma->exec_entry; flags = 16ULL; if ((entry->flags & 2ULL) != 0ULL) { flags = flags | 4ULL; } else { } tmp = drm_mm_node_allocated(& vma->node); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { if ((entry->flags & 536870912ULL) != 0ULL) { flags = flags | 5ULL; } else { } if ((entry->flags & 268435456ULL) != 0ULL) { flags = flags | 262152ULL; } else { } } else { } ret = i915_gem_object_pin(obj, vma->vm, (uint32_t )entry->alignment, flags); if (ret == -28 || ret == -7) { tmp___1 = only_mappable_for_reloc((unsigned int )entry->flags); if ((int )tmp___1) { ret = i915_gem_object_pin(obj, vma->vm, (uint32_t )entry->alignment, flags & 0xfffffffffffffffeULL); } else { } } else { } if (ret != 0) { return (ret); } else { } entry->flags = entry->flags | 0xffffffff80000000ULL; if ((int )entry->flags & 1) { ret = i915_gem_object_get_fence(obj); if (ret != 0) { return (ret); } else { } tmp___2 = i915_gem_object_pin_fence(obj); if ((int )tmp___2) { entry->flags = entry->flags | 1073741824ULL; } else { } } else { } if (entry->offset != vma->node.start) { entry->offset = vma->node.start; *need_reloc = 1; } else { } if ((entry->flags & 4ULL) != 0ULL) { obj->base.pending_read_domains = 2U; obj->base.pending_write_domain = 2U; } else { } return (0); } } static bool need_reloc_mappable(struct i915_vma *vma ) { struct drm_i915_gem_exec_object2 *entry ; bool tmp ; int tmp___0 ; struct drm_i915_private *__p ; { entry = vma->exec_entry; if (entry->relocation_count == 0U) { return (0); } else { } tmp = i915_is_ggtt(vma->vm); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } __p = to_i915((struct drm_device const *)(vma->obj)->base.dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { return (0); } else { } if ((vma->obj)->base.write_domain == 1U) { return (0); } else { } return (1); } } static bool eb_vma_misplaced(struct i915_vma *vma ) { struct drm_i915_gem_exec_object2 *entry ; struct 
drm_i915_gem_object *obj ; int __ret_warn_on ; bool tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; { entry = vma->exec_entry; obj = vma->obj; if ((entry->flags & 536870912ULL) != 0ULL) { tmp = i915_is_ggtt(vma->vm); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_execbuffer.c", 660, "WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !i915_is_ggtt(vma->vm))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if (entry->alignment != 0ULL && (vma->node.start & (entry->alignment - 1ULL)) != 0ULL) { return (1); } else { } if ((entry->flags & 268435456ULL) != 0ULL && vma->node.start <= 262143ULL) { return (1); } else { } if ((entry->flags & 536870912ULL) != 0ULL && (unsigned int )*((unsigned char *)obj + 410UL) == 0U) { tmp___3 = only_mappable_for_reloc((unsigned int )entry->flags); if ((int )tmp___3 != 0) { tmp___4 = 0; } else { tmp___4 = 1; } return ((bool )tmp___4); } else { } return (0); } } static int i915_gem_execbuffer_reserve(struct intel_engine_cs *ring , struct list_head *vmas , bool *need_relocs ) { struct drm_i915_gem_object *obj ; struct i915_vma *vma ; struct i915_address_space *vm ; struct list_head ordered_vmas ; bool has_fenced_gpu_access ; struct drm_i915_private *__p ; int retry ; struct list_head const *__mptr ; struct drm_i915_gem_exec_object2 *entry ; bool need_fence ; bool need_mappable ; struct list_head const *__mptr___0 ; bool tmp ; int tmp___0 ; int tmp___1 ; int ret ; struct list_head const *__mptr___1 ; bool tmp___2 ; int tmp___3 ; bool tmp___4 ; struct list_head const *__mptr___2 ; struct list_head const 
*__mptr___3 ; bool tmp___5 ; struct list_head const *__mptr___4 ; int tmp___6 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; { __p = to_i915((struct drm_device const *)ring->dev); has_fenced_gpu_access = (unsigned int )((unsigned char )__p->info.gen) <= 3U; i915_gem_retire_requests_ring(ring); __mptr = (struct list_head const *)vmas->next; vm = ((struct i915_vma *)__mptr + 0xffffffffffffff48UL)->vm; INIT_LIST_HEAD(& ordered_vmas); goto ldv_48247; ldv_48246: __mptr___0 = (struct list_head const *)vmas->next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff48UL; obj = vma->obj; entry = vma->exec_entry; if (! has_fenced_gpu_access) { entry->flags = entry->flags & 0xfffffffffffffffeULL; } else { } need_fence = (bool )((int )entry->flags & 1 && (unsigned int )*((unsigned char *)obj + 409UL) != 0U); if ((int )need_fence) { tmp___0 = 1; } else { tmp = need_reloc_mappable(vma); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } need_mappable = (bool )tmp___0; if ((int )need_mappable) { entry->flags = entry->flags | 536870912ULL; list_move(& vma->exec_list, & ordered_vmas); } else { list_move_tail(& vma->exec_list, & ordered_vmas); } obj->base.pending_read_domains = 54U; obj->base.pending_write_domain = 0U; ldv_48247: tmp___1 = list_empty((struct list_head const *)vmas); if (tmp___1 == 0) { goto ldv_48246; } else { } list_splice((struct list_head const *)(& ordered_vmas), vmas); retry = 0; ldv_48274: ret = 0; __mptr___1 = (struct list_head const *)vmas->next; vma = (struct i915_vma *)__mptr___1 + 0xffffffffffffff48UL; goto ldv_48257; ldv_48256: tmp___2 = drm_mm_node_allocated(& vma->node); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { goto ldv_48254; } else { } tmp___4 = eb_vma_misplaced(vma); if ((int )tmp___4) { ret = i915_vma_unbind(vma); } else { ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); } if (ret != 0) { goto err; } else { } ldv_48254: __mptr___2 = (struct list_head const 
*)vma->exec_list.next; vma = (struct i915_vma *)__mptr___2 + 0xffffffffffffff48UL; ldv_48257: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )vmas) { goto ldv_48256; } else { } __mptr___3 = (struct list_head const *)vmas->next; vma = (struct i915_vma *)__mptr___3 + 0xffffffffffffff48UL; goto ldv_48265; ldv_48264: tmp___5 = drm_mm_node_allocated(& vma->node); if ((int )tmp___5) { goto ldv_48263; } else { } ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); if (ret != 0) { goto err; } else { } ldv_48263: __mptr___4 = (struct list_head const *)vma->exec_list.next; vma = (struct i915_vma *)__mptr___4 + 0xffffffffffffff48UL; ldv_48265: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )vmas) { goto ldv_48264; } else { } err: ; if (ret != -28) { return (ret); } else { tmp___6 = retry; retry = retry + 1; if (tmp___6 != 0) { return (ret); } else { } } __mptr___5 = (struct list_head const *)vmas->next; vma = (struct i915_vma *)__mptr___5 + 0xffffffffffffff48UL; goto ldv_48272; ldv_48271: i915_gem_execbuffer_unreserve_vma(vma); __mptr___6 = (struct list_head const *)vma->exec_list.next; vma = (struct i915_vma *)__mptr___6 + 0xffffffffffffff48UL; ldv_48272: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )vmas) { goto ldv_48271; } else { } ret = i915_gem_evict_vm(vm, 1); if (ret != 0) { return (ret); } else { } goto ldv_48274; } } static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev , struct drm_i915_gem_execbuffer2 *args , struct drm_file *file , struct intel_engine_cs *ring , struct eb_vmas *eb , struct drm_i915_gem_exec_object2 *exec ) { struct drm_i915_gem_relocation_entry *reloc ; struct i915_address_space *vm ; struct i915_vma *vma ; bool need_relocs ; int *reloc_offset ; int i ; int total ; int ret ; unsigned int count ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; void *tmp___0 ; void *tmp___1 ; struct drm_i915_gem_relocation_entry *user_relocs ; u64 invalid_offset ; int j ; 
void *tmp___2 ; unsigned long tmp___3 ; int tmp___4 ; struct list_head const *__mptr___1 ; int offset ; struct list_head const *__mptr___2 ; { count = args->buffer_count; __mptr = (struct list_head const *)eb->vmas.next; vm = ((struct i915_vma *)__mptr + 0xffffffffffffff48UL)->vm; goto ldv_48297; ldv_48296: __mptr___0 = (struct list_head const *)eb->vmas.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff48UL; list_del_init(& vma->exec_list); i915_gem_execbuffer_unreserve_vma(vma); drm_gem_object_unreference___4(& (vma->obj)->base); ldv_48297: tmp = list_empty((struct list_head const *)(& eb->vmas)); if (tmp == 0) { goto ldv_48296; } else { } mutex_unlock(& dev->struct_mutex); total = 0; i = 0; goto ldv_48300; ldv_48299: total = (int )((exec + (unsigned long )i)->relocation_count + (__u32 )total); i = i + 1; ldv_48300: ; if ((unsigned int )i < count) { goto ldv_48299; } else { } tmp___0 = drm_malloc_ab((size_t )count, 4UL); reloc_offset = (int *)tmp___0; tmp___1 = drm_malloc_ab((size_t )total, 32UL); reloc = (struct drm_i915_gem_relocation_entry *)tmp___1; if ((unsigned long )reloc == (unsigned long )((struct drm_i915_gem_relocation_entry *)0) || (unsigned long )reloc_offset == (unsigned long )((int *)0)) { drm_free_large((void *)reloc); drm_free_large((void *)reloc_offset); mutex_lock_nested(& dev->struct_mutex, 0U); return (-12); } else { } total = 0; i = 0; goto ldv_48310; ldv_48309: invalid_offset = 0xffffffffffffffffULL; tmp___2 = to_user_ptr((exec + (unsigned long )i)->relocs_ptr); user_relocs = (struct drm_i915_gem_relocation_entry *)tmp___2; tmp___3 = copy_from_user((void *)reloc + (unsigned long )total, (void const *)user_relocs, (unsigned long )(exec + (unsigned long )i)->relocation_count * 32UL); if (tmp___3 != 0UL) { ret = -14; mutex_lock_nested(& dev->struct_mutex, 0U); goto err; } else { } j = 0; goto ldv_48307; ldv_48306: tmp___4 = __copy_to_user((void *)(& (user_relocs + (unsigned long )j)->presumed_offset), (void const *)(& 
invalid_offset), 8U); if (tmp___4 != 0) { ret = -14; mutex_lock_nested(& dev->struct_mutex, 0U); goto err; } else { } j = j + 1; ldv_48307: ; if ((__u32 )j < (exec + (unsigned long )i)->relocation_count) { goto ldv_48306; } else { } *(reloc_offset + (unsigned long )i) = total; total = (int )((exec + (unsigned long )i)->relocation_count + (__u32 )total); i = i + 1; ldv_48310: ; if ((unsigned int )i < count) { goto ldv_48309; } else { } ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { mutex_lock_nested(& dev->struct_mutex, 0U); goto err; } else { } eb_reset(eb); ret = eb_lookup_vmas(eb, exec, (struct drm_i915_gem_execbuffer2 const *)args, vm, file); if (ret != 0) { goto err; } else { } need_relocs = (args->flags & 2048ULL) == 0ULL; ret = i915_gem_execbuffer_reserve(ring, & eb->vmas, & need_relocs); if (ret != 0) { goto err; } else { } __mptr___1 = (struct list_head const *)eb->vmas.next; vma = (struct i915_vma *)__mptr___1 + 0xffffffffffffff48UL; goto ldv_48318; ldv_48317: offset = (int )(((long )vma->exec_entry - (long )exec) / 56L); ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, reloc + (unsigned long )*(reloc_offset + (unsigned long )offset)); if (ret != 0) { goto err; } else { } __mptr___2 = (struct list_head const *)vma->exec_list.next; vma = (struct i915_vma *)__mptr___2 + 0xffffffffffffff48UL; ldv_48318: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )(& eb->vmas)) { goto ldv_48317; } else { } err: drm_free_large((void *)reloc); drm_free_large((void *)reloc_offset); return (ret); } } static int i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring , struct list_head *vmas ) { unsigned int other_rings ; unsigned int tmp ; struct i915_vma *vma ; uint32_t flush_domains ; bool flush_chipset ; int ret ; struct list_head const *__mptr ; struct drm_i915_gem_object *obj ; bool tmp___0 ; struct list_head const *__mptr___0 ; int tmp___1 ; { tmp = intel_ring_flag(ring); other_rings = ~ tmp; flush_domains = 0U; flush_chipset = 0; __mptr = 
(struct list_head const *)vmas->next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff48UL; goto ldv_48335; ldv_48334: obj = vma->obj; if (((unsigned int )obj->active & other_rings) != 0U) { ret = i915_gem_object_sync(obj, ring); if (ret != 0) { return (ret); } else { } } else { } if ((int )obj->base.write_domain & 1) { tmp___0 = i915_gem_clflush_object(obj, 0); flush_chipset = ((int )flush_chipset | (int )tmp___0) != 0; } else { } flush_domains = obj->base.write_domain | flush_domains; __mptr___0 = (struct list_head const *)vma->exec_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff48UL; ldv_48335: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )vmas) { goto ldv_48334; } else { } if ((int )flush_chipset) { i915_gem_chipset_flush(ring->dev); } else { } if ((flush_domains & 64U) != 0U) { __asm__ volatile ("sfence": : : "memory"); } else { } tmp___1 = intel_ring_invalidate_all_caches(ring); return (tmp___1); } } static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec ) { { if ((exec->flags & 0xffffffffffff8000ULL) != 0ULL) { return (0); } else { } return (((exec->batch_start_offset | exec->batch_len) & 7U) == 0U); } } static int validate_exec_list(struct drm_device *dev , struct drm_i915_gem_exec_object2 *exec , int count ) { unsigned int relocs_total ; unsigned int relocs_max ; unsigned int invalid_flags ; int i ; char *ptr ; void *tmp ; int length ; struct thread_info *tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; int tmp___4 ; long tmp___5 ; { relocs_total = 0U; relocs_max = 134217727U; invalid_flags = 4294967288U; if (i915.enable_ppgtt == 2) { invalid_flags = invalid_flags | 2U; } else { } i = 0; goto ldv_48353; ldv_48352: tmp = to_user_ptr((exec + (unsigned long )i)->relocs_ptr); ptr = (char *)tmp; if (((exec + (unsigned long )i)->flags & (__u64 )invalid_flags) != 0ULL) { return (-22); } else { } if ((exec + (unsigned long )i)->relocation_count > relocs_max - relocs_total) { return (-22); } else { } 
relocs_total = (exec + (unsigned long )i)->relocation_count + relocs_total; length = (int )((exec + (unsigned long )i)->relocation_count * 32U); tmp___0 = current_thread_info(); tmp___1 = __chk_range_not_ok((unsigned long )ptr, (unsigned long )length, tmp___0->addr_limit.seg); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } tmp___3 = ldv__builtin_expect((long )tmp___2, 1L); if (tmp___3 == 0L) { return (-14); } else { } tmp___5 = ldv__builtin_expect((long )(! i915.prefault_disable), 1L); if (tmp___5 != 0L) { tmp___4 = fault_in_multipages_readable((char const *)ptr, length); if (tmp___4 != 0) { return (-14); } else { } } else { } i = i + 1; ldv_48353: ; if (i < count) { goto ldv_48352; } else { } return (0); } } static struct intel_context *i915_gem_validate_context(struct drm_device *dev , struct drm_file *file , struct intel_engine_cs *ring , u32 const ctx_id ) { struct intel_context *ctx ; struct i915_ctx_hang_stats *hs ; void *tmp ; bool tmp___0 ; long tmp___1 ; void *tmp___2 ; int ret ; int tmp___3 ; long tmp___4 ; void *tmp___5 ; { ctx = (struct intel_context *)0; if ((unsigned int )ring->id != 0U && (unsigned int )ctx_id != 0U) { tmp = ERR_PTR(-22L); return ((struct intel_context *)tmp); } else { } ctx = i915_gem_context_get((struct drm_i915_file_private *)file->driver_priv, ctx_id); tmp___0 = IS_ERR((void const *)ctx); if ((int )tmp___0) { return (ctx); } else { } hs = & ctx->hang_stats; if ((int )hs->banned) { tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_gem_validate_context", "Context %u tried to submit while banned\n", ctx_id); } else { } tmp___2 = ERR_PTR(-5L); return ((struct intel_context *)tmp___2); } else { } if (i915.enable_execlists != 0 && (unsigned long )ctx->engine[(unsigned int )ring->id].state == (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___3 = intel_lr_context_deferred_create(ctx, ring); ret = tmp___3; if (ret != 0) { tmp___4 = ldv__builtin_expect((long 
)((int )drm_debug) & 1L, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("i915_gem_validate_context", "Could not create LRC %u: %d\n", ctx_id, ret); } else { } tmp___5 = ERR_PTR((long )ret); return ((struct intel_context *)tmp___5); } else { } } else { } return (ctx); } } void i915_gem_execbuffer_move_to_active(struct list_head *vmas , struct intel_engine_cs *ring ) { struct drm_i915_gem_request *req ; struct drm_i915_gem_request *tmp ; struct i915_vma *vma ; struct list_head const *__mptr ; struct drm_i915_gem_exec_object2 *entry ; struct drm_i915_gem_object *obj ; u32 old_read ; u32 old_write ; struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp___0 ; struct list_head const *__mptr___0 ; { tmp = intel_ring_get_request(ring); req = tmp; __mptr = (struct list_head const *)vmas->next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff48UL; goto ldv_48381; ldv_48380: entry = vma->exec_entry; obj = vma->obj; old_read = obj->base.read_domains; old_write = obj->base.write_domain; obj->base.write_domain = obj->base.pending_write_domain; if (obj->base.write_domain == 0U) { obj->base.pending_read_domains = obj->base.pending_read_domains | obj->base.read_domains; } else { } obj->base.read_domains = obj->base.pending_read_domains; i915_vma_move_to_active(vma, ring); if (obj->base.write_domain != 0U) { obj->dirty = 1U; i915_gem_request_assign(& obj->last_write_req, req); intel_fb_obj_invalidate(obj, ring, 2); obj->base.write_domain = obj->base.write_domain & 4294967233U; } else { } if ((int )entry->flags & 1) { i915_gem_request_assign(& obj->last_fenced_req, req); if ((entry->flags & 1073741824ULL) != 0ULL) { tmp___0 = to_i915((struct drm_device const *)ring->dev); dev_priv = tmp___0; list_move_tail(& dev_priv->fence_regs[(int )obj->fence_reg].lru_list, & dev_priv->mm.fence_list); } else { } } else { } trace_i915_gem_object_change_domain(obj, old_read, old_write); __mptr___0 = (struct list_head const *)vma->exec_list.next; vma = (struct i915_vma *)__mptr___0 + 
0xffffffffffffff48UL; ldv_48381: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )vmas) { goto ldv_48380; } else { } return; } } void i915_gem_execbuffer_retire_commands(struct drm_device *dev , struct drm_file *file , struct intel_engine_cs *ring , struct drm_i915_gem_object *obj ) { { ring->gpu_caches_dirty = 1; __i915_add_request(ring, file, obj); return; } } static int i915_reset_gen7_sol_offsets(struct drm_device *dev , struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; int ret ; int i ; long tmp ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 7U || (unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) != (unsigned long )ring) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_reset_gen7_sol_offsets", "sol reset is gen7/rcs only\n"); } else { } return (-22); } else { } ret = intel_ring_begin(ring, 12); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_48404; ldv_48403: intel_ring_emit(ring, 285212673U); intel_ring_emit(ring, (u32 )((i + 5280) * 4)); intel_ring_emit(ring, 0U); i = i + 1; ldv_48404: ; if (i <= 3) { goto ldv_48403; } else { } intel_ring_advance(ring); return (0); } } static int i915_emit_box(struct intel_engine_cs *ring , struct drm_clip_rect *box , int DR1 , int DR4 ) { int ret ; struct drm_i915_private *__p ; { if ((((int )box->y2 <= (int )box->y1 || (int )box->x2 <= (int )box->x1) || (unsigned int )box->y2 == 0U) || (unsigned int )box->x2 == 0U) { drm_err("Bad box %d,%d..%d,%d\n", (int )box->x1, (int )box->y1, (int )box->x2, (int )box->y2); return (-22); } else { } __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2030043138U); 
intel_ring_emit(ring, (u32 )((int )box->x1 | ((int )box->y1 << 16))); intel_ring_emit(ring, (u32 )((((int )box->x2 + -1) & 65535) | (((int )box->y2 + -1) << 16))); intel_ring_emit(ring, (u32 )DR4); } else { ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2105540611U); intel_ring_emit(ring, (u32 )DR1); intel_ring_emit(ring, (u32 )((int )box->x1 | ((int )box->y1 << 16))); intel_ring_emit(ring, (u32 )((((int )box->x2 + -1) & 65535) | (((int )box->y2 + -1) << 16))); intel_ring_emit(ring, (u32 )DR4); intel_ring_emit(ring, 0U); } intel_ring_advance(ring); return (0); } } static struct drm_i915_gem_object *i915_gem_execbuffer_parse(struct intel_engine_cs *ring , struct drm_i915_gem_exec_object2 *shadow_exec_entry , struct eb_vmas *eb , struct drm_i915_gem_object *batch_obj , u32 batch_start_offset , u32 batch_len , bool is_master ) { struct drm_i915_gem_object *shadow_batch_obj ; struct i915_vma *vma ; int ret ; bool tmp ; void *tmp___0 ; { shadow_batch_obj = i915_gem_batch_pool_get(& ring->batch_pool, (size_t )(batch_len + 4095U) & 4294963200UL); tmp = IS_ERR((void const *)shadow_batch_obj); if ((int )tmp) { return (shadow_batch_obj); } else { } ret = i915_parse_cmds(ring, batch_obj, shadow_batch_obj, batch_start_offset, batch_len, (int )is_master); if (ret != 0) { goto err; } else { } ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0U, 0U); if (ret != 0) { goto err; } else { } i915_gem_object_unpin_pages(shadow_batch_obj); memset((void *)shadow_exec_entry, 0, 56UL); vma = i915_gem_obj_to_ggtt(shadow_batch_obj); vma->exec_entry = shadow_exec_entry; (vma->exec_entry)->flags = 0xffffffff80000000ULL; drm_gem_object_reference___1(& shadow_batch_obj->base); list_add_tail(& vma->exec_list, & eb->vmas); shadow_batch_obj->base.pending_read_domains = 8U; return (shadow_batch_obj); err: i915_gem_object_unpin_pages(shadow_batch_obj); if (ret == -13) { return (batch_obj); } else { tmp___0 = ERR_PTR((long )ret); return ((struct 
drm_i915_gem_object *)tmp___0); } } } int i915_gem_ringbuffer_submission(struct drm_device *dev , struct drm_file *file , struct intel_engine_cs *ring , struct intel_context *ctx , struct drm_i915_gem_execbuffer2 *args , struct list_head *vmas , struct drm_i915_gem_object *batch_obj , u64 exec_start , u32 dispatch_flags ) { struct drm_clip_rect *cliprects ; struct drm_i915_private *dev_priv ; u64 exec_len ; int instp_mode ; u32 instp_mask ; int i ; int ret ; long tmp ; long tmp___0 ; struct drm_i915_private *__p ; long tmp___1 ; void *tmp___2 ; void *tmp___3 ; unsigned long tmp___4 ; long tmp___5 ; long tmp___6 ; int __ret_warn_on ; long tmp___7 ; long tmp___8 ; long tmp___9 ; struct drm_i915_private *__p___0 ; long tmp___10 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; long tmp___11 ; struct drm_i915_gem_request *tmp___12 ; { cliprects = (struct drm_clip_rect *)0; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; if (args->num_cliprects != 0U) { if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) != (unsigned long )ring) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "clip rectangles are only valid with the render ring\n"); } else { } return (-22); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "clip rectangles are only valid on pre-gen5\n"); } else { } return (-22); } else { } if (args->num_cliprects > 536870911U) { tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "execbuf with %u cliprects\n", args->num_cliprects); } else { } return (-22); } else { } tmp___2 = kcalloc((size_t )args->num_cliprects, 8UL, 208U); cliprects = 
(struct drm_clip_rect *)tmp___2; if ((unsigned long )cliprects == (unsigned long )((struct drm_clip_rect *)0)) { ret = -12; goto error; } else { } tmp___3 = to_user_ptr(args->cliprects_ptr); tmp___4 = copy_from_user((void *)cliprects, (void const *)tmp___3, (unsigned long )args->num_cliprects * 8UL); if (tmp___4 != 0UL) { ret = -14; goto error; } else { } } else { if (args->DR4 == 4294967295U) { tmp___5 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "UXA submitting garbage DR4, fixing up\n"); } else { } args->DR4 = 0U; } else { } if ((args->DR1 != 0U || args->DR4 != 0U) || args->cliprects_ptr != 0ULL) { tmp___6 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "0 cliprects but dirt in cliprects fields\n"); } else { } return (-22); } else { } } ret = i915_gem_execbuffer_move_to_gpu(ring, vmas); if (ret != 0) { goto error; } else { } ret = i915_switch_context(ring, ctx); if (ret != 0) { goto error; } else { } __ret_warn_on = (unsigned long )ctx->ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0) && ((ctx->ppgtt)->pd_dirty_rings & (unsigned long )(1 << (int )ring->id)) != 0UL; tmp___7 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_execbuffer.c", 1256, "%s didn\'t clear reload\n", ring->name); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); instp_mode = (int )args->flags & 192; instp_mask = 192U; switch (instp_mode) { case 0: ; case 64: ; case 128: ; if (instp_mode != 0 && (unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) != (unsigned long )ring) { tmp___8 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___8 != 0L) { 
drm_ut_debug_printk("i915_gem_ringbuffer_submission", "non-0 rel constants mode on non-RCS\n"); } else { } ret = -22; goto error; } else { } if (dev_priv->relative_constants_mode != instp_mode) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 3U) { tmp___9 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "no rel constants on pre-gen4\n"); } else { } ret = -22; goto error; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 5U && instp_mode == 128) { tmp___10 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "rel surface constants mode invalid on gen5+\n"); } else { } ret = -22; goto error; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 5U) { instp_mask = instp_mask & 4294967167U; } else { } } else { } goto ldv_48481; default: tmp___11 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("i915_gem_ringbuffer_submission", "execbuf with unknown constants: %d\n", instp_mode); } else { } ret = -22; goto error; } ldv_48481: ; if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) == (unsigned long )ring && dev_priv->relative_constants_mode != instp_mode) { ret = intel_ring_begin(ring, 4); if (ret != 0) { goto error; } else { } intel_ring_emit(ring, 0U); intel_ring_emit(ring, 285212673U); intel_ring_emit(ring, 8384U); intel_ring_emit(ring, (instp_mask << 16) | (u32 )instp_mode); intel_ring_advance(ring); dev_priv->relative_constants_mode = instp_mode; } else { } if ((args->flags & 256ULL) != 0ULL) { ret = i915_reset_gen7_sol_offsets(dev, ring); if (ret != 0) { goto error; } else { } } else { } exec_len = (u64 )args->batch_len; if ((unsigned long 
)cliprects != (unsigned long )((struct drm_clip_rect *)0)) { i = 0; goto ldv_48484; ldv_48483: ret = i915_emit_box(ring, cliprects + (unsigned long )i, (int )args->DR1, (int )args->DR4); if (ret != 0) { goto error; } else { } ret = (*(ring->dispatch_execbuffer))(ring, exec_start, (u32 )exec_len, dispatch_flags); if (ret != 0) { goto error; } else { } i = i + 1; ldv_48484: ; if ((__u32 )i < args->num_cliprects) { goto ldv_48483; } else { } } else { ret = (*(ring->dispatch_execbuffer))(ring, exec_start, (u32 )exec_len, dispatch_flags); if (ret != 0) { return (ret); } else { } } tmp___12 = intel_ring_get_request(ring); trace_i915_gem_ring_dispatch(tmp___12, dispatch_flags); i915_gem_execbuffer_move_to_active(vmas, ring); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); error: kfree((void const *)cliprects); return (ret); } } static int gen8_dispatch_bsd_ring(struct drm_device *dev , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_file_private *file_priv ; int ring_id ; { dev_priv = (struct drm_i915_private *)dev->dev_private; file_priv = (struct drm_i915_file_private *)file->driver_priv; if ((unsigned long )file_priv->bsd_ring != (unsigned long )((struct intel_engine_cs *)0)) { return ((int )(file_priv->bsd_ring)->id); } else { mutex_lock_nested(& dev->struct_mutex, 0U); if (dev_priv->mm.bsd_ring_dispatch_index == 0) { ring_id = 1; dev_priv->mm.bsd_ring_dispatch_index = 1; } else { ring_id = 4; dev_priv->mm.bsd_ring_dispatch_index = 0; } file_priv->bsd_ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )ring_id; mutex_unlock(& dev->struct_mutex); return (ring_id); } } } static struct drm_i915_gem_object *eb_get_batch(struct eb_vmas *eb ) { struct i915_vma *vma ; struct list_head const *__mptr ; { __mptr = (struct list_head const *)eb->vmas.prev; vma = (struct i915_vma *)__mptr + 0xffffffffffffff48UL; (vma->exec_entry)->flags = (vma->exec_entry)->flags | 268435456ULL; return (vma->obj); } } static int 
i915_gem_do_execbuffer(struct drm_device *dev , void *data , struct drm_file *file , struct drm_i915_gem_execbuffer2 *args , struct drm_i915_gem_exec_object2 *exec ) { struct drm_i915_private *dev_priv ; struct eb_vmas *eb ; struct drm_i915_gem_object *batch_obj ; struct drm_i915_gem_exec_object2 shadow_exec_entry ; struct intel_engine_cs *ring ; struct intel_context *ctx ; struct i915_address_space *vm ; u32 ctx_id ; u64 exec_start ; u32 dispatch_flags ; int ret ; bool need_relocs ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; int ring_id ; long tmp___5 ; struct drm_i915_private *__p ; long tmp___6 ; bool tmp___7 ; int tmp___8 ; long tmp___9 ; long tmp___10 ; bool tmp___11 ; int tmp___12 ; long tmp___13 ; long tmp___14 ; struct drm_i915_gem_object *parsed_batch_obj ; long tmp___15 ; bool tmp___16 ; bool tmp___17 ; unsigned long tmp___18 ; unsigned long tmp___19 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ctx_id = (u32 const )args->rsvd1; exec_start = (u64 )args->batch_start_offset; tmp = i915_gem_check_execbuffer(args); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-22); } else { } ret = validate_exec_list(dev, exec, (int )args->buffer_count); if (ret != 0) { return (ret); } else { } dispatch_flags = 0U; if ((args->flags & 512ULL) != 0ULL) { if ((unsigned int )*((unsigned char *)file + 0UL) == 0U) { return (-1); } else { tmp___1 = capable(21); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (-1); } else { } } dispatch_flags = dispatch_flags | 1U; } else { } if ((args->flags & 1024ULL) != 0ULL) { dispatch_flags = dispatch_flags | 2U; } else { } if ((args->flags & 7ULL) > 4ULL) { tmp___3 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("i915_gem_do_execbuffer", "execbuf with unknown ring: %d\n", (int )args->flags & 7); } else { } return (-22); } else { } if ((args->flags & 7ULL) != 2ULL && (args->flags & 
24576ULL) != 0ULL) { tmp___4 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("i915_gem_do_execbuffer", "execbuf with non bsd ring but with invalid bsd dispatch flags: %d\n", (int )args->flags); } else { } return (-22); } else { } if ((args->flags & 7ULL) == 0ULL) { ring = (struct intel_engine_cs *)(& dev_priv->ring); } else if ((args->flags & 7ULL) == 2ULL) { __p = to_i915((struct drm_device const *)dev); if (((int )__p->info.ring_mask & 16) != 0) { switch (args->flags & 24576ULL) { case 0ULL: ring_id = gen8_dispatch_bsd_ring(dev, file); ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )ring_id; goto ldv_48527; case 8192ULL: ring = (struct intel_engine_cs *)(& dev_priv->ring) + 1UL; goto ldv_48527; case 16384ULL: ring = (struct intel_engine_cs *)(& dev_priv->ring) + 4UL; goto ldv_48527; default: tmp___5 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("i915_gem_do_execbuffer", "execbuf with unknown bsd ring: %d\n", (int )args->flags & 24576); } else { } return (-22); } ldv_48527: ; } else { ring = (struct intel_engine_cs *)(& dev_priv->ring) + 1UL; } } else { ring = (struct intel_engine_cs *)(& dev_priv->ring) + (((unsigned long )args->flags & 7UL) + 0xffffffffffffffffUL); } tmp___7 = intel_ring_initialized(ring); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { tmp___6 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_gem_do_execbuffer", "execbuf with invalid ring: %d\n", (int )args->flags & 7); } else { } return (-22); } else { } if (args->buffer_count == 0U) { tmp___9 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("i915_gem_do_execbuffer", "execbuf with %d buffers\n", args->buffer_count); } else { } return (-22); } else { } intel_runtime_pm_get(dev_priv); ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { goto 
pre_mutex_err; } else { } ctx = i915_gem_validate_context(dev, file, ring, ctx_id); tmp___11 = IS_ERR((void const *)ctx); if ((int )tmp___11) { mutex_unlock(& dev->struct_mutex); tmp___10 = PTR_ERR((void const *)ctx); ret = (int )tmp___10; goto pre_mutex_err; } else { } i915_gem_context_reference___0(ctx); if ((unsigned long )ctx->ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { vm = & (ctx->ppgtt)->base; } else { vm = & dev_priv->gtt.base; } eb = eb_create(args); if ((unsigned long )eb == (unsigned long )((struct eb_vmas *)0)) { i915_gem_context_unreference___0(ctx); mutex_unlock(& dev->struct_mutex); ret = -12; goto pre_mutex_err; } else { } ret = eb_lookup_vmas(eb, exec, (struct drm_i915_gem_execbuffer2 const *)args, vm, file); if (ret != 0) { goto err; } else { } batch_obj = eb_get_batch(eb); need_relocs = (args->flags & 2048ULL) == 0ULL; ret = i915_gem_execbuffer_reserve(ring, & eb->vmas, & need_relocs); if (ret != 0) { goto err; } else { } if ((int )need_relocs) { ret = i915_gem_execbuffer_relocate(eb); } else { } if (ret != 0) { if (ret == -14) { ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, eb, exec); tmp___12 = mutex_is_locked(& dev->struct_mutex); tmp___13 = ldv__builtin_expect(tmp___12 == 0, 0L); if (tmp___13 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_execbuffer.c"), "i" (1534), "i" (12UL)); ldv_48533: ; goto ldv_48533; } else { } } else { } if (ret != 0) { goto err; } else { } } else { } if (batch_obj->base.pending_write_domain != 0U) { tmp___14 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("i915_gem_do_execbuffer", "Attempting to use self-modifying batch 
buffer\n"); } else { } ret = -22; goto err; } else { } tmp___17 = i915_needs_cmd_parser(ring); if ((int )tmp___17 && args->batch_len != 0U) { parsed_batch_obj = i915_gem_execbuffer_parse(ring, & shadow_exec_entry, eb, batch_obj, args->batch_start_offset, args->batch_len, (unsigned int )*((unsigned char *)file + 0UL) != 0U); tmp___16 = IS_ERR((void const *)parsed_batch_obj); if ((int )tmp___16) { tmp___15 = PTR_ERR((void const *)parsed_batch_obj); ret = (int )tmp___15; goto err; } else { } if ((unsigned long )parsed_batch_obj != (unsigned long )batch_obj) { dispatch_flags = dispatch_flags | 1U; exec_start = 0ULL; batch_obj = parsed_batch_obj; } else { } } else { } batch_obj->base.pending_read_domains = batch_obj->base.pending_read_domains | 8U; if ((int )dispatch_flags & 1) { ret = i915_gem_obj_ggtt_pin(batch_obj, 0U, 0U); if (ret != 0) { goto err; } else { } tmp___18 = i915_gem_obj_ggtt_offset(batch_obj); exec_start = (unsigned long long )tmp___18 + exec_start; } else { tmp___19 = i915_gem_obj_offset(batch_obj, vm); exec_start = (unsigned long long )tmp___19 + exec_start; } ret = (*(dev_priv->gt.execbuf_submit))(dev, file, ring, ctx, args, & eb->vmas, batch_obj, exec_start, dispatch_flags); if ((int )dispatch_flags & 1) { i915_gem_object_ggtt_unpin(batch_obj); } else { } err: i915_gem_context_unreference___0(ctx); eb_destroy(eb); mutex_unlock(& dev->struct_mutex); pre_mutex_err: intel_runtime_pm_put(dev_priv); return (ret); } } int i915_gem_execbuffer(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_execbuffer *args ; struct drm_i915_gem_execbuffer2 exec2 ; struct drm_i915_gem_exec_object *exec_list ; struct drm_i915_gem_exec_object2 *exec2_list ; int ret ; int i ; long tmp ; void *tmp___0 ; void *tmp___1 ; long tmp___2 ; void *tmp___3 ; unsigned long tmp___4 ; long tmp___5 ; struct drm_i915_private *__p ; struct drm_i915_gem_exec_object *user_exec_list ; void *tmp___6 ; long tmp___7 ; { args = (struct drm_i915_gem_execbuffer 
*)data; exec_list = (struct drm_i915_gem_exec_object *)0; exec2_list = (struct drm_i915_gem_exec_object2 *)0; if (args->buffer_count == 0U) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_execbuffer", "execbuf with %d buffers\n", args->buffer_count); } else { } return (-22); } else { } tmp___0 = drm_malloc_ab(32UL, (size_t )args->buffer_count); exec_list = (struct drm_i915_gem_exec_object *)tmp___0; tmp___1 = drm_malloc_ab(56UL, (size_t )args->buffer_count); exec2_list = (struct drm_i915_gem_exec_object2 *)tmp___1; if ((unsigned long )exec_list == (unsigned long )((struct drm_i915_gem_exec_object *)0) || (unsigned long )exec2_list == (unsigned long )((struct drm_i915_gem_exec_object2 *)0)) { tmp___2 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer", "Failed to allocate exec list for %d buffers\n", args->buffer_count); } else { } drm_free_large((void *)exec_list); drm_free_large((void *)exec2_list); return (-12); } else { } tmp___3 = to_user_ptr(args->buffers_ptr); tmp___4 = copy_from_user((void *)exec_list, (void const *)tmp___3, (unsigned long )args->buffer_count * 32UL); ret = (int )tmp___4; if (ret != 0) { tmp___5 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer", "copy %d exec entries failed %d\n", args->buffer_count, ret); } else { } drm_free_large((void *)exec_list); drm_free_large((void *)exec2_list); return (-14); } else { } i = 0; goto ldv_48554; ldv_48553: (exec2_list + (unsigned long )i)->handle = (exec_list + (unsigned long )i)->handle; (exec2_list + (unsigned long )i)->relocation_count = (exec_list + (unsigned long )i)->relocation_count; (exec2_list + (unsigned long )i)->relocs_ptr = (exec_list + (unsigned long )i)->relocs_ptr; (exec2_list + (unsigned long )i)->alignment = (exec_list + (unsigned long )i)->alignment; (exec2_list + (unsigned long 
)i)->offset = (exec_list + (unsigned long )i)->offset; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 3U) { (exec2_list + (unsigned long )i)->flags = 1ULL; } else { (exec2_list + (unsigned long )i)->flags = 0ULL; } i = i + 1; ldv_48554: ; if ((__u32 )i < args->buffer_count) { goto ldv_48553; } else { } exec2.buffers_ptr = args->buffers_ptr; exec2.buffer_count = args->buffer_count; exec2.batch_start_offset = args->batch_start_offset; exec2.batch_len = args->batch_len; exec2.DR1 = args->DR1; exec2.DR4 = args->DR4; exec2.num_cliprects = args->num_cliprects; exec2.cliprects_ptr = args->cliprects_ptr; exec2.flags = 1ULL; exec2.rsvd1 = 0ULL; ret = i915_gem_do_execbuffer(dev, data, file, & exec2, exec2_list); if (ret == 0) { tmp___6 = to_user_ptr(args->buffers_ptr); user_exec_list = (struct drm_i915_gem_exec_object *)tmp___6; i = 0; goto ldv_48559; ldv_48558: ret = __copy_to_user((void *)(& (user_exec_list + (unsigned long )i)->offset), (void const *)(& (exec2_list + (unsigned long )i)->offset), 8U); if (ret != 0) { ret = -14; tmp___7 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer", "failed to copy %d exec entries back to user (%d)\n", args->buffer_count, ret); } else { } goto ldv_48557; } else { } i = i + 1; ldv_48559: ; if ((__u32 )i < args->buffer_count) { goto ldv_48558; } else { } ldv_48557: ; } else { } drm_free_large((void *)exec_list); drm_free_large((void *)exec2_list); return (ret); } } int i915_gem_execbuffer2(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_execbuffer2 *args ; struct drm_i915_gem_exec_object2 *exec2_list ; int ret ; long tmp ; long tmp___0 ; void *tmp___1 ; void *tmp___2 ; long tmp___3 ; void *tmp___4 ; unsigned long tmp___5 ; long tmp___6 ; struct drm_i915_gem_exec_object2 *user_exec_list ; void *tmp___7 ; int i ; long tmp___8 ; { args = (struct drm_i915_gem_execbuffer2 *)data; 
exec2_list = (struct drm_i915_gem_exec_object2 *)0; if (args->buffer_count == 0U || args->buffer_count > 76695844U) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_execbuffer2", "execbuf2 with %d buffers\n", args->buffer_count); } else { } return (-22); } else { } if (args->rsvd2 != 0ULL) { tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer2", "dirty rvsd2 field\n"); } else { } return (-22); } else { } tmp___1 = kmalloc((unsigned long )args->buffer_count * 56UL, 529104U); exec2_list = (struct drm_i915_gem_exec_object2 *)tmp___1; if ((unsigned long )exec2_list == (unsigned long )((struct drm_i915_gem_exec_object2 *)0)) { tmp___2 = drm_malloc_ab(56UL, (size_t )args->buffer_count); exec2_list = (struct drm_i915_gem_exec_object2 *)tmp___2; } else { } if ((unsigned long )exec2_list == (unsigned long )((struct drm_i915_gem_exec_object2 *)0)) { tmp___3 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer2", "Failed to allocate exec list for %d buffers\n", args->buffer_count); } else { } return (-12); } else { } tmp___4 = to_user_ptr(args->buffers_ptr); tmp___5 = copy_from_user((void *)exec2_list, (void const *)tmp___4, (unsigned long )args->buffer_count * 56UL); ret = (int )tmp___5; if (ret != 0) { tmp___6 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer2", "copy %d exec entries failed %d\n", args->buffer_count, ret); } else { } drm_free_large((void *)exec2_list); return (-14); } else { } ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (ret == 0) { tmp___7 = to_user_ptr(args->buffers_ptr); user_exec_list = (struct drm_i915_gem_exec_object2 *)tmp___7; i = 0; goto ldv_48573; ldv_48572: ret = __copy_to_user((void *)(& (user_exec_list + (unsigned long )i)->offset), (void const *)(& 
(exec2_list + (unsigned long )i)->offset), 8U); if (ret != 0) { ret = -14; tmp___8 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("i915_gem_execbuffer2", "failed to copy %d exec entries back to user\n", args->buffer_count); } else { } goto ldv_48571; } else { } i = i + 1; ldv_48573: ; if ((__u32 )i < args->buffer_count) { goto ldv_48572; } else { } ldv_48571: ; } else { } drm_free_large((void *)exec2_list); return (ret); } } bool ldv_queue_work_on_263(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_264(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_265(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_266(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_267(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; 
activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static int test_and_clear_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } } extern unsigned long find_next_bit(unsigned long const * , unsigned long , unsigned long ) ; extern unsigned long find_first_bit(unsigned long const * , unsigned long ) ; extern void __bitmap_or(unsigned long * , unsigned long const * , unsigned long const * , unsigned int ) ; extern void bitmap_set(unsigned long * , unsigned int , int ) ; __inline static void bitmap_zero(unsigned long *dst , unsigned int nbits ) { unsigned int len ; { len = (unsigned int )(((unsigned long )nbits + 63UL) / 64UL) * 8U; memset((void *)dst, 0, (size_t )len); return; } } __inline static void bitmap_or(unsigned long *dst , unsigned long const *src1 , unsigned long const *src2 , unsigned int nbits ) { { __bitmap_or(dst, src1, src2, nbits); return; } } __inline static int bitmap_empty(unsigned long const *src , unsigned int nbits ) { unsigned long tmp ; { tmp = find_first_bit(src, (unsigned long )nbits); return (tmp == (unsigned long )nbits); } } __inline static unsigned long arch_local_save_flags___6(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned 
long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static bool static_key_false___4(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int rcu_read_lock_sched_held___4(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___6(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } bool ldv_queue_work_on_277(int ldv_func_arg1 , struct 
workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_279(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_278(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_281(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_280(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void writel(unsigned int val , void volatile *addr ) { { __asm__ volatile ("movl %0,%1": : "r" (val), "m" (*((unsigned int volatile *)addr)): "memory"); return; } } __inline static unsigned long readq(void const volatile *addr ) { unsigned long ret ; { __asm__ volatile ("movq %1,%0": "=r" (ret): "m" (*((unsigned long volatile *)addr)): "memory"); return (ret); } } __inline static void writeq(unsigned long val , void volatile *addr ) { { __asm__ volatile ("movq %0,%1": : "r" (val), "m" (*((unsigned long volatile *)addr)): "memory"); return; } } extern void *ioremap_nocache(resource_size_t , unsigned long ) ; extern void iounmap(void volatile * ) ; extern void *ioremap_wc(resource_size_t , unsigned long ) ; extern struct page *alloc_pages_current(gfp_t , unsigned int ) ; __inline static struct page *alloc_pages(gfp_t gfp_mask , unsigned int order ) { struct page *tmp ; { tmp = alloc_pages_current(gfp_mask, order); return (tmp); } } extern void __free_pages(struct page * , unsigned int ) ; __inline static void kref_get___4(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } extern void *kmem_cache_alloc(struct kmem_cache * , gfp_t ) ; __inline static void *kmem_cache_zalloc(struct kmem_cache *k , gfp_t flags ) { void *tmp ; { tmp = kmem_cache_alloc(k, flags | 32768U); return (tmp); } } __inline static dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter ) { { return ((piter->sg)->dma_address + (dma_addr_t )(piter->sg_pgoffset << 12)); } } extern void debug_dma_map_page(struct device * , struct page * , size_t , size_t , int , dma_addr_t , bool ) ; extern void debug_dma_mapping_error(struct device * , dma_addr_t ) ; extern void debug_dma_unmap_page(struct device * , dma_addr_t , size_t , int , bool ) ; __inline static dma_addr_t dma_map_page(struct device *dev , struct page *page , size_t offset , size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; dma_addr_t addr ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = lowmem_page_address((struct page const *)page); kmemcheck_mark_initialized(tmp___0 + offset, (unsigned int )size); tmp___1 = valid_dma_direction((int )dir); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (84), "i" (12UL)); ldv_25942: ; goto ldv_25942; } else { } addr = (*(ops->map_page))(dev, page, offset, size, dir, (struct dma_attrs *)0); debug_dma_map_page(dev, page, offset, size, (int )dir, addr, 0); return (addr); } } __inline static void dma_unmap_page(struct device *dev , dma_addr_t addr , 
size_t size , enum dma_data_direction dir ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (96), "i" (12UL)); ldv_25950: ; goto ldv_25950; } else { } if ((unsigned long )ops->unmap_page != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_page))(dev, addr, size, dir, (struct dma_attrs *)0); } else { } debug_dma_unmap_page(dev, addr, size, (int )dir, 0); return; } } __inline static int dma_mapping_error(struct device *dev , dma_addr_t dma_addr ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; { tmp = get_dma_ops(dev); ops = tmp; debug_dma_mapping_error(dev, dma_addr); if ((unsigned long )ops->mapping_error != (unsigned long )((int (*)(struct device * , dma_addr_t ))0)) { tmp___0 = (*(ops->mapping_error))(dev, dma_addr); return (tmp___0); } else { } return (dma_addr == 0ULL); } } extern int dma_supported(struct device * , u64 ) ; extern int dma_set_mask(struct device * , u64 ) ; __inline static int dma_set_coherent_mask(struct device *dev , u64 mask ) { int tmp ; { tmp = dma_supported(dev, mask); if (tmp == 0) { return (-5); } else { } dev->coherent_dma_mask = mask; return (0); } } extern int set_pages_uc(struct page * , int ) ; extern int set_pages_wb(struct page * , int ) ; __inline static dma_addr_t pci_map_page(struct pci_dev *hwdev , struct page *page , unsigned long offset , size_t size , int direction ) { dma_addr_t tmp ; { tmp = dma_map_page((unsigned long )hwdev != (unsigned long )((struct pci_dev *)0) ? 
& hwdev->dev : (struct device *)0, page, offset, size, (enum dma_data_direction )direction); return (tmp); } } __inline static void pci_unmap_page(struct pci_dev *hwdev , dma_addr_t dma_address , size_t size , int direction ) { { dma_unmap_page((unsigned long )hwdev != (unsigned long )((struct pci_dev *)0) ? & hwdev->dev : (struct device *)0, dma_address, size, (enum dma_data_direction )direction); return; } } __inline static int pci_dma_mapping_error(struct pci_dev *pdev , dma_addr_t dma_addr ) { int tmp ; { tmp = dma_mapping_error(& pdev->dev, dma_addr); return (tmp); } } __inline static int pci_set_dma_mask(struct pci_dev *dev , u64 mask ) { int tmp ; { tmp = dma_set_mask(& dev->dev, mask); return (tmp); } } __inline static int pci_set_consistent_dma_mask(struct pci_dev *dev , u64 mask ) { int tmp ; { tmp = dma_set_coherent_mask(& dev->dev, mask); return (tmp); } } __inline static bool drm_mm_initialized(struct drm_mm *mm ) { { return ((unsigned long )mm->hole_stack.next != (unsigned long )((struct list_head *)0)); } } __inline static u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node ) { { return (hole_node->start + hole_node->size); } } __inline static u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node ) { long tmp ; u64 tmp___0 ; { tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)hole_node + 32UL) == 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/drm/drm_mm.h"), "i" (145), "i" (12UL)); ldv_38482: ; goto ldv_38482; } else { } tmp___0 = __drm_mm_hole_node_start(hole_node); return (tmp___0); } } __inline static u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node ) { struct list_head const *__mptr ; { __mptr = (struct list_head const *)hole_node->node_list.next; return (((struct drm_mm_node *)__mptr)->start); } } __inline static u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node ) { u64 tmp ; { 
tmp = __drm_mm_hole_node_end(hole_node); return (tmp); } } extern int drm_mm_reserve_node(struct drm_mm * , struct drm_mm_node * ) ; extern int drm_mm_insert_node_in_range_generic(struct drm_mm * , struct drm_mm_node * , u64 , unsigned int , unsigned long , u64 , u64 , enum drm_mm_search_flags , enum drm_mm_allocator_flags ) ; extern void drm_mm_remove_node(struct drm_mm_node * ) ; extern void drm_mm_init(struct drm_mm * , u64 , u64 ) ; extern void drm_mm_takedown(struct drm_mm * ) ; struct i915_ggtt_view const i915_ggtt_view_normal ; struct i915_ggtt_view const i915_ggtt_view_rotated ; __inline static uint32_t i915_pte_index(uint64_t address , uint32_t pde_shift ) { uint32_t mask ; { mask = (uint32_t const )((1 << (int )(pde_shift - 12U)) + -1); return ((uint32_t )(address >> 12) & mask); } } __inline static uint32_t i915_pte_count(uint64_t addr , size_t length , uint32_t pde_shift ) { uint64_t mask ; uint64_t end ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; long tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; { mask = (uint64_t const )(- (1 << (int )pde_shift)); __ret_warn_on = length == 0UL; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_gem_gtt.h", 383); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = ((unsigned long )(addr | (unsigned long long )length) & 4095UL) != 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_gem_gtt.h", 384); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); end = addr + (unsigned long long )length; if (((addr ^ end) & mask) != 0ULL) { tmp___1 = i915_pte_index(addr, pde_shift); return ((uint32_t )(1 << (int )(pde_shift - 12U)) - tmp___1); } else { } tmp___2 = 
i915_pte_index(end, pde_shift); tmp___3 = i915_pte_index(addr, pde_shift); return (tmp___2 - tmp___3); } } __inline static uint32_t i915_pde_index(uint64_t addr , uint32_t shift ) { { return ((uint32_t )(addr >> (int )shift) & 511U); } } __inline static uint32_t gen6_pte_index(uint32_t addr ) { uint32_t tmp ; { tmp = i915_pte_index((uint64_t )addr, 22U); return (tmp); } } __inline static size_t gen6_pte_count(uint32_t addr , uint32_t length ) { uint32_t tmp ; { tmp = i915_pte_count((uint64_t )addr, (size_t )length, 22U); return ((size_t )tmp); } } __inline static uint32_t gen6_pde_index(uint32_t addr ) { uint32_t tmp ; { tmp = i915_pde_index((uint64_t )addr, 22U); return (tmp); } } __inline static uint64_t gen8_clamp_pd(uint64_t start , uint64_t length ) { uint64_t next_pd ; { next_pd = (start + 1073741824ULL) & 0xffffffffc0000000ULL; if (start + length < next_pd) { return (length); } else { } return (next_pd - start); } } __inline static uint32_t gen8_pte_index(uint64_t address ) { uint32_t tmp ; { tmp = i915_pte_index(address, 21U); return (tmp); } } __inline static uint32_t gen8_pde_index(uint64_t address ) { uint32_t tmp ; { tmp = i915_pde_index(address, 21U); return (tmp); } } __inline static uint32_t gen8_pdpe_index(uint64_t address ) { { return ((uint32_t )(address >> 30) & 3U); } } __inline static size_t gen8_pte_count(uint64_t address , uint64_t length ) { uint32_t tmp ; { tmp = i915_pte_count(address, (size_t )length, 21U); return ((size_t )tmp); } } int i915_gem_gtt_init(struct drm_device *dev ) ; void i915_gem_init_global_gtt(struct drm_device *dev ) ; void i915_global_gtt_cleanup(struct drm_device *dev ) ; int i915_ppgtt_init(struct drm_device *dev , struct i915_hw_ppgtt *ppgtt ) ; int i915_ppgtt_init_hw(struct drm_device *dev ) ; __inline static void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt ) { { if ((unsigned long )ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { kref_get___4(& ppgtt->ref); } else { } return; } } void 
i915_check_and_clear_faults(struct drm_device *dev ) ; int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj ) ; void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj ) ; size_t i915_ggtt_view_size(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) ; extern void intel_gtt_get(size_t * , size_t * , phys_addr_t * , unsigned long * ) ; extern int intel_gmch_probe(struct pci_dev * , struct pci_dev * , struct agp_bridge_data * ) ; extern void intel_gmch_remove(void) ; extern void intel_gtt_insert_sg_entries(struct sg_table * , unsigned int , unsigned int ) ; extern void intel_gtt_clear_range(unsigned int , unsigned int ) ; unsigned int intel_tile_height(struct drm_device *dev , uint32_t pixel_format , uint64_t fb_format_modifier ) ; struct tracepoint __tracepoint_i915_va_alloc ; __inline static void trace_i915_va_alloc(struct i915_address_space *vm , u64 start , u64 length , char const *name ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_320 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_322 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___4(& __tracepoint_i915_va_alloc.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_va_alloc.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___4(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 187, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45355: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_address_space * , u64 , u64 , char const * ))it_func))(__data, vm, start, length, name); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45355; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_va_alloc.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___4(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 187, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_page_table_entry_alloc ; __inline static void trace_i915_page_table_entry_alloc(struct i915_address_space *vm , u32 pde , u64 start , u64 pde_shift ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_324 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_326 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___4(& __tracepoint_i915_page_table_entry_alloc.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_page_table_entry_alloc.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___4(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 214, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45421: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_address_space * , u32 , u64 , u64 ))it_func))(__data, vm, pde, start, pde_shift); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45421; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_page_table_entry_alloc.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___4(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 214, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_page_table_entry_map ; __inline static void trace_i915_page_table_entry_map(struct i915_address_space *vm , u32 pde , struct i915_page_table *pt , u32 first , u32 count , u32 bits ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_328 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_330 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___4(& __tracepoint_i915_page_table_entry_map.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_page_table_entry_map.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___4(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 255, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45491: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_address_space * , u32 , struct i915_page_table * , u32 , u32 , u32 ))it_func))(__data, vm, pde, pt, first, count, bits); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45491; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_page_table_entry_map.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___4(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 255, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_ppgtt_create ; __inline static void trace_i915_ppgtt_create(struct i915_address_space *vm ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_420 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_422 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___4(& __tracepoint_i915_ppgtt_create.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_ppgtt_create.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___4(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 703, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46794: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_address_space * ))it_func))(__data, vm); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46794; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_ppgtt_create.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___4(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 703, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_ppgtt_release ; __inline static void trace_i915_ppgtt_release(struct i915_address_space *vm ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_424 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_426 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___4(& __tracepoint_i915_ppgtt_release.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_ppgtt_release.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___4(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 708, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46845: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_address_space * ))it_func))(__data, vm); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46845; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_ppgtt_release.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___4(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 708, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } void i915_init_vm(struct drm_i915_private *dev_priv , struct i915_address_space *vm ) ; struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj , struct i915_address_space *vm ) ; struct i915_vma *i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) ; __inline static struct i915_hw_ppgtt *i915_vm_to_ppgtt(struct i915_address_space *vm ) { int __ret_warn_on ; bool tmp ; long tmp___0 ; struct i915_address_space const *__mptr ; { tmp = i915_is_ggtt(vm); __ret_warn_on = (int )tmp; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2971, "WARN_ON(i915_is_ggtt(vm))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __mptr = (struct i915_address_space const *)vm; return ((struct i915_hw_ppgtt *)__mptr); } } int intel_vgt_balloon(struct drm_device *dev ) ; void intel_vgt_deballoon(void) ; static int i915_get_ggtt_vma_pages(struct i915_vma *vma ) ; struct i915_ggtt_view const i915_ggtt_view_rotated = {1, {{0UL, 0U}}, 0, {{0U, 0U, 0U, 0ULL}}}; static int sanitize_enable_ppgtt(struct drm_device *dev , int enable_ppgtt ) { bool has_aliasing_ppgtt ; bool has_full_ppgtt ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; bool tmp ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; long tmp___0 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { __p = to_i915((struct 
drm_device const *)dev); has_aliasing_ppgtt = (unsigned int )((unsigned char )__p->info.gen) > 5U; __p___0 = to_i915((struct drm_device const *)dev); has_full_ppgtt = (unsigned int )((unsigned char )__p___0->info.gen) > 6U; tmp = intel_vgpu_active(dev); if ((int )tmp) { has_full_ppgtt = 0; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 8U && (enable_ppgtt == 0 || ! has_aliasing_ppgtt)) { return (0); } else { } if (enable_ppgtt == 1) { return (1); } else { } if (enable_ppgtt == 2 && (int )has_full_ppgtt) { return (2); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 6U && intel_iommu_gfx_mapped != 0) { printk("\016[drm] Disabling PPGTT because VT-d is on\n"); return (0); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { goto _L; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) != 8U) { _L: /* CIL Label */ if ((unsigned int )(dev->pdev)->revision <= 10U) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("sanitize_enable_ppgtt", "disabling PPGTT on pre-B3 step VLV\n"); } else { } return (0); } else { } } else { } } } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 7U && i915.enable_execlists != 0) { return (2); } else { return ((int )has_aliasing_ppgtt); } } } static int ppgtt_bind_vma(struct i915_vma *vma , enum i915_cache_level cache_level , u32 unused ) { u32 pte_flags___0 ; { pte_flags___0 = 0U; if ((unsigned int )*((unsigned char *)vma->obj + 410UL) != 0U) { pte_flags___0 = pte_flags___0 | 1U; } else { } (*((vma->vm)->insert_entries))(vma->vm, 
(vma->obj)->pages, vma->node.start, cache_level, pte_flags___0); return (0); } } static void ppgtt_unbind_vma(struct i915_vma *vma ) { { (*((vma->vm)->clear_range))(vma->vm, vma->node.start, (uint64_t )(vma->obj)->base.size, 1); return; } } static gen8_pte_t gen8_pte_encode(dma_addr_t addr , enum i915_cache_level level , bool valid ) { gen8_pte_t pte ; { pte = (int )valid ? 3ULL : 0ULL; pte = pte | addr; switch ((unsigned int )level) { case 0U: pte = pte | 24ULL; goto ldv_48080; case 3U: pte = pte | 16ULL; goto ldv_48080; default: pte = pte | 128ULL; goto ldv_48080; } ldv_48080: ; return (pte); } } static gen8_pde_t gen8_pde_encode(struct drm_device *dev , dma_addr_t addr , enum i915_cache_level level ) { gen8_pde_t pde ; { pde = 3ULL; pde = pde | addr; if ((unsigned int )level != 0U) { pde = pde; } else { pde = pde | 24ULL; } return (pde); } } static gen6_pte_t snb_pte_encode(dma_addr_t addr , enum i915_cache_level level , bool valid , u32 unused ) { gen6_pte_t pte ; int __ret_warn_on ; long tmp ; { pte = (gen6_pte_t )valid; pte = (((gen6_pte_t )(addr >> 28) & 4080U) | (gen6_pte_t )addr) | pte; switch ((unsigned int )level) { case 2U: ; case 1U: pte = pte | 4U; goto ldv_48098; case 0U: pte = pte | 2U; goto ldv_48098; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 224, "Missing switch case (%lu) in %s\n", (long )level, "snb_pte_encode"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } ldv_48098: ; return (pte); } } static gen6_pte_t ivb_pte_encode(dma_addr_t addr , enum i915_cache_level level , bool valid , u32 unused ) { gen6_pte_t pte ; int __ret_warn_on ; long tmp ; { pte = (gen6_pte_t )valid; pte = (((gen6_pte_t )(addr >> 28) & 4080U) | (gen6_pte_t )addr) | pte; switch 
((unsigned int )level) { case 2U: pte = pte | 6U; goto ldv_48112; case 1U: pte = pte | 4U; goto ldv_48112; case 0U: pte = pte | 2U; goto ldv_48112; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 248, "Missing switch case (%lu) in %s\n", (long )level, "ivb_pte_encode"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } ldv_48112: ; return (pte); } } static gen6_pte_t byt_pte_encode(dma_addr_t addr , enum i915_cache_level level , bool valid , u32 flags ) { gen6_pte_t pte ; { pte = (gen6_pte_t )valid; pte = (((gen6_pte_t )(addr >> 28) & 4080U) | (gen6_pte_t )addr) | pte; if ((flags & 1U) == 0U) { pte = pte | 2U; } else { } if ((unsigned int )level != 0U) { pte = pte | 4U; } else { } return (pte); } } static gen6_pte_t hsw_pte_encode(dma_addr_t addr , enum i915_cache_level level , bool valid , u32 unused ) { gen6_pte_t pte ; { pte = (gen6_pte_t )valid; pte = (((gen6_pte_t )(addr >> 28) & 2032U) | (gen6_pte_t )addr) | pte; if ((unsigned int )level != 0U) { pte = pte | 4U; } else { } return (pte); } } static gen6_pte_t iris_pte_encode(dma_addr_t addr , enum i915_cache_level level , bool valid , u32 unused ) { gen6_pte_t pte ; { pte = (gen6_pte_t )valid; pte = (((gen6_pte_t )(addr >> 28) & 2032U) | (gen6_pte_t )addr) | pte; switch ((unsigned int )level) { case 0U: ; goto ldv_48141; case 3U: pte = pte | 14U; goto ldv_48141; default: pte = pte | 2048U; goto ldv_48141; } ldv_48141: ; return (pte); } } static void __i915_dma_unmap_single(dma_addr_t daddr , struct drm_device *dev ) { struct device *device ; { device = & (dev->pdev)->dev; dma_unmap_page(device, daddr, 4096UL, 0); return; } } static int i915_dma_map_page_single(struct page *page , struct drm_device *dev , dma_addr_t *daddr ) 
{ struct device *device ; int tmp ; { device = & (dev->pdev)->dev; *daddr = dma_map_page(device, page, 0UL, 4096UL, 0); tmp = dma_mapping_error(device, *daddr); if (tmp != 0) { return (-12); } else { } return (0); } } static void unmap_and_free_pt(struct i915_page_table *pt , struct drm_device *dev ) { int __ret_warn_on ; long tmp ; long tmp___0 ; { __ret_warn_on = (unsigned long )pt->page == (unsigned long )((struct page *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 345, "WARN_ON(!pt->page)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } __i915_dma_unmap_single(pt->daddr, dev); __free_pages(pt->page, 0U); kfree((void const *)pt->used_ptes); kfree((void const *)pt); return; } } static void gen8_initialize_pt(struct i915_address_space *vm , struct i915_page_table *pt ) { gen8_pte_t *pt_vaddr ; gen8_pte_t scratch_pte ; int i ; void *tmp ; struct drm_i915_private *__p ; { tmp = kmap_atomic(pt->page); pt_vaddr = (gen8_pte_t *)tmp; scratch_pte = gen8_pte_encode(vm->scratch.addr, 1, 1); i = 0; goto ldv_48169; ldv_48168: *(pt_vaddr + (unsigned long )i) = scratch_pte; i = i + 1; ldv_48169: ; if ((unsigned int )i <= 511U) { goto ldv_48168; } else { } __p = to_i915((struct drm_device const *)vm->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { drm_clflush_virt_range((void *)pt_vaddr, 4096UL); } else { } __kunmap_atomic((void *)pt_vaddr); return; } } static struct i915_page_table *alloc_pt_single(struct drm_device *dev ) { struct i915_page_table *pt ; size_t count ; struct drm_i915_private *__p ; int ret ; void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; { __p = to_i915((struct drm_device const *)dev); count = (unsigned int 
)((unsigned char )__p->info.gen) > 7U ? 512UL : 1024UL; ret = -12; tmp = kzalloc(24UL, 208U); pt = (struct i915_page_table *)tmp; if ((unsigned long )pt == (unsigned long )((struct i915_page_table *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct i915_page_table *)tmp___0); } else { } tmp___1 = kcalloc((count + 63UL) / 64UL, 8UL, 208U); pt->used_ptes = (unsigned long *)tmp___1; if ((unsigned long )pt->used_ptes == (unsigned long )((unsigned long *)0UL)) { goto fail_bitmap; } else { } pt->page = alloc_pages(208U, 0U); if ((unsigned long )pt->page == (unsigned long )((struct page *)0)) { goto fail_page; } else { } ret = i915_dma_map_page_single(pt->page, dev, & pt->daddr); if (ret != 0) { goto fail_dma; } else { } return (pt); fail_dma: __free_pages(pt->page, 0U); fail_page: kfree((void const *)pt->used_ptes); fail_bitmap: kfree((void const *)pt); tmp___2 = ERR_PTR((long )ret); return ((struct i915_page_table *)tmp___2); } } static void unmap_and_free_pd(struct i915_page_directory *pd , struct drm_device *dev ) { { if ((unsigned long )pd->page != (unsigned long )((struct page *)0)) { __i915_dma_unmap_single(pd->__annonCompField79.daddr, dev); __free_pages(pd->page, 0U); kfree((void const *)pd->used_pdes); kfree((void const *)pd); } else { } return; } } static struct i915_page_directory *alloc_pd_single(struct drm_device *dev ) { struct i915_page_directory *pd ; int ret ; void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; { ret = -12; tmp = kzalloc(4120UL, 208U); pd = (struct i915_page_directory *)tmp; if ((unsigned long )pd == (unsigned long )((struct i915_page_directory *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct i915_page_directory *)tmp___0); } else { } tmp___1 = kcalloc(8UL, 8UL, 208U); pd->used_pdes = (unsigned long *)tmp___1; if ((unsigned long )pd->used_pdes == (unsigned long )((unsigned long *)0UL)) { goto free_pd; } else { } pd->page = alloc_pages(208U, 0U); if ((unsigned long )pd->page == (unsigned long )((struct page *)0)) { goto free_bitmap; 
} else { } ret = i915_dma_map_page_single(pd->page, dev, & pd->__annonCompField79.daddr); if (ret != 0) { goto free_page; } else { } return (pd); free_page: __free_pages(pd->page, 0U); free_bitmap: kfree((void const *)pd->used_pdes); free_pd: kfree((void const *)pd); tmp___2 = ERR_PTR((long )ret); return ((struct i915_page_directory *)tmp___2); } } static int gen8_write_pdp(struct intel_engine_cs *ring , unsigned int entry , dma_addr_t addr ) { int ret ; long tmp ; { tmp = ldv__builtin_expect(entry > 3U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c"), "i" (461), "i" (12UL)); ldv_48210: ; goto ldv_48210; } else { } ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 285212673U); intel_ring_emit(ring, (ring->mmio_base + entry * 8U) + 628U); intel_ring_emit(ring, (unsigned int )(addr >> 32ULL)); intel_ring_emit(ring, 285212673U); intel_ring_emit(ring, (ring->mmio_base + entry * 8U) + 624U); intel_ring_emit(ring, (unsigned int )addr); intel_ring_advance(ring); return (0); } } static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt , struct intel_engine_cs *ring ) { int i ; int ret ; struct i915_page_directory *pd ; dma_addr_t pd_daddr ; { i = 3; goto ldv_48220; ldv_48219: pd = ppgtt->__annonCompField80.pdp.page_directory[i]; pd_daddr = (unsigned long )pd != (unsigned long )((struct i915_page_directory *)0) ? 
pd->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; ret = gen8_write_pdp(ring, (unsigned int )i, pd_daddr); if (ret != 0) { return (ret); } else { } i = i - 1; ldv_48220: ; if (i >= 0) { goto ldv_48219; } else { } return (0); } } static void gen8_ppgtt_clear_range(struct i915_address_space *vm , uint64_t start , uint64_t length , bool use_scratch ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; gen8_pte_t *pt_vaddr ; gen8_pte_t scratch_pte ; unsigned int pdpe ; unsigned int pde ; unsigned int pte ; unsigned int num_entries ; unsigned int last_pte ; unsigned int i ; struct i915_page_directory *pd ; struct i915_page_table *pt ; struct page *page_table ; int __ret_warn_on ; long tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on___1 ; long tmp___3 ; long tmp___4 ; void *tmp___5 ; struct drm_i915_private *__p ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; pdpe = (unsigned int )(start >> 30) & 3U; pde = (unsigned int )(start >> 21) & 511U; pte = (unsigned int )(start >> 12) & 511U; num_entries = (unsigned int )(length >> 12); scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr, 1, (int )use_scratch); goto ldv_48259; ldv_48258: __ret_warn_on = (unsigned long )ppgtt->__annonCompField80.pdp.page_directory[pdpe] == (unsigned long )((struct i915_page_directory *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 518, "WARN_ON(!ppgtt->pdp.page_directory[pdpe])"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { goto ldv_48244; } else { } pd = ppgtt->__annonCompField80.pdp.page_directory[pdpe]; __ret_warn_on___0 = (unsigned long 
)pd->page_table[pde] == (unsigned long )((struct i915_page_table *)0); tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 523, "WARN_ON(!pd->page_table[pde])"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { goto ldv_48244; } else { } pt = pd->page_table[pde]; __ret_warn_on___1 = (unsigned long )pt->page == (unsigned long )((struct page *)0); tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 528, "WARN_ON(!pt->page)"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { goto ldv_48244; } else { } page_table = pt->page; last_pte = pte + num_entries; if (last_pte > 512U) { last_pte = 512U; } else { } tmp___5 = kmap_atomic(page_table); pt_vaddr = (gen8_pte_t *)tmp___5; i = pte; goto ldv_48250; ldv_48249: *(pt_vaddr + (unsigned long )i) = scratch_pte; num_entries = num_entries - 1U; i = i + 1U; ldv_48250: ; if (i < last_pte) { goto ldv_48249; } else { } __p = to_i915((struct drm_device const *)ppgtt->base.dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { drm_clflush_virt_range((void *)pt_vaddr, 4096UL); } else { } __kunmap_atomic((void *)pt_vaddr); pte = 0U; pde = pde + 1U; if (pde == 512U) { pdpe = pdpe + 1U; pde = 0U; } else { } ldv_48259: ; if (num_entries != 0U) { goto ldv_48258; } else { } ldv_48244: ; return; } } static void gen8_ppgtt_insert_entries(struct i915_address_space *vm , struct sg_table *pages , uint64_t start , enum i915_cache_level 
cache_level , u32 unused ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; gen8_pte_t *pt_vaddr ; unsigned int pdpe ; unsigned int pde ; unsigned int pte ; struct sg_page_iter sg_iter ; int __ret_warn_on ; long tmp ; long tmp___0 ; struct i915_page_directory *pd ; struct i915_page_table *pt ; struct page *page_table ; void *tmp___1 ; dma_addr_t tmp___2 ; struct drm_i915_private *__p ; bool tmp___3 ; struct drm_i915_private *__p___0 ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; pdpe = (unsigned int )(start >> 30) & 3U; pde = (unsigned int )(start >> 21) & 511U; pte = (unsigned int )(start >> 12) & 511U; pt_vaddr = (gen8_pte_t *)0ULL; __sg_page_iter_start(& sg_iter, pages->sgl, pages->nents, 0UL); goto ldv_48288; ldv_48287: __ret_warn_on = pdpe > 3U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 572, "WARN_ON(pdpe >= GEN8_LEGACY_PDPES)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { goto ldv_48277; } else { } if ((unsigned long )pt_vaddr == (unsigned long )((gen8_pte_t *)0ULL)) { pd = ppgtt->__annonCompField80.pdp.page_directory[pdpe]; pt = pd->page_table[pde]; page_table = pt->page; tmp___1 = kmap_atomic(page_table); pt_vaddr = (gen8_pte_t *)tmp___1; } else { } tmp___2 = sg_page_iter_dma_address(& sg_iter); *(pt_vaddr + (unsigned long )pte) = gen8_pte_encode(tmp___2, cache_level, 1); pte = pte + 1U; if (pte == 512U) { __p = to_i915((struct drm_device const *)ppgtt->base.dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { drm_clflush_virt_range((void *)pt_vaddr, 4096UL); } else { } __kunmap_atomic((void *)pt_vaddr); pt_vaddr = (gen8_pte_t *)0ULL; pde = pde + 1U; if (pde == 512U) { pdpe = 
pdpe + 1U; pde = 0U; } else { } pte = 0U; } else { } ldv_48288: tmp___3 = __sg_page_iter_next(& sg_iter); if ((int )tmp___3) { goto ldv_48287; } else { } ldv_48277: ; if ((unsigned long )pt_vaddr != (unsigned long )((gen8_pte_t *)0ULL)) { __p___0 = to_i915((struct drm_device const *)ppgtt->base.dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) == 0U) { drm_clflush_virt_range((void *)pt_vaddr, 4096UL); } else { } __kunmap_atomic((void *)pt_vaddr); } else { } return; } } static void __gen8_do_map_pt(gen8_pde_t * const pde , struct i915_page_table *pt , struct drm_device *dev ) { gen8_pde_t entry ; gen8_pde_t tmp ; { tmp = gen8_pde_encode(dev, pt->daddr, 1); entry = tmp; *pde = entry; return; } } static void gen8_initialize_pd(struct i915_address_space *vm , struct i915_page_directory *pd ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; gen8_pde_t *page_directory ; struct i915_page_table *pt ; int i ; void *tmp ; struct drm_i915_private *__p ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; tmp = kmap_atomic(pd->page); page_directory = (gen8_pde_t *)tmp; pt = ppgtt->scratch_pt; i = 0; goto ldv_48312; ldv_48311: __gen8_do_map_pt(page_directory + (unsigned long )i, pt, vm->dev); i = i + 1; ldv_48312: ; if (i <= 511) { goto ldv_48311; } else { } __p = to_i915((struct drm_device const *)vm->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { drm_clflush_virt_range((void *)page_directory, 4096UL); } else { } __kunmap_atomic((void *)page_directory); return; } } static void gen8_free_page_tables(struct i915_page_directory *pd , struct drm_device *dev ) { int i ; unsigned long tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; { if ((unsigned long )pd->page == (unsigned long )((struct page *)0)) { return; } else { } tmp = find_first_bit((unsigned long const *)pd->used_pdes, 512UL); i = (int )tmp; goto ldv_48329; ldv_48328: __ret_warn_on = (unsigned long 
)pd->page_table[i] == (unsigned long )((struct i915_page_table *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 642, "WARN_ON(!pd->page_table[i])"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { goto ldv_48327; } else { } unmap_and_free_pt(pd->page_table[i], dev); pd->page_table[i] = (struct i915_page_table *)0; ldv_48327: tmp___2 = find_next_bit((unsigned long const *)pd->used_pdes, 512UL, (unsigned long )(i + 1)); i = (int )tmp___2; ldv_48329: ; if (i <= 511) { goto ldv_48328; } else { } return; } }
/* Tear down a GEN8 ppgtt address space: for each of the (up to 4) page
 * directories whose bit is set in pdp.used_pdpes, free its page tables and
 * the directory itself (WARN + skip if the slot is NULL), then release the
 * shared scratch page directory and scratch page table. */
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; int i ; unsigned long tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; tmp = find_first_bit((unsigned long const *)(& ppgtt->__annonCompField80.pdp.used_pdpes), 4UL); i = (int )tmp; goto ldv_48342; ldv_48341: __ret_warn_on = (unsigned long )ppgtt->__annonCompField80.pdp.page_directory[i] == (unsigned long )((struct i915_page_directory *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 657, "WARN_ON(!ppgtt->pdp.page_directory[i])"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { goto ldv_48340; } else { } gen8_free_page_tables(ppgtt->__annonCompField80.pdp.page_directory[i], ppgtt->base.dev); 
unmap_and_free_pd(ppgtt->__annonCompField80.pdp.page_directory[i], ppgtt->base.dev); ldv_48340: tmp___2 = find_next_bit((unsigned long const *)(& ppgtt->__annonCompField80.pdp.used_pdpes), 4UL, (unsigned long )(i + 1)); i = (int )tmp___2; ldv_48342: ; if (i <= 3) { goto ldv_48341; } else { } unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev); unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); return; } }
/* Ensure a page table exists for every 2 MiB-aligned PDE slice of
 * [start, start + length) under directory 'pd'.  Slots that already hold a
 * table are reused (WARN if the slot aliases the scratch PT); each table
 * allocated here has its PDE bit recorded in the 'new_pts' bitmap so the
 * unwind path frees exactly what this call created.  Returns 0, or -12
 * (-ENOMEM) after freeing everything noted in 'new_pts'. */
 static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt , struct i915_page_directory *pd , uint64_t start , uint64_t length , unsigned long *new_pts ) { struct drm_device *dev ; struct i915_page_table *pt ; uint64_t temp ; uint32_t pde ; int __ret_warn_on ; long tmp ; bool tmp___0 ; uint64_t _min1 ; uint64_t _min2 ; unsigned long tmp___1 ; unsigned long tmp___2 ; { dev = ppgtt->base.dev; pde = gen8_pde_index(start); goto ldv_48363; ldv_48362: ; if ((unsigned long )pt != (unsigned long )((struct i915_page_table *)0)) { __ret_warn_on = (unsigned long )ppgtt->scratch_pt == (unsigned long )pt; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 701, "WARN_ON(pt == ppgtt->scratch_pt)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto ldv_48360; } else { } pt = alloc_pt_single(dev); tmp___0 = IS_ERR((void const *)pt); if ((int )tmp___0) { goto unwind_out; } else { } gen8_initialize_pt(& ppgtt->base, pt); pd->page_table[pde] = pt; set_bit((long )pde, (unsigned long volatile *)new_pts); ldv_48360: pde = pde + 1U; temp = ((start + 2097152ULL) & 0xffffffffffe00000ULL) - start; _min1 = temp; _min2 = length; temp = _min1 < _min2 ? 
_min1 : _min2; start = start + temp; length = length - temp; ldv_48363: pt = pd->page_table[pde]; if ((length != 0ULL && pde <= 511U) != 0) { goto ldv_48362; } else { } return (0); unwind_out: tmp___1 = find_first_bit((unsigned long const *)new_pts, 512UL); pde = (uint32_t )tmp___1; goto ldv_48366; ldv_48365: unmap_and_free_pt(pd->page_table[pde], dev); tmp___2 = find_next_bit((unsigned long const *)new_pts, 512UL, (unsigned long )(pde + 1U)); pde = (uint32_t )tmp___2; ldv_48366: ; if (pde <= 511U) { goto ldv_48365; } else { } return (-12); } }
/* Ensure a page directory exists for each 1 GiB-aligned PDPE slice of
 * [start, start + length).  WARNs if 'new_pds' is not empty on entry or if
 * the range exceeds 1ULL << 32; newly allocated directories are initialized
 * against the scratch PT and their PDPE bits recorded in 'new_pds'.  On
 * allocation failure, frees exactly the directories noted in 'new_pds' and
 * returns -12 (-ENOMEM). */
 static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt , struct i915_page_directory_pointer *pdp , uint64_t start , uint64_t length , unsigned long *new_pds ) { struct drm_device *dev ; struct i915_page_directory *pd ; uint64_t temp ; uint32_t pdpe ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; bool tmp___2 ; uint64_t _min1 ; uint64_t _min2 ; unsigned long tmp___3 ; unsigned long tmp___4 ; { dev = ppgtt->base.dev; tmp = bitmap_empty((unsigned long const *)new_pds, 4U); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 757, "WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = start + length > 4294967296ULL; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 760, "WARN_ON((start + length) > (1ULL << 32))"); } else { } 
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); pdpe = gen8_pdpe_index(start); goto ldv_48389; ldv_48388: ; if ((unsigned long )pd != (unsigned long )((struct i915_page_directory *)0)) { goto ldv_48386; } else { } pd = alloc_pd_single(dev); tmp___2 = IS_ERR((void const *)pd); if ((int )tmp___2) { goto unwind_out; } else { } gen8_initialize_pd(& ppgtt->base, pd); pdp->page_directory[pdpe] = pd; set_bit((long )pdpe, (unsigned long volatile *)new_pds); ldv_48386: pdpe = pdpe + 1U; temp = ((start + 1073741824ULL) & 0xffffffffc0000000ULL) - start; _min1 = temp; _min2 = length; temp = _min1 < _min2 ? _min1 : _min2; start = start + temp; length = length - temp; ldv_48389: pd = pdp->page_directory[pdpe]; if ((length != 0ULL && pdpe <= 3U) != 0) { goto ldv_48388; } else { } return (0); unwind_out: tmp___3 = find_first_bit((unsigned long const *)new_pds, 4UL); pdpe = (uint32_t )tmp___3; goto ldv_48392; ldv_48391: unmap_and_free_pd(pdp->page_directory[pdpe], dev); tmp___4 = find_next_bit((unsigned long const *)new_pds, 4UL, (unsigned long )(pdpe + 1U)); pdpe = (uint32_t )tmp___4; ldv_48392: ; if (pdpe <= 3U) { goto ldv_48391; } else { } return (-12); } }
/* Free the scratch-tracking bitmaps from alloc_gen8_temp_bitmaps(): the 4
 * per-PDPE page-table bitmaps, the pointer array, and the PD bitmap.
 * kfree(NULL) is a no-op, so a partially built set is safe to pass. */
 static void free_gen8_temp_bitmaps(unsigned long *new_pds , unsigned long **new_pts ) { int i ; { i = 0; goto ldv_48400; ldv_48399: kfree((void const *)*(new_pts + (unsigned long )i)); i = i + 1; ldv_48400: ; if (i <= 3) { goto ldv_48399; } else { } kfree((void const *)new_pts); kfree((void const *)new_pds); return; } }
/* Allocate the temporary tracking bitmaps used by gen8_alloc_va_range(): one
 * word for the PD bitmap, an array of 4 pointers, and 8 words per per-PDPE
 * PT bitmap.  208U is the raw gfp mask as lowered by CIL (presumably
 * GFP_KERNEL -- confirm).  Returns 0, or -12 (-ENOMEM) after freeing any
 * partial allocations. */
 static int alloc_gen8_temp_bitmaps(unsigned long **new_pds , unsigned long ***new_pts ) { int i ; unsigned long *pds ; unsigned long **pts ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = kcalloc(1UL, 8UL, 208U); pds = (unsigned long *)tmp; if ((unsigned long )pds == (unsigned long )((unsigned long *)0UL)) { return (-12); } else { } tmp___0 = kcalloc(4UL, 8UL, 208U); pts = (unsigned long **)tmp___0; if ((unsigned long )pts == (unsigned long )((unsigned long **)0UL)) { kfree((void const *)pds); return 
(-12); } else { } i = 0; goto ldv_48411; ldv_48410: tmp___1 = kcalloc(8UL, 8UL, 208U); *(pts + (unsigned long )i) = (unsigned long *)tmp___1; if ((unsigned long )*(pts + (unsigned long )i) == (unsigned long )((unsigned long *)0UL)) { goto err_out; } else { } i = i + 1; ldv_48411: ; if (i <= 3) { goto ldv_48410; } else { } *new_pds = pds; *new_pts = pts; return (0); err_out: free_gen8_temp_bitmaps(pds, pts); return (-12); } }
/* GEN8 allocate_va_range vfunc: returns -34 (-ERANGE) on wrap-around of
 * start + length (with WARN); otherwise allocates temp bitmaps, then page
 * directories, then page tables per 1 GiB PDPE slice, and finally maps PDEs
 * and marks used_ptes/used_pdes/used_pdpes bits, flushing each directory
 * page unless the dev_priv byte at offset 46 is set.  On error, unwinds only
 * the PTs/PDs recorded in the temp bitmaps.  (Continues on next chunks.) */
 static int gen8_alloc_va_range(struct i915_address_space *vm , uint64_t start , uint64_t length ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; unsigned long *new_page_dirs ; unsigned long **new_page_tables ; struct i915_page_directory *pd ; uint64_t orig_start ; uint64_t orig_length ; uint64_t temp ; uint32_t pdpe ; int ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; uint64_t _min1 ; uint64_t _min2 ; gen8_pde_t *page_directory ; void *tmp___1 ; struct i915_page_table *pt ; uint64_t pd_len ; uint64_t tmp___2 ; uint64_t pd_start ; uint32_t pde ; int __ret_warn_on___0 ; long tmp___3 ; int __ret_warn_on___1 ; long tmp___4 ; int __ret_warn_on___2 ; long tmp___5 ; int __ret_warn_on___3 ; size_t tmp___6 ; long tmp___7 ; size_t tmp___8 ; uint32_t tmp___9 ; uint64_t _min1___0 ; uint64_t _min2___0 ; struct drm_i915_private *__p ; uint64_t _min1___1 ; uint64_t _min2___1 ; unsigned long tmp___10 ; unsigned long tmp___11 ; uint32_t tmp___12 ; unsigned long tmp___13 ; unsigned long tmp___14 ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; orig_start = start; orig_length = length; __ret_warn_on = start + length < start; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 850, "WARN_ON(start + length < start)"); } else { } tmp___0 = 
ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-34); } else { } ret = alloc_gen8_temp_bitmaps(& new_page_dirs, & new_page_tables); if (ret != 0) { return (ret); } else { } ret = gen8_ppgtt_alloc_page_directories(ppgtt, & ppgtt->__annonCompField80.pdp, start, length, new_page_dirs); if (ret != 0) { free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); return (ret); } else { } pdpe = gen8_pdpe_index(start); goto ldv_48436; ldv_48435: ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length, *(new_page_tables + (unsigned long )pdpe)); if (ret != 0) { goto err_out; } else { } pdpe = pdpe + 1U; temp = ((start + 1073741824ULL) & 0xffffffffc0000000ULL) - start; _min1 = temp; _min2 = length; temp = _min1 < _min2 ? _min1 : _min2; start = start + temp; length = length - temp; ldv_48436: pd = ppgtt->__annonCompField80.pdp.page_directory[pdpe]; if ((length != 0ULL && pdpe <= 3U) != 0) { goto ldv_48435; } else { } start = orig_start; length = orig_length; pdpe = gen8_pdpe_index(start); goto ldv_48467; ldv_48466: tmp___1 = kmap_atomic(pd->page); page_directory = (gen8_pde_t */* const */)tmp___1; tmp___2 = gen8_clamp_pd(start, length); pd_len = tmp___2; pd_start = start; __ret_warn_on___0 = (unsigned long )pd == (unsigned long )((struct i915_page_directory *)0); tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 886, "WARN_ON(!pd)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); pde = gen8_pde_index(pd_start); goto ldv_48458; ldv_48457: __ret_warn_on___1 = (unsigned long )pt == (unsigned long )((struct i915_page_table *)0); tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 890, "WARN_ON(!pt)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); __ret_warn_on___2 = pd_len == 0ULL; tmp___5 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 891, "WARN_ON(!pd_len)"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); tmp___6 = gen8_pte_count(pd_start, pd_len); __ret_warn_on___3 = tmp___6 == 0UL; tmp___7 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 892, "WARN_ON(!gen8_pte_count(pd_start, pd_len))"); } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); /* Mark the PTE range in use, mark the PDE in use, and write the PDE. */ tmp___8 = gen8_pte_count(pd_start, pd_len); tmp___9 = gen8_pte_index(pd_start); bitmap_set(pt->used_ptes, tmp___9, (int )tmp___8); set_bit((long )pde, (unsigned long volatile *)pd->used_pdes); __gen8_do_map_pt(page_directory + (unsigned long )pde, pt, vm->dev); pde = pde + 1U; temp = ((pd_start + 2097152ULL) & 0xffffffffffe00000ULL) - pd_start; _min1___0 = temp; _min2___0 = pd_len; temp = _min1___0 < _min2___0 ? 
_min1___0 : _min2___0; pd_start = pd_start + temp; pd_len = pd_len - temp; ldv_48458: pt = pd->page_table[pde]; if ((pd_len != 0ULL && pde <= 511U) != 0) { goto ldv_48457; } else { } __p = to_i915((struct drm_device const *)vm->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { drm_clflush_virt_range((void *)page_directory, 4096UL); } else { } __kunmap_atomic((void *)page_directory); set_bit((long )pdpe, (unsigned long volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); pdpe = pdpe + 1U; temp = ((start + 1073741824ULL) & 0xffffffffc0000000ULL) - start; _min1___1 = temp; _min2___1 = length; temp = _min1___1 < _min2___1 ? _min1___1 : _min2___1; start = start + temp; length = length - temp; ldv_48467: pd = ppgtt->__annonCompField80.pdp.page_directory[pdpe]; if ((length != 0ULL && pdpe <= 3U) != 0) { goto ldv_48466; } else { } free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); return (0); /* Unwind path: free only the page tables recorded in each per-PDPE temp
 * bitmap (walking pdpe downward), then the page directories recorded in
 * new_page_dirs, then the temp bitmaps themselves. */ err_out: ; goto ldv_48473; ldv_48472: tmp___10 = find_first_bit((unsigned long const *)*(new_page_tables + (unsigned long )pdpe), 512UL); temp = (uint64_t )tmp___10; goto ldv_48470; ldv_48469: unmap_and_free_pt((ppgtt->__annonCompField80.pdp.page_directory[pdpe])->page_table[temp], vm->dev); tmp___11 = find_next_bit((unsigned long const *)*(new_page_tables + (unsigned long )pdpe), 512UL, (unsigned long )(temp + 1ULL)); temp = (uint64_t )tmp___11; ldv_48470: ; if (temp <= 511ULL) { goto ldv_48469; } else { } ldv_48473: tmp___12 = pdpe; pdpe = pdpe - 1U; if (tmp___12 != 0U) { goto ldv_48472; } else { } tmp___13 = find_first_bit((unsigned long const *)new_page_dirs, 4UL); pdpe = (uint32_t )tmp___13; goto ldv_48476; ldv_48475: unmap_and_free_pd(ppgtt->__annonCompField80.pdp.page_directory[pdpe], vm->dev); tmp___14 = find_next_bit((unsigned long const *)new_page_dirs, 4UL, (unsigned long )(pdpe + 1U)); pdpe = (uint32_t )tmp___14; ldv_48476: ; if (pdpe <= 3U) { goto ldv_48475; } else { } free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); return (ret); } } 
/* One-time GEN8 ppgtt setup: allocate and initialize the scratch page table
 * and scratch page directory (propagating PTR_ERR on failure), set the
 * address-space range to [0, 4 GiB), and wire up the gen8 vfuncs. */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt ) { long tmp ; bool tmp___0 ; long tmp___1 ; bool tmp___2 ; { ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev); tmp___0 = IS_ERR((void const *)ppgtt->scratch_pt); if ((int )tmp___0) { tmp = PTR_ERR((void const *)ppgtt->scratch_pt); return ((int )tmp); } else { } ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev); tmp___2 = IS_ERR((void const *)ppgtt->scratch_pd); if ((int )tmp___2) { tmp___1 = PTR_ERR((void const *)ppgtt->scratch_pd); return ((int )tmp___1); } else { } gen8_initialize_pt(& ppgtt->base, ppgtt->scratch_pt); gen8_initialize_pd(& ppgtt->base, ppgtt->scratch_pd); ppgtt->base.start = 0UL; ppgtt->base.total = 4294967296UL; ppgtt->base.cleanup = & gen8_ppgtt_cleanup; ppgtt->base.allocate_va_range = & gen8_alloc_va_range; ppgtt->base.insert_entries = & gen8_ppgtt_insert_entries; ppgtt->base.clear_range = & gen8_ppgtt_clear_range; ppgtt->base.unbind_vma = & ppgtt_unbind_vma; ppgtt->base.bind_vma = & ppgtt_bind_vma; ppgtt->switch_mm = & gen8_mm_switch; return (0); } }
/* Debug dump of a GEN6 ppgtt to a seq_file: for each used PDE, compare the
 * register readback at pd_addr[pde] against the expected encoding of the
 * PT's dma address, then print non-scratch PTEs in groups of four. */
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt , struct seq_file *m ) { struct i915_address_space *vm ; struct i915_page_table *unused ; gen6_pte_t scratch_pte ; uint32_t pd_entry ; uint32_t pte ; uint32_t pde ; uint32_t temp ; uint32_t start ; uint32_t length ; u32 expected ; gen6_pte_t *pt_vaddr ; dma_addr_t pt_addr ; void *tmp ; unsigned long va ; int i ; bool found ; unsigned int __min1 ; unsigned int __min2 ; { vm = & ppgtt->base; start = (uint32_t )ppgtt->base.start; length = (uint32_t )ppgtt->base.total; scratch_pte = (*(vm->pte_encode))(vm->scratch.addr, 1, 1, 0U); pde = gen6_pde_index(start); goto ldv_48514; ldv_48513: pt_addr = (ppgtt->__annonCompField80.pd.page_table[pde])->daddr; pd_entry = readl((void const volatile *)ppgtt->pd_addr + (unsigned long )pde); expected = (((u32 )(pt_addr >> 28) & 4080U) | (u32 )pt_addr) | 1U; if (pd_entry != expected) { seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: 
%x\n", pde, pd_entry, expected); } else { } seq_printf(m, "\tPDE: %x\n", pd_entry); tmp = kmap_atomic((ppgtt->__annonCompField80.pd.page_table[pde])->page); pt_vaddr = (gen6_pte_t *)tmp; pte = 0U; goto ldv_48511; ldv_48510: va = ((unsigned long )pde * 1024UL + (unsigned long )pte) * 4096UL; found = 0; i = 0; goto ldv_48504; ldv_48503: ; if (*(pt_vaddr + (unsigned long )(pte + (uint32_t )i)) != scratch_pte) { found = 1; } else { } i = i + 1; ldv_48504: ; if (i <= 3) { goto ldv_48503; } else { } if (! found) { goto ldv_48506; } else { } seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte); i = 0; goto ldv_48508; ldv_48507: ; if (*(pt_vaddr + (unsigned long )(pte + (uint32_t )i)) != scratch_pte) { seq_printf(m, " %08x", *(pt_vaddr + (unsigned long )(pte + (uint32_t )i))); } else { seq_puts(m, " SCRATCH "); } i = i + 1; ldv_48508: ; if (i <= 3) { goto ldv_48507; } else { } seq_puts(m, "\n"); ldv_48506: pte = pte + 4U; ldv_48511: ; if (pte <= 1023U) { goto ldv_48510; } else { } __kunmap_atomic((void *)pt_vaddr); pde = pde + 1U; temp = ((start + 4194304U) & 4290772992U) - start; __min1 = temp; __min2 = length; temp = __min1 < __min2 ? 
__min1 : __min2; start = start + temp; length = length - temp; ldv_48514: unused = ppgtt->__annonCompField80.pd.page_table[pde]; if ((length != 0U && pde <= 511U) != 0) { goto ldv_48513; } else { } return; } }
/* Write a single GEN6 PDE: recover the enclosing ppgtt via container_of
 * (the 0xfffffffffffffe70UL offset is CIL's lowering of that), encode
 * pt->daddr plus the valid bit (| 1U), and writel it to pd_addr[pde]. */
 static void gen6_write_pde(struct i915_page_directory *pd , int const pde , struct i915_page_table *pt ) { struct i915_hw_ppgtt *ppgtt ; struct i915_page_directory const *__mptr ; u32 pd_entry ; { __mptr = (struct i915_page_directory const *)pd; ppgtt = (struct i915_hw_ppgtt *)__mptr + 0xfffffffffffffe70UL; pd_entry = (u32 )pt->daddr | ((u32 )(pt->daddr >> 28) & 4080U); pd_entry = pd_entry | 1U; writel(pd_entry, (void volatile *)ppgtt->pd_addr + (unsigned long )pde); return; } }
/* Write PDEs for every page table covering [start, start + length) in 4 MiB
 * steps, then readl dev_priv->gtt.gsm as a posting read to flush the writes. */
 static void gen6_write_page_range(struct drm_i915_private *dev_priv , struct i915_page_directory *pd , uint32_t start , uint32_t length ) { struct i915_page_table *pt ; uint32_t pde ; uint32_t temp ; unsigned int __min1 ; unsigned int __min2 ; { pde = gen6_pde_index(start); goto ldv_48538; ldv_48537: gen6_write_pde(pd, (int const )pde, pt); pde = pde + 1U; temp = ((start + 4194304U) & 4290772992U) - start; __min1 = temp; __min2 = length; temp = __min1 < __min2 ? 
__min1 : __min2; start = start + temp; length = length - temp; ldv_48538: pt = pd->page_table[pde]; if ((length != 0U && pde <= 511U) != 0) { goto ldv_48537; } else { } readl((void const volatile *)dev_priv->gtt.gsm); return; } }
/* BUG() (inline ud2 trap) if pd_offset is not 64-byte aligned; otherwise
 * return pd_offset / 64 << 16 -- the value programmed into the ring's page
 * directory base register by the *_mm_switch helpers below. */
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt ) { long tmp ; { tmp = ldv__builtin_expect((ppgtt->__annonCompField80.pd.__annonCompField79.pd_offset & 63U) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c"), "i" (1059), "i" (12UL)); ldv_48543: ; goto ldv_48543; } else { } return (ppgtt->__annonCompField80.pd.__annonCompField79.pd_offset / 64U << 16); } }
/* HSW mm switch: ring flush, then a 6-dword LRI batch writing ~0 to the
 * register at mmio_base + 0x220 and get_pd_offset() to mmio_base + 0x228
 * (presumably PP_DIR_DCLV / PP_DIR_BASE -- confirm against i915_reg.h). */
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt , struct intel_engine_cs *ring ) { int ret ; uint32_t tmp ; { ret = (*(ring->flush))(ring, 62U, 62U); if (ret != 0) { return (ret); } else { } ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 285212675U); intel_ring_emit(ring, ring->mmio_base + 544U); intel_ring_emit(ring, 4294967295U); intel_ring_emit(ring, ring->mmio_base + 552U); tmp = get_pd_offset(ppgtt); intel_ring_emit(ring, tmp); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* vGPU mm switch: same two register writes as above, but performed directly
 * through the uncore mmio_writel hooks instead of ring commands. */
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt , struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; uint32_t tmp___0 ; { tmp = to_i915((struct drm_device const *)ppgtt->base.dev); dev_priv = tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 544U), 4294967295U, 1); tmp___0 = get_pd_offset(ppgtt); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 552U), tmp___0, 1); return (0); } 
}
/* GEN7 mm switch: like hsw_mm_switch (flush + 6-dword LRI writing ~0 and
 * get_pd_offset() to mmio_base + 0x220 / + 0x228), with an extra flush
 * afterwards on non-render rings (ring->id != 0). */
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt , struct intel_engine_cs *ring ) { int ret ; uint32_t tmp ; { ret = (*(ring->flush))(ring, 62U, 62U); if (ret != 0) { return (ret); } else { } ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 285212675U); intel_ring_emit(ring, ring->mmio_base + 544U); intel_ring_emit(ring, 4294967295U); intel_ring_emit(ring, ring->mmio_base + 552U); tmp = get_pd_offset(ppgtt); intel_ring_emit(ring, tmp); intel_ring_emit(ring, 0U); intel_ring_advance(ring); if ((unsigned int )ring->id != 0U) { ret = (*(ring->flush))(ring, 62U, 62U); if (ret != 0) { return (ret); } else { } } else { } return (0); } }
/* GEN6 mm switch: direct mmio writes of ~0 and get_pd_offset() to
 * mmio_base + 0x220 / + 0x228, followed by a posting read of the first. */
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt , struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev = ppgtt->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 544U), 4294967295U, 1); tmp = get_pd_offset(ppgtt); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 552U), tmp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 544U), 0); return (0); } }
/* Enable GEN8 PPGTT on every initialized ring: masked-bit write of bit 512
 * ((_a << 16) | _a) to the per-ring register at mmio_base + 668 (0x29c). */
 static void gen8_ppgtt_enable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int j ; int _a ; bool tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; j = 0; goto ldv_48587; ldv_48586: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )j; tmp = intel_ring_initialized(ring); if ((int )tmp) { _a = 512; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 668U), (uint32_t )((_a << 16) | _a), 1); } else { } j = j + 1; ldv_48587: ; if (j <= 4) { goto ldv_48586; } else { } return; } }
/* Enable GEN7 PPGTT: tweak the ECO/ECOCHK registers (register 82064 / 16528,
 * with a variant chosen by the dev_priv byte at offset 45 -- presumably an
 * IS_HASWELL-style platform flag, confirm), then apply the same per-ring
 * masked-bit-512 write as gen8_ppgtt_enable. */
 static void gen7_ppgtt_enable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; uint32_t ecochk ; uint32_t 
ecobits ; int i ; struct drm_i915_private *__p ; int _a ; bool tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ecobits = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 82064L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 82064L, ecobits | 768U, 1); ecochk = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ecochk = ecochk | 24U; } else { ecochk = ecochk | 8U; ecochk = ecochk & 4294967279U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16528L, ecochk, 1); i = 0; goto ldv_48619; ldv_48618: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { _a = 512; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 668U), (uint32_t )((_a << 16) | _a), 1); } else { } i = i + 1; ldv_48619: ; if (i <= 4) { goto ldv_48618; } else { } return; } }
/* Enable GEN6 PPGTT: program the ECO / GAB_CTL / ECOCHK registers, then a
 * single masked-bit-512 write to the global register at offset 9504. */
 static void gen6_ppgtt_enable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t ecochk ; uint32_t gab_ctl ; uint32_t ecobits ; int _a ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ecobits = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 82064L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 82064L, ecobits | 8960U, 1); gab_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 147456L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 147456L, gab_ctl | 256U, 1); ecochk = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16528L, ecochk | 1048U, 1); _a = 512; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 9504L, (uint32_t )((_a << 16) | _a), 1); return; } }
/* GEN6 clear_range vfunc: overwrite every PTE in [start, start + length)
 * with the scratch PTE, 1024 entries per page table, kmapping each table in
 * turn.  (Continues on next chunk.) */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm , uint64_t start , uint64_t length , bool use_scratch ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; gen6_pte_t *pt_vaddr ; gen6_pte_t 
scratch_pte ; unsigned int first_entry ; unsigned int num_entries ; unsigned int act_pt ; unsigned int first_pte ; unsigned int last_pte ; unsigned int i ; void *tmp ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; first_entry = (unsigned int )(start >> 12); num_entries = (unsigned int )(length >> 12); act_pt = first_entry / 1024U; first_pte = first_entry & 1023U; scratch_pte = (*(vm->pte_encode))(vm->scratch.addr, 1, 1, 0U); goto ldv_48664; ldv_48663: last_pte = first_pte + num_entries; if (last_pte > 1024U) { last_pte = 1024U; } else { } tmp = kmap_atomic((ppgtt->__annonCompField80.pd.page_table[act_pt])->page); pt_vaddr = (gen6_pte_t *)tmp; i = first_pte; goto ldv_48661; ldv_48660: *(pt_vaddr + (unsigned long )i) = scratch_pte; i = i + 1U; ldv_48661: ; if (i < last_pte) { goto ldv_48660; } else { } __kunmap_atomic((void *)pt_vaddr); num_entries = (first_pte - last_pte) + num_entries; first_pte = 0U; act_pt = act_pt + 1U; ldv_48664: ; if (num_entries != 0U) { goto ldv_48663; } else { } return; } }
/* GEN6 insert_entries vfunc: write one encoded PTE per sg page starting at
 * 'start', kmapping each page table lazily and advancing to the next table
 * at every 1024-entry boundary; unmaps any table left mapped at the end. */
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm , struct sg_table *pages , uint64_t start , enum i915_cache_level cache_level , u32 flags ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; gen6_pte_t *pt_vaddr ; unsigned int first_entry ; unsigned int act_pt ; unsigned int act_pte ; struct sg_page_iter sg_iter ; void *tmp ; dma_addr_t tmp___0 ; bool tmp___1 ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; first_entry = (unsigned int )(start >> 12); act_pt = first_entry / 1024U; act_pte = first_entry & 1023U; pt_vaddr = (gen6_pte_t *)0U; __sg_page_iter_start(& sg_iter, pages->sgl, pages->nents, 0UL); goto ldv_48682; ldv_48681: ; if ((unsigned long )pt_vaddr == (unsigned long )((gen6_pte_t *)0U)) { tmp = kmap_atomic((ppgtt->__annonCompField80.pd.page_table[act_pt])->page); pt_vaddr = (gen6_pte_t *)tmp; } else { } tmp___0 = 
sg_page_iter_dma_address(& sg_iter); *(pt_vaddr + (unsigned long )act_pte) = (*(vm->pte_encode))(tmp___0, cache_level, 1, flags); act_pte = act_pte + 1U; if (act_pte == 1024U) { __kunmap_atomic((void *)pt_vaddr); pt_vaddr = (gen6_pte_t *)0U; act_pt = act_pt + 1U; act_pte = 0U; } else { } ldv_48682: tmp___1 = __sg_page_iter_next(& sg_iter); if ((int )tmp___1) { goto ldv_48681; } else { } if ((unsigned long )pt_vaddr != (unsigned long )((gen6_pte_t *)0U)) { __kunmap_atomic((void *)pt_vaddr); } else { } return; } }
/* Record that all rings need a PD reload: copy the device's ring_mask into
 * ppgtt->pd_dirty_rings. */
 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)ppgtt->base.dev); ppgtt->pd_dirty_rings = (unsigned long )__p->info.ring_mask; return; } }
/* Fill all 1024 entries of a GEN6 page table with the scratch PTE; WARNs if
 * the scratch page has no dma address yet. */
 static void gen6_initialize_pt(struct i915_address_space *vm , struct i915_page_table *pt ) { gen6_pte_t *pt_vaddr ; gen6_pte_t scratch_pte ; int i ; int __ret_warn_on ; long tmp ; void *tmp___0 ; { __ret_warn_on = vm->scratch.addr == 0ULL; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1286, "WARN_ON(vm->scratch.addr == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); scratch_pte = (*(vm->pte_encode))(vm->scratch.addr, 1, 1, 0U); tmp___0 = kmap_atomic(pt->page); pt_vaddr = (gen6_pte_t *)tmp___0; i = 0; goto ldv_48703; ldv_48702: *(pt_vaddr + (unsigned long )i) = scratch_pte; i = i + 1; ldv_48703: ; if ((unsigned int )i <= 1023U) { goto ldv_48702; } else { } __kunmap_atomic((void *)pt_vaddr); return; } }
/* GEN6 allocate_va_range vfunc -- definition continues past this chunk. */
 static int gen6_alloc_va_range(struct i915_address_space *vm , uint64_t start , uint64_t length ) { unsigned long new_page_tables[8U] ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; 
struct i915_page_table *pt ; uint32_t start_save ; uint32_t length_save ; uint32_t pde ; uint32_t temp ; int ret ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; int tmp___0 ; long tmp___1 ; int __ret_warn_on___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; bool tmp___5 ; unsigned int __min1 ; unsigned int __min2 ; unsigned long tmp_bitmap[16U] ; size_t tmp___6 ; uint32_t tmp___7 ; int tmp___8 ; size_t tmp___9 ; uint32_t tmp___10 ; unsigned int __min1___0 ; unsigned int __min2___0 ; int __ret_warn_on___2 ; int tmp___11 ; long tmp___12 ; unsigned long tmp___13 ; struct i915_page_table *pt___0 ; unsigned long tmp___14 ; { dev = vm->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; start_save = (uint32_t const )start; length_save = (uint32_t const )length; __ret_warn_on = (unsigned int )(start >> 32ULL) != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1312, "WARN_ON(upper_32_bits(start))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); bitmap_zero((unsigned long *)(& new_page_tables), 512U); pde = gen6_pde_index((uint32_t )start); goto ldv_48734; ldv_48733: ; if ((unsigned long )ppgtt->scratch_pt != (unsigned long )pt) { tmp___0 = bitmap_empty((unsigned long const *)pt->used_ptes, 1024U); __ret_warn_on___0 = tmp___0 != 0; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1323, "WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES))"); } else 
{ } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); goto ldv_48729; } else { } tmp___2 = bitmap_empty((unsigned long const *)pt->used_ptes, 1024U); __ret_warn_on___1 = tmp___2 == 0; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1328, "WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES))"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); pt = alloc_pt_single(dev); tmp___5 = IS_ERR((void const *)pt); if ((int )tmp___5) { tmp___4 = PTR_ERR((void const *)pt); ret = (int )tmp___4; goto unwind_out; } else { } gen6_initialize_pt(vm, pt); ppgtt->__annonCompField80.pd.page_table[pde] = pt; set_bit((long )pde, (unsigned long volatile *)(& new_page_tables)); trace_i915_page_table_entry_alloc(vm, pde, start, 22ULL); ldv_48729: pde = pde + 1U; temp = (((uint32_t )start + 4194304U) & 4290772992U) - (uint32_t )start; __min1 = temp; __min2 = (unsigned int )length; temp = __min1 < __min2 ? 
__min1 : __min2; start = (uint64_t )temp + start; length = length - (uint64_t )temp; ldv_48734: pt = ppgtt->__annonCompField80.pd.page_table[pde]; if ((length != 0ULL && pde <= 511U) != 0) { goto ldv_48733; } else { } start = (uint64_t )start_save; length = (uint64_t )length_save; pde = gen6_pde_index((uint32_t )start); goto ldv_48741; ldv_48740: bitmap_zero((unsigned long *)(& tmp_bitmap), 1024U); tmp___6 = gen6_pte_count((uint32_t )start, (uint32_t )length); tmp___7 = gen6_pte_index((uint32_t )start); bitmap_set((unsigned long *)(& tmp_bitmap), tmp___7, (int )tmp___6); tmp___8 = test_and_clear_bit((long )pde, (unsigned long volatile *)(& new_page_tables)); if (tmp___8 != 0) { gen6_write_pde(& ppgtt->__annonCompField80.pd, (int const )pde, pt); } else { } tmp___9 = gen6_pte_count((uint32_t )start, (uint32_t )length); tmp___10 = gen6_pte_index((uint32_t )start); trace_i915_page_table_entry_map(vm, pde, pt, tmp___10, (u32 )tmp___9, 1024U); bitmap_or(pt->used_ptes, (unsigned long const *)(& tmp_bitmap), (unsigned long const *)pt->used_ptes, 1024U); pde = pde + 1U; temp = (((uint32_t )start + 4194304U) & 4290772992U) - (uint32_t )start; __min1___0 = temp; __min2___0 = (unsigned int )length; temp = __min1___0 < __min2___0 ? 
__min1___0 : __min2___0; start = (uint64_t )temp + start; length = length - (uint64_t )temp; ldv_48741: pt = ppgtt->__annonCompField80.pd.page_table[pde]; if ((length != 0ULL && pde <= 511U) != 0) { goto ldv_48740; } else { } tmp___11 = bitmap_empty((unsigned long const *)(& new_page_tables), 512U); __ret_warn_on___2 = tmp___11 == 0; tmp___12 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___12 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1364, "WARN_ON(!bitmap_empty(new_page_tables, I915_PDES))"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); readl((void const volatile *)dev_priv->gtt.gsm); mark_tlbs_dirty(ppgtt); return (0); unwind_out: tmp___13 = find_first_bit((unsigned long const *)(& new_page_tables), 512UL); pde = (uint32_t )tmp___13; goto ldv_48747; ldv_48746: pt___0 = ppgtt->__annonCompField80.pd.page_table[pde]; ppgtt->__annonCompField80.pd.page_table[pde] = ppgtt->scratch_pt; unmap_and_free_pt(pt___0, vm->dev); tmp___14 = find_next_bit((unsigned long const *)(& new_page_tables), 512UL, (unsigned long )(pde + 1U)); pde = (uint32_t )tmp___14; ldv_48747: ; if (pde <= 511U) { goto ldv_48746; } else { } mark_tlbs_dirty(ppgtt); return (ret); } } static void gen6_ppgtt_cleanup(struct i915_address_space *vm ) { struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr ; struct i915_page_table *pt ; uint32_t pde ; { __mptr = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr; drm_mm_remove_node(& ppgtt->node); pde = 0U; goto ldv_48758; ldv_48757: ; if ((unsigned long )ppgtt->scratch_pt != (unsigned long )pt) { unmap_and_free_pt(pt, ppgtt->base.dev); } else { } pde = pde + 1U; ldv_48758: pt = ppgtt->__annonCompField80.pd.page_table[pde]; if (pde <= 511U) { goto ldv_48757; } else { } 
unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); unmap_and_free_pd(& ppgtt->__annonCompField80.pd, ppgtt->base.dev); return; } } static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool retried ; int ret ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; bool tmp___3 ; long tmp___4 ; { dev = ppgtt->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; retried = 0; tmp = drm_mm_initialized(& dev_priv->gtt.base.mm); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } tmp___1 = ldv__builtin_expect((long )tmp___0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c"), "i" (1415), "i" (12UL)); ldv_48767: ; goto ldv_48767; } else { } ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev); tmp___3 = IS_ERR((void const *)ppgtt->scratch_pt); if ((int )tmp___3) { tmp___2 = PTR_ERR((void const *)ppgtt->scratch_pt); return ((int )tmp___2); } else { } gen6_initialize_pt(& ppgtt->base, ppgtt->scratch_pt); alloc: ret = drm_mm_insert_node_in_range_generic(& dev_priv->gtt.base.mm, & ppgtt->node, 2097152ULL, 65536U, 0UL, 0ULL, (u64 )dev_priv->gtt.base.total, 2, 1); if (ret == -28 && ! 
/* (CIL-generated continuation of gen6_ppgtt_allocate_page_directories.)
 * The preceding line attempted drm_mm_insert_node_in_range_generic(); on
 * -28 (-ENOSPC) and not yet retried, evict GTT space once and jump back to
 * the "alloc" label. Any other failure unwinds through err_out, which frees
 * the scratch page table allocated earlier in this function. */
retried) { ret = i915_gem_evict_something(dev, & dev_priv->gtt.base, 2097152, 65536U, 0U, 0UL, dev_priv->gtt.base.total, 0U); if (ret != 0) { goto err_out; } else { } retried = 1; goto alloc; } else { } if (ret != 0) { goto err_out; } else { } if (ppgtt->node.start < (unsigned long long )dev_priv->gtt.mappable_end) { tmp___4 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("gen6_ppgtt_allocate_page_directories", "Forced to use aperture for PDEs\n"); } else { } } else { } return (0); err_out: unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); return (ret); } }
/* gen6_ppgtt_alloc: trivial wrapper that forwards to
 * gen6_ppgtt_allocate_page_directories() and returns its status code. */
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt ) { int tmp ; { tmp = gen6_ppgtt_allocate_page_directories(ppgtt); return (tmp); } }
/* gen6_scratch_va_range: point every page-directory entry covering
 * [start, start + length) at the shared scratch page table (ppgtt->scratch_pt).
 * 4194304U is 4 MiB and 4290772992U is 0xFFC00000 (the 4 MiB alignment mask),
 * so "temp" becomes min(bytes to the next 4 MiB boundary, remaining length) -
 * consistent with one gen6 page table spanning 1024 PTEs * 4 KiB = 4 MiB.
 * (The loop continues on the next generated line: this line ends inside the
 * min() ternary expression.) */
static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt , uint64_t start , uint64_t length ) { struct i915_page_table *unused ; uint32_t pde ; uint32_t temp ; unsigned int __min1 ; unsigned int __min2 ; { pde = gen6_pde_index((uint32_t )start); goto ldv_48786; ldv_48785: ppgtt->__annonCompField80.pd.page_table[pde] = ppgtt->scratch_pt; pde = pde + 1U; temp = (((uint32_t )start + 4194304U) & 4290772992U) - (uint32_t )start; __min1 = temp; __min2 = (unsigned int )length; temp = __min1 < __min2 ? 
__min1 : __min2; start = (uint64_t )temp + start; length = length - (uint64_t )temp; ldv_48786: unused = ppgtt->__annonCompField80.pd.page_table[pde]; if ((length != 0ULL && pde <= 511U) != 0) { goto ldv_48785; } else { } return; } } static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool tmp ; long tmp___0 ; long tmp___1 ; { dev = ppgtt->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 6U) { ppgtt->switch_mm = & gen6_mm_switch; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { ppgtt->switch_mm = & hsw_mm_switch; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 7U) { ppgtt->switch_mm = & gen7_mm_switch; } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c"), "i" (1484), "i" (12UL)); ldv_48812: ; goto ldv_48812; } } } tmp = intel_vgpu_active(dev); if ((int )tmp) { ppgtt->switch_mm = & vgpu_mm_switch; } else { } ret = gen6_ppgtt_alloc(ppgtt); if (ret != 0) { return (ret); } else { } ppgtt->base.allocate_va_range = & gen6_alloc_va_range; ppgtt->base.clear_range = & gen6_ppgtt_clear_range; ppgtt->base.insert_entries = & gen6_ppgtt_insert_entries; ppgtt->base.unbind_vma = & ppgtt_unbind_vma; ppgtt->base.bind_vma = & ppgtt_bind_vma; ppgtt->base.cleanup = & 
gen6_ppgtt_cleanup; ppgtt->base.start = 0UL; ppgtt->base.total = 2147483648UL; ppgtt->debug_dump = & gen6_dump_ppgtt; ppgtt->__annonCompField80.pd.__annonCompField79.pd_offset = (uint32_t )(ppgtt->node.start / 4096ULL) * 4U; ppgtt->pd_addr = (gen6_pte_t *)dev_priv->gtt.gsm + (unsigned long )(ppgtt->__annonCompField80.pd.__annonCompField79.pd_offset / 4U); gen6_scratch_va_range(ppgtt, 0ULL, (uint64_t )ppgtt->base.total); gen6_write_page_range(dev_priv, & ppgtt->__annonCompField80.pd, 0U, (uint32_t )ppgtt->base.total); tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("gen6_ppgtt_init", "Allocated pde space (%lldM) at GTT entry: %llx\n", ppgtt->node.size >> 20, ppgtt->node.start / 4096ULL); } else { } tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("gen6_ppgtt_init", "Adding PPGTT at offset %x\n", ppgtt->__annonCompField80.pd.__annonCompField79.pd_offset << 10); } else { } return (0); } } static int __hw_ppgtt_init(struct drm_device *dev , struct i915_hw_ppgtt *ppgtt ) { struct drm_i915_private *dev_priv ; int tmp ; int tmp___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ppgtt->base.dev = dev; ppgtt->base.scratch = dev_priv->gtt.base.scratch; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 7U) { tmp = gen6_ppgtt_init(ppgtt); return (tmp); } else { tmp___0 = gen8_ppgtt_init(ppgtt); return (tmp___0); } } } int i915_ppgtt_init(struct drm_device *dev , struct i915_hw_ppgtt *ppgtt ) { struct drm_i915_private *dev_priv ; int ret ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; ret = __hw_ppgtt_init(dev, ppgtt); if (ret == 0) { kref_init(& ppgtt->ref); drm_mm_init(& ppgtt->base.mm, (u64 )ppgtt->base.start, (u64 )ppgtt->base.total); i915_init_vm(dev_priv, & ppgtt->base); } else { } return (ret); } } int i915_ppgtt_init_hw(struct drm_device *dev ) { 
struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct i915_hw_ppgtt *ppgtt ; int i ; int ret ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ppgtt = dev_priv->mm.aliasing_ppgtt; ret = 0; if (i915.enable_execlists != 0) { return (0); } else { } if (i915.enable_ppgtt == 0) { return (0); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 6U) { gen6_ppgtt_enable(dev); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 7U) { gen7_ppgtt_enable(dev); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { gen8_ppgtt_enable(dev); } else { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { __p = to_i915((struct drm_device const *)dev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1574, "Missing switch case (%lu) in %s\n", (long )__p->info.gen, "i915_ppgtt_init_hw"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } } } if ((unsigned long )ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { i = 0; goto ldv_48867; ldv_48866: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { ret = (*(ppgtt->switch_mm))(ppgtt, ring); if (ret != 0) { return (ret); } else { } } else { } i = i + 1; ldv_48867: ; if (i <= 4) { goto ldv_48866; } else { } } else { } return (ret); } } struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev , struct drm_i915_file_private 
*fpriv ) { struct i915_hw_ppgtt *ppgtt ; int ret ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = kzalloc(4576UL, 208U); ppgtt = (struct i915_hw_ppgtt *)tmp; if ((unsigned long )ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct i915_hw_ppgtt *)tmp___0); } else { } ret = i915_ppgtt_init(dev, ppgtt); if (ret != 0) { kfree((void const *)ppgtt); tmp___1 = ERR_PTR((long )ret); return ((struct i915_hw_ppgtt *)tmp___1); } else { } ppgtt->file_priv = fpriv; trace_i915_ppgtt_create(& ppgtt->base); return (ppgtt); } } void i915_ppgtt_release(struct kref *kref ) { struct i915_hw_ppgtt *ppgtt ; struct kref const *__mptr ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; int tmp___1 ; long tmp___2 ; { __mptr = (struct kref const *)kref; ppgtt = (struct i915_hw_ppgtt *)__mptr + 0xfffffffffffffec8UL; trace_i915_ppgtt_release(& ppgtt->base); tmp = list_empty((struct list_head const *)(& ppgtt->base.active_list)); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1617, "WARN_ON(!list_empty(&ppgtt->base.active_list))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = list_empty((struct list_head const *)(& ppgtt->base.inactive_list)); __ret_warn_on___0 = tmp___1 == 0; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1618, "WARN_ON(!list_empty(&ppgtt->base.inactive_list))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); list_del(& 
/* (CIL-generated tail of i915_ppgtt_release.) Unlink the PPGTT from the
 * device-global VM list, tear down its drm_mm allocator, invoke the
 * per-generation cleanup vfunc, then free the structure. */
ppgtt->base.global_link); drm_mm_takedown(& ppgtt->base.mm); (*(ppgtt->base.cleanup))(& ppgtt->base); kfree((void const *)ppgtt); return; } }
/* needs_idle_maps: returns true only when info.gen == 5 AND the feature byte
 * at offset 44 of struct drm_i915_private is nonzero (NOTE(review): CIL
 * lowered a bitfield test here - presumably IS_MOBILE in the original driver;
 * confirm against the struct layout) AND intel_iommu_gfx_mapped != 0, i.e.
 * the graphics aperture is behind an active IOMMU. */
static bool needs_idle_maps(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { if (intel_iommu_gfx_mapped != 0) { return (1); } else { } } else { } } else { } return (0); } }
/* do_idling: if gtt.do_idle_maps is set, force mm.interruptible = false and
 * idle the GPU before GTT manipulation; on failure it logs an error and
 * delays (42950UL here corresponds to udelay(10) - NOTE(review): confirm the
 * __const_udelay scaling). Returns the previous mm.interruptible value so
 * undo_idling() can restore it. */
static bool do_idling(struct drm_i915_private *dev_priv ) { bool ret ; int tmp ; long tmp___0 ; { ret = dev_priv->mm.interruptible; tmp___0 = ldv__builtin_expect((long )dev_priv->gtt.do_idle_maps, 0L); if (tmp___0 != 0L) { dev_priv->mm.interruptible = 0; tmp = i915_gpu_idle(dev_priv->dev); if (tmp != 0) { drm_err("Couldn\'t idle GPU\n"); __const_udelay(42950UL); } else { } } else { } return (ret); } }
/* undo_idling: restore mm.interruptible as saved by do_idling(); a no-op
 * unless gtt.do_idle_maps is set. */
static void undo_idling(struct drm_i915_private *dev_priv , bool interruptible ) { long tmp ; { tmp = ldv__builtin_expect((long )dev_priv->gtt.do_idle_maps, 0L); if (tmp != 0L) { dev_priv->mm.interruptible = interruptible; } else { } return; } }
/* i915_check_and_clear_faults: gen6+ only (returns early for gen <= 5).
 * For each initialized ring, read the per-ring fault register at mmio offset
 * ring->id * 256 + 16532 (0x4094 - presumably RING_FAULT_REG; confirm) and,
 * if the valid bit (bit 0) is set, log the decoded fault. (Continues on the
 * next generated line, where the bit is written back cleared.) */
void i915_check_and_clear_faults(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int i ; struct drm_i915_private *__p ; u32 fault_reg ; long tmp ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return; } else { } i = 0; goto ldv_48924; ldv_48923: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { fault_reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )ring->id * 256U + 16532U), 1); if ((int )fault_reg & 1) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp 
/* (CIL-generated continuation of i915_check_and_clear_faults.) Log the
 * decoded fault (address, GGTT/PPGTT space bit 11, source ID bits 3..10,
 * type bits 1..2), then write the register back with bit 0 masked off
 * (fault_reg & 4294967294U == & ~1U) to clear the fault. The loop runs over
 * rings 0..4; the final mmio_readl is a posting read of ring 0's fault
 * register to flush the writes. */
!= 0L) { drm_ut_debug_printk("i915_check_and_clear_faults", "Unexpected fault\n\tAddr: 0x%08lx\n\tAddress space: %s\n\tSource ID: %d\n\tType: %d\n", (unsigned long )fault_reg & 0xfffffffffffff000UL, (fault_reg & 2048U) != 0U ? (char *)"GGTT" : (char *)"PPGTT", (fault_reg >> 3) & 255U, (fault_reg >> 1) & 3U); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )ring->id * 256U + 16532U), fault_reg & 4294967294U, 1); } else { } } else { } i = i + 1; ldv_48924: ; if (i <= 4) { goto ldv_48923; } else { } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )((struct intel_engine_cs *)(& dev_priv->ring))->id * 256U + 16532U), 0); return; } }
/* i915_ggtt_flush: on gen <= 5, flush via the chipset helper; otherwise write
 * 1 to mmio offset 1052680L (0x101008 - presumably GFX_FLSH_CNTL_GEN6;
 * confirm) followed by a posting read of the same register. */
static void i915_ggtt_flush(struct drm_i915_private *dev_priv ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { intel_gtt_chipset_flush(); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1052680L, 1U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1052680L, 0); } return; } }
/* i915_gem_suspend_gtt_mappings: gen6+ only. Clear any pending faults, then
 * scrub the entire GGTT range (clear_range over [start, start + total) with
 * use_scratch = 1) and flush, so nothing dangles across suspend. */
void i915_gem_suspend_gtt_mappings(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return; } else { } i915_check_and_clear_faults(dev); (*(dev_priv->gtt.base.clear_range))(& dev_priv->gtt.base, (uint64_t )dev_priv->gtt.base.start, (uint64_t )dev_priv->gtt.base.total, 1); i915_ggtt_flush(dev_priv); return; } }
/* i915_gem_gtt_prepare_object: DMA-map the object's scatterlist. The byte
 * test at offset 411 of the object is a CIL-lowered bitfield read
 * (NOTE(review): presumably obj->has_dma_mapping - confirm); if already
 * mapped, succeed immediately. Returns -28 (-ENOSPC) when dma_map_sg_attrs()
 * maps zero entries. */
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj ) { int tmp ; { if ((unsigned int )*((unsigned char *)obj + 411UL) != 0U) { return (0); } else { } tmp = dma_map_sg_attrs(& ((obj->base.dev)->pdev)->dev, (obj->pages)->sgl, (int )(obj->pages)->nents, 0, (struct dma_attrs *)0); if (tmp == 0) { return (-28); } else { } return (0); } }
/* gen8_set_pte: (signature continues on the next generated line) writes one
 * 64-bit gen8 PTE with writeq(). */
static void gen8_set_pte(void *addr 
, gen8_pte_t pte ) { { writeq((unsigned long )pte, (void volatile *)addr); return; } } static void gen8_ggtt_insert_entries(struct i915_address_space *vm , struct sg_table *st , uint64_t start , enum i915_cache_level level , u32 unused ) { struct drm_i915_private *dev_priv ; unsigned int first_entry ; gen8_pte_t *gtt_entries ; int i ; struct sg_page_iter sg_iter ; dma_addr_t addr ; gen8_pte_t tmp ; bool tmp___0 ; int __ret_warn_on ; unsigned long tmp___1 ; gen8_pte_t tmp___2 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)(vm->dev)->dev_private; first_entry = (unsigned int )(start >> 12); gtt_entries = (gen8_pte_t *)dev_priv->gtt.gsm + (unsigned long )first_entry; i = 0; addr = 0ULL; __sg_page_iter_start(& sg_iter, st->sgl, st->nents, 0UL); goto ldv_48966; ldv_48965: addr = (sg_iter.sg)->dma_address + (dma_addr_t )(sg_iter.sg_pgoffset << 12); tmp = gen8_pte_encode(addr, level, 1); gen8_set_pte((void *)gtt_entries + (unsigned long )i, tmp); i = i + 1; ldv_48966: tmp___0 = __sg_page_iter_next(& sg_iter); if ((int )tmp___0) { goto ldv_48965; } else { } if (i != 0) { tmp___1 = readq((void const volatile *)(gtt_entries + ((unsigned long )i + 0xffffffffffffffffUL))); tmp___2 = gen8_pte_encode(addr, level, 1); __ret_warn_on = (unsigned long long )tmp___1 != tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1777, "WARN_ON(readq(>t_entries[i-1]) != gen8_pte_encode(addr, level, true))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1052680L, 1U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1052680L, 0); return; } } static void gen6_ggtt_insert_entries(struct i915_address_space *vm , struct sg_table *st , uint64_t start , 
enum i915_cache_level level , u32 flags ) { struct drm_i915_private *dev_priv ; unsigned int first_entry ; gen6_pte_t *gtt_entries ; int i ; struct sg_page_iter sg_iter ; dma_addr_t addr ; gen6_pte_t tmp ; bool tmp___0 ; unsigned long gtt ; unsigned int tmp___1 ; int __ret_warn_on ; gen6_pte_t tmp___2 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)(vm->dev)->dev_private; first_entry = (unsigned int )(start >> 12); gtt_entries = (gen6_pte_t *)dev_priv->gtt.gsm + (unsigned long )first_entry; i = 0; addr = 0ULL; __sg_page_iter_start(& sg_iter, st->sgl, st->nents, 0UL); goto ldv_48984; ldv_48983: addr = sg_page_iter_dma_address(& sg_iter); tmp = (*(vm->pte_encode))(addr, level, 1, flags); iowrite32(tmp, (void *)gtt_entries + (unsigned long )i); i = i + 1; ldv_48984: tmp___0 = __sg_page_iter_next(& sg_iter); if ((int )tmp___0) { goto ldv_48983; } else { } if (i != 0) { tmp___1 = readl((void const volatile *)(gtt_entries + ((unsigned long )i + 0xffffffffffffffffUL))); gtt = (unsigned long )tmp___1; tmp___2 = (*(vm->pte_encode))(addr, level, 1, flags); __ret_warn_on = (unsigned long )tmp___2 != gtt; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1820, "WARN_ON(gtt != vm->pte_encode(addr, level, true, flags))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1052680L, 1U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1052680L, 0); return; } } static void gen8_ggtt_clear_range(struct i915_address_space *vm , uint64_t start , uint64_t length , bool use_scratch ) { struct drm_i915_private *dev_priv ; unsigned int first_entry ; unsigned int num_entries ; gen8_pte_t scratch_pte ; gen8_pte_t *gtt_base ; int max_entries ; int i ; int 
__ret_warn_on ; long tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)(vm->dev)->dev_private; first_entry = (unsigned int )(start >> 12); num_entries = (unsigned int )(length >> 12); gtt_base = (gen8_pte_t *)dev_priv->gtt.gsm + (unsigned long )first_entry; max_entries = (int const )((unsigned int )(dev_priv->gtt.base.total >> 12) - first_entry); __ret_warn_on = (unsigned int )max_entries < num_entries; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1846, "First entry = %d; Num entries = %d (max=%d)\n", first_entry, num_entries, max_entries); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { num_entries = (unsigned int )max_entries; } else { } scratch_pte = gen8_pte_encode(vm->scratch.addr, 1, (int )use_scratch); i = 0; goto ldv_49005; ldv_49004: gen8_set_pte((void *)gtt_base + (unsigned long )i, scratch_pte); i = i + 1; ldv_49005: ; if ((unsigned int )i < num_entries) { goto ldv_49004; } else { } readl((void const volatile *)gtt_base); return; } } static void gen6_ggtt_clear_range(struct i915_address_space *vm , uint64_t start , uint64_t length , bool use_scratch ) { struct drm_i915_private *dev_priv ; unsigned int first_entry ; unsigned int num_entries ; gen6_pte_t scratch_pte ; gen6_pte_t *gtt_base ; int max_entries ; int i ; int __ret_warn_on ; long tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)(vm->dev)->dev_private; first_entry = (unsigned int )(start >> 12); num_entries = (unsigned int )(length >> 12); gtt_base = (gen6_pte_t *)dev_priv->gtt.gsm + (unsigned long )first_entry; max_entries = (int const )((unsigned int )(dev_priv->gtt.base.total >> 12) - first_entry); __ret_warn_on = (unsigned int )max_entries < num_entries; tmp = 
ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 1872, "First entry = %d; Num entries = %d (max=%d)\n", first_entry, num_entries, max_entries); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { num_entries = (unsigned int )max_entries; } else { } scratch_pte = (*(vm->pte_encode))(vm->scratch.addr, 1, (int )use_scratch, 0U); i = 0; goto ldv_49023; ldv_49022: iowrite32(scratch_pte, (void *)gtt_base + (unsigned long )i); i = i + 1; ldv_49023: ; if ((unsigned int )i < num_entries) { goto ldv_49022; } else { } readl((void const volatile *)gtt_base); return; } } static void i915_ggtt_insert_entries(struct i915_address_space *vm , struct sg_table *pages , uint64_t start , enum i915_cache_level cache_level , u32 unused ) { unsigned int flags ; { flags = (unsigned int )cache_level == 0U ? 
65536U : 65537U; intel_gtt_insert_sg_entries(pages, (unsigned int )(start >> 12), flags); return; } } static void i915_ggtt_clear_range(struct i915_address_space *vm , uint64_t start , uint64_t length , bool unused ) { unsigned int first_entry ; unsigned int num_entries ; { first_entry = (unsigned int )(start >> 12); num_entries = (unsigned int )(length >> 12); intel_gtt_clear_range(first_entry, num_entries); return; } } static int ggtt_bind_vma(struct i915_vma *vma , enum i915_cache_level cache_level , u32 flags ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct sg_table *pages ; u32 pte_flags___0 ; int ret ; struct i915_hw_ppgtt *appgtt ; { dev = (vma->vm)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; obj = vma->obj; pages = obj->pages; pte_flags___0 = 0U; ret = i915_get_ggtt_vma_pages(vma); if (ret != 0) { return (ret); } else { } pages = vma->ggtt_view.pages; if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { pte_flags___0 = pte_flags___0 | 1U; } else { } if ((unsigned long )dev_priv->mm.aliasing_ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0) || (int )flags & 1) { (*((vma->vm)->insert_entries))(vma->vm, pages, vma->node.start, cache_level, pte_flags___0); } else { } if ((unsigned long )dev_priv->mm.aliasing_ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0) && (flags & 2U) != 0U) { appgtt = dev_priv->mm.aliasing_ppgtt; (*(appgtt->base.insert_entries))(& appgtt->base, pages, vma->node.start, cache_level, pte_flags___0); } else { } return (0); } } static void ggtt_unbind_vma(struct i915_vma *vma ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; uint64_t size ; uint64_t __min1 ; uint64_t __min2 ; struct i915_hw_ppgtt *appgtt ; { dev = (vma->vm)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; obj = vma->obj; __min1 = (uint64_t )obj->base.size; __min2 = vma->node.size; size = __min1 < __min2 ? 
/* NOTE(review): CIL-generated (LDV/CPAchecker harness) flattening of
 * drivers/gpu/drm/i915/i915_gem_gtt.c. Code is left byte-identical;
 * only comments are added. The `ldv_*` labels/gotos encode the original
 * loops, and `ldv__builtin_expect` stands in for likely()/unlikely(). */
/* Tail of the preceding unbind function (its definition begins before this
 * view): clears the GGTT range when bound globally (bound & 1) and the
 * aliasing-PPGTT range when bound there too (bound & 2). */
__min1 : __min2; if ((int )vma->bound & 1) { (*((vma->vm)->clear_range))(vma->vm, vma->node.start, size, 1); } else { } if ((unsigned long )dev_priv->mm.aliasing_ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0) && ((int )vma->bound & 2) != 0) { appgtt = dev_priv->mm.aliasing_ppgtt; (*(appgtt->base.clear_range))(& appgtt->base, vma->node.start, size, 1); } else { } return; } }
/* i915_gem_gtt_finish_object: unmaps the object's scatter-gather list via
 * dma_unmap_sg_attrs (skipped when the byte flag at obj+411UL is set),
 * bracketed by do_idling()/undo_idling() to quiesce the GPU. */
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool interruptible ; { dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; interruptible = do_idling(dev_priv); if ((unsigned int )*((unsigned char *)obj + 411UL) == 0U) { dma_unmap_sg_attrs(& (dev->pdev)->dev, (obj->pages)->sgl, (int )(obj->pages)->nents, 0, (struct dma_attrs *)0); } else { } undo_idling(dev_priv, (int )interruptible); return; } }
/* i915_gtt_color_adjust: drm_mm color callback. Pads *start forward by one
 * 4 KiB page when this node's color differs, and pulls *end back by one page
 * when the next node in node_list differs in color (guard-page insertion
 * between differently-cached neighbours). */
static void i915_gtt_color_adjust(struct drm_mm_node *node , unsigned long color , u64 *start , u64 *end ) { struct list_head const *__mptr ; int tmp ; { if (node->color != color) { *start = *start + 4096ULL; } else { } tmp = list_empty((struct list_head const *)(& node->node_list)); if (tmp == 0) { __mptr = (struct list_head const *)node->node_list.next; node = (struct drm_mm_node *)__mptr; if ((unsigned int )*((unsigned char *)node + 32UL) != 0U && node->color != color) { *end = *end - 4096ULL; } else { } } else { } return; } }
/* i915_gem_setup_global_gtt: initializes the global-GTT drm_mm allocator
 * (range [start, end-4096) — the last page is a guard page), reserves nodes
 * for already-bound objects from mm.bound_list, clears every unused hole and
 * the guard page, and — when i915.enable_ppgtt is neither 0 nor 2 — allocates
 * and initializes an aliasing PPGTT. The inline asm below is the expanded
 * BUG_ON(mappable_end > end). */
static int i915_gem_setup_global_gtt(struct drm_device *dev , unsigned long start , unsigned long mappable_end , unsigned long end ) { struct drm_i915_private *dev_priv ; struct i915_address_space *ggtt_vm ; struct drm_mm_node *entry ; struct drm_i915_gem_object *obj ; unsigned long hole_start ; unsigned long hole_end ; int ret ; long tmp ; bool tmp___0 ; struct drm_i915_private *__p ; struct list_head const *__mptr ; struct i915_vma *vma ; struct i915_vma *tmp___1 ; unsigned long tmp___2 ; long tmp___3 ; int __ret_warn_on ; bool tmp___4 ; long tmp___5 ; long tmp___6 ;
struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; long tmp___7 ; struct list_head const *__mptr___2 ; u64 tmp___8 ; u64 tmp___9 ; int tmp___10 ; struct i915_hw_ppgtt *ppgtt ; void *tmp___11 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ggtt_vm = & dev_priv->gtt.base; tmp = ldv__builtin_expect(mappable_end > end, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c"), "i" (2021), "i" (12UL)); ldv_49091: ; goto ldv_49091; } else { } drm_mm_init(& ggtt_vm->mm, (u64 )start, (u64 )((end - start) - 4096UL)); dev_priv->gtt.base.start = start; dev_priv->gtt.base.total = end - start; tmp___0 = intel_vgpu_active(dev); if ((int )tmp___0) { ret = intel_vgt_balloon(dev); if (ret != 0) { return (ret); } else { } } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { dev_priv->gtt.base.mm.color_adjust = & i915_gtt_color_adjust; } else { } __mptr = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_49107; ldv_49106: tmp___1 = i915_gem_obj_to_vma(obj, ggtt_vm); vma = tmp___1; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { tmp___2 = i915_gem_obj_ggtt_offset(obj); drm_ut_debug_printk("i915_gem_setup_global_gtt", "reserving preallocated space: %lx + %zx\n", tmp___2, obj->base.size); } else { } tmp___4 = i915_gem_obj_ggtt_bound(obj); __ret_warn_on = (int )tmp___4; tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 2045, "WARN_ON(i915_gem_obj_ggtt_bound(obj))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = drm_mm_reserve_node(& ggtt_vm->mm, & vma->node); if (ret != 0) { tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_gem_setup_global_gtt", "Reservation failed: %i\n", ret); } else { } return (ret); } else { } vma->bound = (unsigned char )((unsigned int )vma->bound | 1U); __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_49107: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_49106; } else { } __mptr___1 = (struct list_head const *)ggtt_vm->mm.hole_stack.next; entry = (struct drm_mm_node *)__mptr___1 + 0xfffffffffffffff0UL; goto ldv_49114; ldv_49113: tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("i915_gem_setup_global_gtt", "clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); } else { } (*(ggtt_vm->clear_range))(ggtt_vm, (uint64_t )hole_start, (uint64_t )(hole_end - hole_start), 1); __mptr___2 = (struct list_head const *)entry->hole_stack.next; entry = (struct drm_mm_node *)__mptr___2 + 0xfffffffffffffff0UL; ldv_49114: ; if ((unsigned long )(& entry->hole_stack) != (unsigned long )(& ggtt_vm->mm.hole_stack)) { tmp___8 = drm_mm_hole_node_start(entry); hole_start = (unsigned long )tmp___8; tmp___9 = drm_mm_hole_node_end(entry); hole_end = (unsigned long )tmp___9; tmp___10 = 1; } else { tmp___10 = 0; } if (tmp___10) { goto ldv_49113; } else { } (*(ggtt_vm->clear_range))(ggtt_vm, (uint64_t )(end - 4096UL), 4096ULL, 1); if (i915.enable_ppgtt != 0 && i915.enable_ppgtt != 2) {
/* Aliasing-PPGTT setup path: 4576UL is sizeof(struct i915_hw_ppgtt), 208U
 * is GFP_KERNEL; -12 is -ENOMEM. NOTE(review): if allocate_va_range is
 * NULL, `ret` keeps its earlier value (0 here) when tested just below —
 * mirrors the original kernel code's flow. */
tmp___11 = kzalloc(4576UL, 208U); ppgtt = (struct i915_hw_ppgtt *)tmp___11; if ((unsigned long )ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0)) { return (-12); } else { } ret = __hw_ppgtt_init(dev, ppgtt); if (ret != 0) { (*(ppgtt->base.cleanup))(& ppgtt->base); kfree((void const *)ppgtt); return (ret); } else { } if ((unsigned long )ppgtt->base.allocate_va_range != (unsigned long )((int (*)(struct i915_address_space * , uint64_t , uint64_t ))0)) { ret = (*(ppgtt->base.allocate_va_range))(& ppgtt->base, 0ULL, (uint64_t )ppgtt->base.total); } else { } if (ret != 0) { (*(ppgtt->base.cleanup))(& ppgtt->base); kfree((void const *)ppgtt); return (ret); } else { } (*(ppgtt->base.clear_range))(& ppgtt->base, (uint64_t )ppgtt->base.start, (uint64_t )ppgtt->base.total, 1); dev_priv->mm.aliasing_ppgtt = ppgtt; } else { } return (0); } }
/* i915_gem_init_global_gtt: thin wrapper — sets up the global GTT over
 * [0, gtt.base.total) with the probed mappable aperture size. */
void i915_gem_init_global_gtt(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; unsigned long gtt_size ; unsigned long mappable_size ; { dev_priv = (struct drm_i915_private *)dev->dev_private; gtt_size = dev_priv->gtt.base.total; mappable_size = dev_priv->gtt.mappable_end; i915_gem_setup_global_gtt(dev, 0UL, mappable_size, gtt_size); return; } }
/* i915_global_gtt_cleanup: tears down the aliasing PPGTT (if any), deflates
 * the vGPU balloon when active, takes down the drm_mm and unlinks the vm,
 * then invokes the address space's cleanup hook. */
void i915_global_gtt_cleanup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct i915_address_space *vm ; struct i915_hw_ppgtt *ppgtt ; bool tmp ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; vm = & dev_priv->gtt.base; if ((unsigned long )dev_priv->mm.aliasing_ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { ppgtt = dev_priv->mm.aliasing_ppgtt; (*(ppgtt->base.cleanup))(& ppgtt->base); } else { } tmp___0 = drm_mm_initialized(& vm->mm); if ((int )tmp___0) { tmp = intel_vgpu_active(dev); if ((int )tmp) { intel_vgt_deballoon(); } else { } drm_mm_takedown(& vm->mm); list_del(& vm->global_link); } else { } (*(vm->cleanup))(vm); return; } }
/* setup_scratch_page (continues on the next block): allocates one page,
 * marks it uncached, DMA-maps it, and stores page+addr in gtt.base.scratch. */
static int setup_scratch_page(struct drm_device *dev ) { struct drm_i915_private *dev_priv ;
/* Continuation of setup_scratch_page: 32980U is the expanded GFP mask,
 * -12/-22 are -ENOMEM/-EINVAL. The page is set uncached before mapping. */
struct page *page ; dma_addr_t dma_addr ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; page = alloc_pages(32980U, 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { return (-12); } else { } set_pages_uc(page, 1); dma_addr = pci_map_page(dev->pdev, page, 0UL, 4096UL, 0); tmp = pci_dma_mapping_error(dev->pdev, dma_addr); if (tmp != 0) { return (-22); } else { } dev_priv->gtt.base.scratch.page = page; dev_priv->gtt.base.scratch.addr = dma_addr; return (0); } }
/* teardown_scratch_page: exact inverse of setup_scratch_page — restore
 * write-back caching, unmap, and free the scratch page. */
static void teardown_scratch_page(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct page *page ; { dev_priv = (struct drm_i915_private *)dev->dev_private; page = dev_priv->gtt.base.scratch.page; set_pages_wb(page, 1); pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, 4096UL, 0); __free_pages(page, 0U); return; } }
/* GMCH control-register decoders: each extracts a bit-field from the GMCH
 * ctl word and converts it to a size in bytes.
 * gen6 total GTT: bits 9:8, value << 20 (MiB granularity). */
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl ) { { snb_gmch_ctl = (u16 )((int )snb_gmch_ctl >> 8); snb_gmch_ctl = (unsigned int )snb_gmch_ctl & 3U; return ((unsigned int )((int )snb_gmch_ctl << 20)); } }
/* gen8 total GTT: bits 7:6 select a power of two, then << 20. */
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl ) { { bdw_gmch_ctl = (u16 )((int )bdw_gmch_ctl >> 6); bdw_gmch_ctl = (unsigned int )bdw_gmch_ctl & 3U; if ((unsigned int )bdw_gmch_ctl != 0U) { bdw_gmch_ctl = (u16 )(1 << (int )bdw_gmch_ctl); } else { } return ((unsigned int )((int )bdw_gmch_ctl << 20)); } }
/* CHV total GTT: bits 9:8; nonzero x yields 1 << (x + 20), else 0. */
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl ) { { gmch_ctrl = (u16 )((int )gmch_ctrl >> 8); gmch_ctrl = (unsigned int )gmch_ctrl & 3U; if ((unsigned int )gmch_ctrl != 0U) { return ((unsigned int )(1 << ((int )gmch_ctrl + 20))); } else { } return (0U); } }
/* gen6 stolen memory: bits 7:3, value << 25 (32 MiB granularity). */
static size_t gen6_get_stolen_size(u16 snb_gmch_ctl ) { { snb_gmch_ctl = (u16 )((int )snb_gmch_ctl >> 3); snb_gmch_ctl = (unsigned int )snb_gmch_ctl & 31U; return ((size_t )((int )snb_gmch_ctl << 25)); } }
/* gen8 stolen memory: bits 15:8, value << 25. */
static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl ) { { bdw_gmch_ctl = (u16 )((int )bdw_gmch_ctl >> 8); bdw_gmch_ctl = (unsigned int
)bdw_gmch_ctl & 255U; return ((size_t )((int )bdw_gmch_ctl << 25)); } }
/* CHV stolen memory: bits 7:3, piecewise — <=16: x*32MiB; 17..22:
 * (x-15)*4MiB; else (x-14)*4MiB. */
static size_t chv_get_stolen_size(u16 gmch_ctrl ) { { gmch_ctrl = (u16 )((int )gmch_ctrl >> 3); gmch_ctrl = (unsigned int )gmch_ctrl & 31U; if ((unsigned int )gmch_ctrl <= 16U) { return ((size_t )((int )gmch_ctrl << 25)); } else if ((unsigned int )gmch_ctrl <= 22U) { return ((size_t )(((int )gmch_ctrl + -15) << 22)); } else { return ((size_t )(((int )gmch_ctrl + -14) << 22)); } } }
/* gen9 stolen memory: bits 15:8, piecewise — <=239: x*32MiB; else
 * (x-239)*4MiB. */
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl ) { { gen9_gmch_ctl = (u16 )((int )gen9_gmch_ctl >> 8); gen9_gmch_ctl = (unsigned int )gen9_gmch_ctl & 255U; if ((unsigned int )gen9_gmch_ctl <= 239U) { return ((size_t )((int )gen9_gmch_ctl << 25)); } else { return ((size_t )(((int )gen9_gmch_ctl + -239) << 22)); } } }
/* ggtt_probe_common: ioremaps the GSM page-table window, located at the
 * midpoint of PCI BAR 0 (the resource-size expression below is the expanded
 * pci_resource_len()/2), then installs the scratch page. SKL (gen 9, the
 * 45UL byte flag clear) uses ioremap_nocache; others use ioremap_wc. */
static int ggtt_probe_common(struct drm_device *dev , size_t gtt_size ) { struct drm_i915_private *dev_priv ; phys_addr_t gtt_phys_addr ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; gtt_phys_addr = (dev->pdev)->resource[0].start + ((dev->pdev)->resource[0].start != 0ULL || (dev->pdev)->resource[0].end != (dev->pdev)->resource[0].start ?
(((dev->pdev)->resource[0].end - (dev->pdev)->resource[0].start) + 1ULL) / 2ULL : 0ULL); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size); } else { dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); } } else { dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); } if ((unsigned long )dev_priv->gtt.gsm == (unsigned long )((void *)0)) { drm_err("Failed to map the gtt page table\n"); return (-12); } else { } ret = setup_scratch_page(dev); if (ret != 0) { drm_err("Scratch setup failed\n"); iounmap((void volatile *)dev_priv->gtt.gsm); } else { } return (ret); } }
/* bdw_setup_private_ppat: programs the 64-bit private PPAT via two 32-bit
 * MMIO writes at offsets 16608/16612 (0x40E0/0x40E4); the PAT value is
 * forced to 0 when PPGTT is disabled. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv ) { uint64_t pat ; { pat = 4263531206295554311ULL; if (i915.enable_ppgtt == 0) { pat = 0ULL; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16608L, (uint32_t )pat, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16612L, (uint32_t )(pat >> 32), 1); return; } }
/* chv_setup_private_ppat: same register pair, CHV-specific PAT encoding. */
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv ) { uint64_t pat ; { pat = 4629771060558954560ULL; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16608L, (uint32_t )pat, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16612L, (uint32_t )(pat >> 32), 1); return; } }
/* gen8_gmch_probe (continues on the next block): reads the mappable aperture
 * from BAR 2, sets a 39-bit DMA mask, decodes stolen/GTT sizes per platform,
 * programs the PPAT, and installs the gen8 GGTT vtable. */
static int gen8_gmch_probe(struct drm_device *dev , size_t *gtt_total , size_t *stolen , phys_addr_t *mappable_base , unsigned long *mappable_end ) { struct drm_i915_private *dev_priv ; unsigned int gtt_size ; u16 snb_gmch_ctl ; int ret ; int tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { dev_priv = (struct drm_i915_private
/* Continuation of gen8_gmch_probe. 549755813887ULL is DMA_BIT_MASK(39);
 * config word 80 is SNB_GMCH_CTRL. Platform dispatch: gen>8 uses gen9
 * decoders; CHV (45UL flag set, gen==8) uses chv decoders; otherwise gen8.
 * Each GTT entry is 8 bytes, hence total = (gtt_size/8) pages << 12. */
*)dev->dev_private; *mappable_base = (dev->pdev)->resource[2].start; *mappable_end = (dev->pdev)->resource[2].start != 0ULL || (dev->pdev)->resource[2].end != (dev->pdev)->resource[2].start ? (unsigned long )(((dev->pdev)->resource[2].end - (dev->pdev)->resource[2].start) + 1ULL) : 0UL; tmp = pci_set_dma_mask(dev->pdev, 549755813887ULL); if (tmp == 0) { pci_set_consistent_dma_mask(dev->pdev, 549755813887ULL); } else { } pci_read_config_word((struct pci_dev const *)dev->pdev, 80, & snb_gmch_ctl); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 8U) { *stolen = gen9_get_stolen_size((int )snb_gmch_ctl); gtt_size = gen8_get_total_gtt_size((int )snb_gmch_ctl); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { *stolen = chv_get_stolen_size((int )snb_gmch_ctl); gtt_size = chv_get_total_gtt_size((int )snb_gmch_ctl); } else { *stolen = gen8_get_stolen_size((int )snb_gmch_ctl); gtt_size = gen8_get_total_gtt_size((int )snb_gmch_ctl); } } else { *stolen = gen8_get_stolen_size((int )snb_gmch_ctl); gtt_size = gen8_get_total_gtt_size((int )snb_gmch_ctl); } } *gtt_total = (unsigned long )(gtt_size / 8U) << 12; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { chv_setup_private_ppat(dev_priv); } else { goto _L; } } else { _L: /* CIL Label */ __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 9U) { chv_setup_private_ppat(dev_priv); } else { bdw_setup_private_ppat(dev_priv); } } else {
bdw_setup_private_ppat(dev_priv); } } ret = ggtt_probe_common(dev, (size_t )gtt_size); dev_priv->gtt.base.clear_range = & gen8_ggtt_clear_range; dev_priv->gtt.base.insert_entries = & gen8_ggtt_insert_entries; dev_priv->gtt.base.bind_vma = & ggtt_bind_vma; dev_priv->gtt.base.unbind_vma = & ggtt_unbind_vma; return (ret); } }
/* gen6_gmch_probe: gen6/7 analogue — validates the GMADR aperture size
 * (64 MiB < size <= 512 MiB, else -ENXIO/-6), sets a 40-bit DMA mask
 * (1099511627775ULL), decodes sizes, and installs the gen6 GGTT vtable.
 * gen6 GTT entries are 4 bytes, hence (gtt_size/4) << 12. */
static int gen6_gmch_probe(struct drm_device *dev , size_t *gtt_total , size_t *stolen , phys_addr_t *mappable_base , unsigned long *mappable_end ) { struct drm_i915_private *dev_priv ; unsigned int gtt_size ; u16 snb_gmch_ctl ; int ret ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; *mappable_base = (dev->pdev)->resource[2].start; *mappable_end = (dev->pdev)->resource[2].start != 0ULL || (dev->pdev)->resource[2].end != (dev->pdev)->resource[2].start ? (unsigned long )(((dev->pdev)->resource[2].end - (dev->pdev)->resource[2].start) + 1ULL) : 0UL; if (*mappable_end <= 67108863UL || *mappable_end > 536870912UL) { drm_err("Unknown GMADR size (%lx)\n", dev_priv->gtt.mappable_end); return (-6); } else { } tmp = pci_set_dma_mask(dev->pdev, 1099511627775ULL); if (tmp == 0) { pci_set_consistent_dma_mask(dev->pdev, 1099511627775ULL); } else { } pci_read_config_word((struct pci_dev const *)dev->pdev, 80, & snb_gmch_ctl); *stolen = gen6_get_stolen_size((int )snb_gmch_ctl); gtt_size = gen6_get_total_gtt_size((int )snb_gmch_ctl); *gtt_total = (unsigned long )(gtt_size / 4U) << 12; ret = ggtt_probe_common(dev, (size_t )gtt_size); dev_priv->gtt.base.clear_range = & gen6_ggtt_clear_range; dev_priv->gtt.base.insert_entries = & gen6_ggtt_insert_entries; dev_priv->gtt.base.bind_vma = & ggtt_bind_vma; dev_priv->gtt.base.unbind_vma = & ggtt_unbind_vma; return (ret); } }
/* gen6_gmch_remove: container_of(vm) -> i915_gtt, unmap the GSM window and
 * tear down the scratch page. Shared by gen6 and gen8 paths. */
static void gen6_gmch_remove(struct i915_address_space *vm ) { struct i915_gtt *gtt ; struct i915_address_space const *__mptr ; { __mptr = (struct i915_address_space const *)vm; gtt = (struct i915_gtt *)__mptr; iounmap((void volatile *)gtt->gsm); teardown_scratch_page(vm->dev);
return; } }
/* i915_gmch_probe: pre-gen6 path — defers to the intel-gtt/AGP layer
 * (intel_gmch_probe / intel_gtt_get) and installs the i915_* GGTT vtable;
 * logs the Ironlake idle-maps quirk when needed. -5 is -EIO. */
static int i915_gmch_probe(struct drm_device *dev , size_t *gtt_total , size_t *stolen , phys_addr_t *mappable_base , unsigned long *mappable_end ) { struct drm_i915_private *dev_priv ; int ret ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = intel_gmch_probe(dev_priv->bridge_dev, (dev_priv->dev)->pdev, (struct agp_bridge_data *)0); if (ret == 0) { drm_err("failed to set up gmch\n"); return (-5); } else { } intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); dev_priv->gtt.base.insert_entries = & i915_ggtt_insert_entries; dev_priv->gtt.base.clear_range = & i915_ggtt_clear_range; dev_priv->gtt.base.bind_vma = & ggtt_bind_vma; dev_priv->gtt.base.unbind_vma = & ggtt_unbind_vma; tmp = ldv__builtin_expect((long )dev_priv->gtt.do_idle_maps, 0L); if (tmp != 0L) { printk("\016[drm] applying Ironlake quirks for intel_iommu\n"); } else { } return (0); } }
/* i915_gmch_remove: delegate to the intel-gtt layer. */
static void i915_gmch_remove(struct i915_address_space *vm ) { { intel_gmch_remove(); return; } }
/* i915_gem_gtt_init: selects probe/cleanup hooks by generation (<=5: i915
 * GMCH path; <=7: gen6 path plus a PTE-encode variant chosen by platform
 * flags and eLLC size; >7: gen8 path), runs the probe, then logs sizes and
 * sanitizes i915.enable_ppgtt. */
int i915_gem_gtt_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct i915_gtt *gtt ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; long tmp ; long tmp___0 ; long tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; gtt = & dev_priv->gtt; __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) <= 5U) { gtt->gtt_probe = & i915_gmch_probe; gtt->base.cleanup = & i915_gmch_remove; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) <= 7U) { gtt->gtt_probe = & gen6_gmch_probe; gtt->base.cleanup = & gen6_gmch_remove; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char
*)__p___2 + 45UL) != 0U && dev_priv->ellc_size != 0UL) { gtt->base.pte_encode = & iris_pte_encode; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { gtt->base.pte_encode = & hsw_pte_encode; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { gtt->base.pte_encode = & byt_pte_encode; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { gtt->base.pte_encode = & ivb_pte_encode; } else { gtt->base.pte_encode = & snb_pte_encode; } } } } } else { dev_priv->gtt.gtt_probe = & gen8_gmch_probe; dev_priv->gtt.base.cleanup = & gen6_gmch_remove; } } ret = (*(gtt->gtt_probe))(dev, & gtt->base.total, & gtt->stolen_size, & gtt->mappable_base, & gtt->mappable_end); if (ret != 0) { return (ret); } else { } gtt->base.dev = dev; printk("\016[drm] Memory usable by graphics device = %zdM\n", gtt->base.total >> 20); tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_gtt_init", "GMADR size = %ldM\n", gtt->mappable_end >> 20); } else { } tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_gem_gtt_init", "GTT stolen size = %zdM\n", gtt->stolen_size >> 20); } else { } if (intel_iommu_gfx_mapped != 0) { printk("\016[drm] VT-d active for gfx access\n"); } else { } i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_gem_gtt_init", "ppgtt mode: %i\n", i915.enable_ppgtt); } else { } return (0); } }
/* i915_gem_restore_gtt_mappings (continues on the next block): resume path —
 * clears faults and the whole GGTT, rebinds every bound object, reprograms
 * the PPAT on gen8+, and rewrites PPGTT page tables on older gens. */
void i915_gem_restore_gtt_mappings(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct i915_address_space *vm ; struct list_head const *__mptr ; struct i915_vma *vma ; struct i915_vma *tmp ; int __ret_warn_on ; int tmp___0 ;
/* Continuation of i915_gem_restore_gtt_mappings: iterate mm.bound_list
 * (the 0x...fee8UL offsets are container_of arithmetic), clflush and rebind
 * each object with PIN_UPDATE (32U), WARN on bind failure. */
long tmp___1 ; struct list_head const *__mptr___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct list_head const *__mptr___1 ; struct i915_hw_ppgtt *ppgtt ; struct i915_address_space const *__mptr___2 ; bool tmp___2 ; struct list_head const *__mptr___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i915_check_and_clear_faults(dev); (*(dev_priv->gtt.base.clear_range))(& dev_priv->gtt.base, (uint64_t )dev_priv->gtt.base.start, (uint64_t )dev_priv->gtt.base.total, 1); __mptr = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_49328; ldv_49327: tmp = i915_gem_obj_to_vma(obj, & dev_priv->gtt.base); vma = tmp; if ((unsigned long )vma == (unsigned long )((struct i915_vma *)0)) { goto ldv_49324; } else { } i915_gem_clflush_object(obj, obj->pin_display != 0U); tmp___0 = i915_vma_bind(vma, (enum i915_cache_level )obj->cache_level, 32U); __ret_warn_on = tmp___0 != 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 2565, "WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ldv_49324: __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_49328: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_49327; } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 7U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned
/* gen8+ resume: restore the private PPAT (CHV/gen9 vs BDW encoding) and
 * return; older gens fall through to rewrite PPGTT page directories for
 * every vm on vm_list (aliasing ppgtt substituted for the GGTT entry). */
char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { chv_setup_private_ppat(dev_priv); } else { goto _L; } } else { _L: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { chv_setup_private_ppat(dev_priv); } else { bdw_setup_private_ppat(dev_priv); } } else { bdw_setup_private_ppat(dev_priv); } } return; } else { } if (i915.enable_ppgtt != 0) { __mptr___1 = (struct list_head const *)dev_priv->vm_list.next; vm = (struct i915_address_space *)__mptr___1 + 0xffffffffffffff50UL; goto ldv_49368; ldv_49367: __mptr___2 = (struct i915_address_space const *)vm; ppgtt = (struct i915_hw_ppgtt *)__mptr___2; tmp___2 = i915_is_ggtt(vm); if ((int )tmp___2) { ppgtt = dev_priv->mm.aliasing_ppgtt; } else { } gen6_write_page_range(dev_priv, & ppgtt->__annonCompField80.pd, 0U, (uint32_t )ppgtt->base.total); __mptr___3 = (struct list_head const *)vm->global_link.next; vm = (struct i915_address_space *)__mptr___3 + 0xffffffffffffff50UL; ldv_49368: ; if ((unsigned long )(& vm->global_link) != (unsigned long )(& dev_priv->vm_list)) { goto ldv_49367; } else { } } else { } i915_ggtt_flush(dev_priv); return; } }
/* __i915_gem_vma_create: allocates a vma from the per-device slab (208U is
 * GFP_KERNEL), links it to obj/vm, copies the ggtt_view for GGTT vmas, and
 * takes a PPGTT reference for non-GGTT vms. WARNs (and returns -EINVAL) if
 * the presence of ggtt_view disagrees with i915_is_ggtt(vm). */
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj , struct i915_address_space *vm , struct i915_ggtt_view const *ggtt_view ) { struct i915_vma *vma ; void *tmp ; int __ret_warn_on ; bool tmp___0 ; long tmp___1 ; long tmp___2 ; struct drm_i915_private *tmp___3 ; void *tmp___4 ; void *tmp___5 ; bool tmp___6 ; struct i915_hw_ppgtt *tmp___7 ; bool tmp___8 ; int tmp___9 ; { tmp___0 = i915_is_ggtt(vm); __ret_warn_on = (int )tmp___0 != ((unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0)); tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 !=
0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 2604, "WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { tmp = ERR_PTR(-22L); return ((struct i915_vma *)tmp); } else { } tmp___3 = to_i915((struct drm_device const *)obj->base.dev); tmp___4 = kmem_cache_zalloc(tmp___3->vmas, 208U); vma = (struct i915_vma *)tmp___4; if ((unsigned long )vma == (unsigned long )((struct i915_vma *)0)) { tmp___5 = ERR_PTR(-12L); return ((struct i915_vma *)tmp___5); } else { } INIT_LIST_HEAD(& vma->vma_link); INIT_LIST_HEAD(& vma->mm_list); INIT_LIST_HEAD(& vma->exec_list); vma->vm = vm; vma->obj = obj; tmp___6 = i915_is_ggtt(vm); if ((int )tmp___6) { vma->ggtt_view = *ggtt_view; } else { } list_add_tail(& vma->vma_link, & obj->vma_list); tmp___8 = i915_is_ggtt(vm); if (tmp___8) { tmp___9 = 0; } else { tmp___9 = 1; } if (tmp___9) { tmp___7 = i915_vm_to_ppgtt(vm); i915_ppgtt_get(tmp___7); } else { } return (vma); } }
/* i915_gem_obj_lookup_or_create_vma: find an existing vma for (obj, vm) or
 * create one — with the normal GGTT view when vm is the GGTT, NULL view
 * otherwise. */
struct i915_vma *i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj , struct i915_address_space *vm ) { struct i915_vma *vma ; bool tmp ; { vma = i915_gem_obj_to_vma(obj, vm); if ((unsigned long )vma == (unsigned long )((struct i915_vma *)0)) { tmp = i915_is_ggtt(vm); vma = __i915_gem_vma_create(obj, vm, (int )tmp ?
& i915_ggtt_view_normal : (struct i915_ggtt_view const *)0); } else { } return (vma); } }
/* i915_gem_obj_lookup_or_create_ggtt_vma: GGTT-view variant — WARNs and
 * returns -EINVAL on a NULL view, propagates an ERR_PTR from lookup,
 * otherwise creates the vma against the device's GGTT address space. */
struct i915_vma *i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) { struct i915_address_space *ggtt ; struct i915_vma *vma ; void *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; bool tmp___2 ; { ggtt = & ((struct drm_i915_private *)(obj->base.dev)->dev_private)->gtt.base; __ret_warn_on = (unsigned long )view == (unsigned long )((struct i915_ggtt_view const *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 2648, "WARN_ON(!view)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { tmp = ERR_PTR(-22L); return ((struct i915_vma *)tmp); } else { } vma = i915_gem_obj_to_ggtt_view(obj, view); tmp___2 = IS_ERR((void const *)vma); if ((int )tmp___2) { return (vma); } else { } if ((unsigned long )vma == (unsigned long )((struct i915_vma *)0)) { vma = __i915_gem_vma_create(obj, ggtt, view); } else { } return (vma); } }
/* rotate_pages: fills *st* with the DMA addresses from *in* rewritten in
 * 90-degree-rotated order — for each column, pages are emitted bottom row
 * to top (src_idx starts at (height-1)*width+column and decreases by width). */
static void rotate_pages(dma_addr_t *in , unsigned int width , unsigned int height , struct sg_table *st ) { unsigned int column ; unsigned int row ; unsigned int src_idx ; struct scatterlist *sg ; { sg = st->sgl; st->nents = 0U; column = 0U; goto ldv_49405; ldv_49404: src_idx = (height - 1U) * width + column; row = 0U; goto ldv_49402; ldv_49401: st->nents = st->nents + 1U; sg_set_page(sg, (struct page *)0, 4096U, 0U); sg->dma_address = *(in + (unsigned long )src_idx); sg->dma_length = 4096U; sg = sg_next(sg); src_idx = src_idx - width; row = row + 1U; ldv_49402: ; if (row < height) { goto ldv_49401; } else { } column = column + 1U; ldv_49405: ; if (column < width) { goto ldv_49404; } else { } return; } }
/* intel_rotate_fb_obj_pages(): build an sg_table describing a 90-degree
 * rotated GGTT view of obj.  It computes the tile grid (width_pages x
 * height_pages) from rot_info, collects the DMA address of every backing page
 * into page_addr_list (drm_malloc_ab of `pages` 8-byte entries), then calls
 * rotate_pages() to lay them out rotated.  ret starts at -12 (-ENOMEM);
 * failures unwind through err_sg_alloc/err_st_alloc and return ERR_PTR(ret).
 * kmalloc(16UL, 208U) is sizeof(struct sg_table) with GFP_KERNEL as expanded
 * by CIL. */
static struct sg_table *intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view , struct drm_i915_gem_object *obj ) { struct drm_device *dev ; struct intel_rotation_info *rot_info ; unsigned long size ; unsigned long pages ; unsigned long rot_pages ; struct sg_page_iter sg_iter ; unsigned long i ; dma_addr_t *page_addr_list ; struct sg_table *st ; unsigned int tile_pitch ; unsigned int tile_height ; unsigned int width_pages ; unsigned int height_pages ; int ret ; void *tmp ; void *tmp___0 ; void *tmp___1 ; bool tmp___2 ; long tmp___3 ; long tmp___4 ; void *tmp___5 ; { dev = obj->base.dev; rot_info = & ggtt_view->__annonCompField78.rotation_info; ret = -12; pages = obj->base.size / 4096UL; tile_height = intel_tile_height(dev, rot_info->pixel_format, rot_info->fb_modifier); tile_pitch = 4096U / tile_height; width_pages = ((rot_info->pitch + tile_pitch) - 1U) / tile_pitch; height_pages = ((rot_info->height + tile_height) - 1U) / tile_height; rot_pages = (unsigned long )(width_pages * height_pages); size = rot_pages * 4096UL; tmp = drm_malloc_ab(pages, 8UL); page_addr_list = (dma_addr_t *)tmp; if ((unsigned long )page_addr_list == (unsigned long )((dma_addr_t *)0ULL)) { tmp___0 = ERR_PTR((long )ret); return ((struct sg_table *)tmp___0); } else { } tmp___1 = kmalloc(16UL, 208U); st = (struct sg_table *)tmp___1; if ((unsigned long )st == (unsigned long )((struct sg_table *)0)) { goto err_st_alloc; } else { } ret = sg_alloc_table(st, (unsigned int )rot_pages, 208U); if (ret != 0) { goto err_sg_alloc; } else { } i = 0UL; __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, 0UL); goto ldv_49428; ldv_49427: *(page_addr_list + i) = sg_page_iter_dma_address(& sg_iter); i = i + 1UL; ldv_49428: tmp___2 = __sg_page_iter_next(& sg_iter); if ((int )tmp___2) { goto ldv_49427; } else { } rotate_pages(page_addr_list, width_pages, height_pages, st); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { 
/* Success and failure paths below both emit a DRM_DEBUG message guarded by
 * (drm_debug & 4).  After this function, intel_partial_pages() begins: it
 * copies view->params.partial.{offset,size} pages of the object into a fresh
 * sg_table of single 4096-byte entries. */
drm_ut_debug_printk("intel_rotate_fb_obj_pages", "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n", size, rot_info->pitch, rot_info->height, rot_info->pixel_format, width_pages, height_pages, rot_pages); } else { } drm_free_large((void *)page_addr_list); return (st); err_sg_alloc: kfree((void const *)st); err_st_alloc: drm_free_large((void *)page_addr_list); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_rotate_fb_obj_pages", "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n", size, ret, rot_info->pitch, rot_info->height, rot_info->pixel_format, width_pages, height_pages, rot_pages); } else { } tmp___5 = ERR_PTR((long )ret); return ((struct sg_table *)tmp___5); } } static struct sg_table *intel_partial_pages(struct i915_ggtt_view const *view , struct drm_i915_gem_object *obj ) { struct sg_table *st ; struct scatterlist *sg ; struct sg_page_iter obj_sg_iter ; int ret ; void *tmp ; bool tmp___0 ; void *tmp___1 ; { ret = -12; tmp = kmalloc(16UL, 208U); st = (struct sg_table *)tmp; if ((unsigned long )st == (unsigned long )((struct sg_table *)0)) { goto err_st_alloc; } else { } ret = sg_alloc_table(st, view->params.partial.size, 208U); if (ret != 0) { goto err_sg_alloc; } else { } sg = st->sgl; st->nents = 0U; __sg_page_iter_start(& obj_sg_iter, (obj->pages)->sgl, (obj->pages)->nents, view->params.partial.offset); goto ldv_49443; ldv_49442: ; if (st->nents >= (unsigned int )view->params.partial.size) { goto ldv_49441; } else { } sg_set_page(sg, (struct page *)0, 4096U, 0U); sg->dma_address = sg_page_iter_dma_address(& obj_sg_iter); sg->dma_length = 4096U; sg = sg_next(sg); st->nents = st->nents + 1U; ldv_49443: tmp___0 = __sg_page_iter_next(& obj_sg_iter); if ((int )tmp___0) { goto ldv_49442; } else { } ldv_49441: ; return (st); err_sg_alloc: kfree((void const *)st); 
/* Error tail of intel_partial_pages(), then i915_get_ggtt_vma_pages():
 * populate vma->ggtt_view.pages according to the view type — type 0 (normal)
 * reuses the object's own page table, type 1 (rotated) calls
 * intel_rotate_fb_obj_pages(), type 2 (partial) calls intel_partial_pages();
 * any other type triggers a one-shot WARN ("GGTT view %u not implemented!").
 * A NULL result becomes ret = -22 (-EINVAL) with a drm_err; an ERR_PTR result
 * is converted via PTR_ERR and the pages pointer is reset to NULL. */
err_st_alloc: tmp___1 = ERR_PTR((long )ret); return ((struct sg_table *)tmp___1); } } static int i915_get_ggtt_vma_pages(struct i915_vma *vma ) { int ret ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; bool tmp___3 ; { ret = 0; if ((unsigned long )vma->ggtt_view.pages != (unsigned long )((struct sg_table *)0)) { return (0); } else { } if ((unsigned int )vma->ggtt_view.type == 0U) { vma->ggtt_view.pages = (vma->obj)->pages; } else if ((unsigned int )vma->ggtt_view.type == 1U) { vma->ggtt_view.pages = intel_rotate_fb_obj_pages(& vma->ggtt_view, vma->obj); } else if ((unsigned int )vma->ggtt_view.type == 2U) { vma->ggtt_view.pages = intel_partial_pages((struct i915_ggtt_view const *)(& vma->ggtt_view), vma->obj); } else { __ret_warn_once = 1; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 2822, "GGTT view %u not implemented!\n", (unsigned int )vma->ggtt_view.type); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); } if ((unsigned long )vma->ggtt_view.pages == (unsigned long )((struct sg_table *)0)) { drm_err("Failed to get pages for GGTT view type %u!\n", (unsigned int )vma->ggtt_view.type); ret = -22; } else { tmp___3 = IS_ERR((void const *)vma->ggtt_view.pages); if ((int )tmp___3) { tmp___2 = PTR_ERR((void const *)vma->ggtt_view.pages); ret = (int )tmp___2; vma->ggtt_view.pages = (struct sg_table *)0; drm_err("Failed to get pages for VMA view type %u (%d)!\n", (unsigned int )vma->ggtt_view.type, ret); } else { } } return 
/* i915_vma_bind(): WARN and return -22 (-EINVAL) when flags == 0; derive
 * bind_flags (bit 0 from flags&4, bit 1 from flags&16; flags&32 forces a
 * rebind of already-bound bits, otherwise already-bound bits are masked out).
 * If nothing remains to bind, return 0.  For an unallocated node on an
 * address space with allocate_va_range, trace and allocate the VA range
 * first, then call the space's bind_vma hook and record the newly bound bits
 * in vma->bound.  Followed by i915_ggtt_view_size() (object size for
 * normal/rotated views, partial.size << 12 for partial, WARN_ONCE otherwise)
 * and LDV wrappers ldv_queue_work_on_277/278 that model queue_work_on /
 * queue_delayed_work_on by also activating the tracked work item. */
(ret); } } int i915_vma_bind(struct i915_vma *vma , enum i915_cache_level cache_level , u32 flags ) { int ret ; u32 bind_flags ; int __ret_warn_on ; long tmp ; long tmp___0 ; bool tmp___1 ; { __ret_warn_on = flags == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 2854, "WARN_ON(flags == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-22); } else { } bind_flags = 0U; if ((flags & 4U) != 0U) { bind_flags = bind_flags | 1U; } else { } if ((flags & 16U) != 0U) { bind_flags = bind_flags | 2U; } else { } if ((flags & 32U) != 0U) { bind_flags = (u32 )vma->bound | bind_flags; } else { bind_flags = (u32 )(~ ((int )vma->bound)) & bind_flags; } if (bind_flags == 0U) { return (0); } else { } if ((unsigned int )*((unsigned char *)vma + 88UL) == 0U && (unsigned long )(vma->vm)->allocate_va_range != (unsigned long )((int (*)(struct i915_address_space * , uint64_t , uint64_t ))0)) { tmp___1 = i915_is_ggtt(vma->vm); trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size, (int )tmp___1 ? 
"G" : "P"); ret = (*((vma->vm)->allocate_va_range))(vma->vm, vma->node.start, vma->node.size); if (ret != 0) { return (ret); } else { } } else { } ret = (*((vma->vm)->bind_vma))(vma, cache_level, bind_flags); if (ret != 0) { return (ret); } else { } vma->bound = (unsigned char )((int )vma->bound | (int )((unsigned char )bind_flags)); return (0); } } size_t i915_ggtt_view_size(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { if ((unsigned int )view->type == 0U || (unsigned int )view->type == 1U) { return (obj->base.size); } else if ((unsigned int )view->type == 2U) { return ((size_t )(view->params.partial.size << 12)); } else { __ret_warn_once = 1; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_gtt.c", 2910, "GGTT view %u not implemented!\n", (unsigned int )view->type); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return (obj->base.size); } } } bool ldv_queue_work_on_277(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_278(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp 
/* Remaining LDV workqueue-model wrappers (ldv_queue_work_on_279,
 * ldv_flush_workqueue_280, ldv_queue_delayed_work_on_281): each forwards to
 * the real kernel API and then updates the LDV work-item state machine via
 * activate_work_18 / call_and_disable_all_18.  After them come CIL-inlined
 * kernel header helpers: variable_test_bit() (bt/sbb asm bit test) and
 * arch_local_save_flags___7() (paravirt save_fl call; the ud2/__bug_table asm
 * is the inlined BUG() taken when pv_irq_ops.save_fl.func is NULL). */
= queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_279(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_280(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_281(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern pgprot_t pgprot_writecombine(pgprot_t ) ; __inline static int variable_test_bit(long nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" (nr)); return (oldbit); } } __inline static unsigned long arch_local_save_flags___7(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); 
/* Infinite loop after BUG() (ldv_4860), then the PARAVIRT_CALL asm that
 * actually reads the flags, followed by inlined atomic/cmpxchg helpers
 * (__atomic_add_unless___0, atomic64_read, atomic_add_unless___0,
 * atomic_long_read), test_ti_thread_flag() and static_key_false___5(). */
ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static int __atomic_add_unless___0(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5708: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5707; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5707; } else { } c = old; goto ldv_5708; ldv_5707: ; return (c); } } __inline static long atomic64_read(atomic64_t const *v ) { long __var ; { __var = 0L; return ((long )*((long const volatile *)(& v->counter))); } } __inline static int atomic_add_unless___0(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless___0(v, a, u); return (tmp != u); } } __inline static long atomic_long_read(atomic_long_t *l ) { atomic64_t *v ; long tmp ; { v = l; tmp = atomic64_read((atomic64_t const *)v); return (tmp); } } __inline static int test_ti_thread_flag(struct thread_info *ti , int flag ) { int tmp ; { tmp = variable_test_bit((long )flag, (unsigned long const volatile *)(& ti->flags)); return (tmp); } } __inline static bool static_key_false___5(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } extern void prepare_to_wait(wait_queue_head_t * , wait_queue_t * , int ) ; extern int autoremove_wake_function(wait_queue_t * , unsigned int , int , void * ) ; extern 
/* Inlined rwsem/jiffies externs; rcu_read_lock_sched_held___5(): returns
 * nonzero when lockdep believes rcu_sched is held — checks
 * debug_lockdep_rcu_enabled(), rcu_is_watching(),
 * rcu_lockdep_current_cpu_online(), lock_is_held(&rcu_sched_lock_map), and
 * falls back to preempt_count() or IRQs-disabled (via
 * arch_local_save_flags___7).  Then timer externs with their LDV
 * interceptor declarations (ldv_mod_timer_296, ldv_del_timer_sync_297,
 * ldv_queue_*_29x), and inline helpers queue_delayed_work___0 /
 * mod_delayed_work (8192 is the CIL expansion of WORK_CPU_UNBOUND), plus the
 * head of kref_get___5() (atomic_add_return refcount with WARN_ONCE on a
 * zero-or-negative prior count). */
void down_write(struct rw_semaphore * ) ; extern void up_write(struct rw_semaphore * ) ; extern unsigned int jiffies_to_usecs(unsigned long const ) ; extern u64 nsecs_to_jiffies64(u64 ) ; extern unsigned long get_seconds(void) ; __inline static int rcu_read_lock_sched_held___5(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___7(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } extern void destroy_timer_on_stack(struct timer_list * ) ; extern int mod_timer(struct timer_list * , unsigned long ) ; int ldv_mod_timer_296(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; extern int del_timer_sync(struct timer_list * ) ; int ldv_del_timer_sync_297(struct timer_list *ldv_func_arg1 ) ; bool ldv_queue_work_on_291(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_293(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_292(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_295(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , 
struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; extern bool mod_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; void ldv_flush_workqueue_294(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_300(struct delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_298(struct delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_299(struct delayed_work *ldv_func_arg1 ) ; __inline static bool queue_delayed_work___0(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_292(8192, wq, dwork, delay); return (tmp); } } __inline static bool mod_delayed_work(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = mod_delayed_work_on(8192, wq, dwork, delay); return (tmp); } } __inline static void kref_get___5(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
/* Tail of kref_get___5()'s WARN_ONCE, then the inlined kref helpers:
 * kref_sub___5() (WARN on NULL release; atomic_sub_and_test then call
 * release and return 1 when the count hits zero), kref_put___5()
 * (kref_sub with count 1), kref_put_mutex___0() (atomic_add_unless fast
 * path, else take the mutex, atomic_dec_and_test, release under/after the
 * lock), and get_pid() (atomic_inc of pid->count when non-NULL).  Followed by
 * sched/pid inline helpers: io_schedule, task_pid, wake_up_process extern,
 * test_tsk_thread_flag, signal_pending (flag 2 = TIF_SIGPENDING per the
 * inlined constant), need_resched (flag 3), kmem_cache externs, and LDV
 * timer/work state-machine declarations (activate_*/disable_*/reg_timer_*). */
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub___5(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___5(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___5(kref, 1U, release); return (tmp); } } __inline static int kref_put_mutex___0(struct kref *kref , void (*release)(struct kref * ) , struct mutex *lock ) { int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 138); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = atomic_add_unless___0(& kref->refcount, -1, 1); tmp___3 = ldv__builtin_expect(tmp___2 == 0, 0L); if (tmp___3 != 0L) { mutex_lock_nested(lock, 0U); tmp___0 = atomic_dec_and_test(& kref->refcount); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { mutex_unlock(lock); return (0); } else { } (*release)(kref); return (1); } else { } return (0); } } __inline static struct pid *get_pid(struct pid *pid ) { { if ((unsigned long )pid != (unsigned long )((struct pid *)0)) { 
atomic_inc(& pid->count); } else { } return (pid); } } extern void put_pid(struct pid * ) ; extern long schedule_timeout(long ) ; extern long io_schedule_timeout(long ) ; __inline static void io_schedule(void) { { io_schedule_timeout(9223372036854775807L); return; } } __inline static struct pid *task_pid(struct task_struct *task ) { { return (task->pids[0].pid); } } extern int wake_up_process(struct task_struct * ) ; __inline static int test_tsk_thread_flag(struct task_struct *tsk , int flag ) { int tmp ; { tmp = test_ti_thread_flag((struct thread_info *)tsk->stack, flag); return (tmp); } } __inline static int signal_pending(struct task_struct *p ) { int tmp ; long tmp___0 ; { tmp = test_tsk_thread_flag(p, 2); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); return ((int )tmp___0); } } __inline static bool need_resched(void) { struct thread_info *tmp ; int tmp___0 ; long tmp___1 ; { tmp = current_thread_info(); tmp___0 = test_ti_thread_flag(tmp, 3); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); return (tmp___1 != 0L); } } extern struct kmem_cache *kmem_cache_create(char const * , size_t , size_t , unsigned long , void (*)(void * ) ) ; extern void kmem_cache_free(struct kmem_cache * , void * ) ; void call_and_disable_work_3(struct work_struct *work ) ; void disable_work_3(struct work_struct *work ) ; void invoke_work_4(void) ; void call_and_disable_work_4(struct work_struct *work ) ; void activate_work_3(struct work_struct *work , int state ) ; void activate_pending_timer_22(struct timer_list *timer , unsigned long data , int pending_flag ) ; void activate_work_4(struct work_struct *work , int state ) ; void activate_suitable_timer_22(struct timer_list *timer , unsigned long data ) ; void activate_pending_timer_21(struct timer_list *timer , unsigned long data , int pending_flag ) ; void invoke_work_3(void) ; void call_and_disable_all_4(int state ) ; int reg_timer_21(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) ; void 
/* Remaining LDV timer/work declarations and mm externs, then the inlined
 * scatterlist helpers: sg_assign_page___1() stores the page pointer in the
 * low-bit-tagged sg->page_link after three inlined BUG_ON checks (unaligned
 * page pointer, bad sg_magic 0x87654321, entry already marked as a chain
 * link — each rendered as the ud2/__bug_table asm plus an infinite-loop
 * label); sg_set_page___1() also records offset and length; sg_page___5()
 * recovers the page by masking the two tag bits; sg_mark_end() sets the
 * end-marker bit (2) and clears the chain bit (1); sg_page_iter_page___1()
 * converts page + sg_pgoffset to the struct page address using the
 * vmemmap/PAGE_OFFSET constants inlined by CIL. */
disable_suitable_timer_21(struct timer_list *timer ) ; void choose_timer_22(void) ; void disable_work_4(struct work_struct *work ) ; void disable_suitable_timer_22(struct timer_list *timer ) ; void call_and_disable_all_3(int state ) ; void ldv_timer_22(int state , struct timer_list *timer ) ; int reg_timer_22(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) ; extern void put_page(struct page * ) ; extern void unmap_mapping_range(struct address_space * , loff_t const , loff_t const , int ) ; extern int set_page_dirty(struct page * ) ; extern unsigned long vm_mmap(struct file * , unsigned long , unsigned long , unsigned long , unsigned long , unsigned long ) ; extern struct vm_area_struct *find_vma(struct mm_struct * , unsigned long ) ; extern pgprot_t vm_get_page_prot(unsigned long ) ; extern int vm_insert_pfn(struct vm_area_struct * , unsigned long , unsigned long ) ; __inline static void sg_assign_page___1(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (90), "i" (12UL)); ldv_25350: ; goto ldv_25350; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (92), "i" (12UL)); ldv_25351: ; goto ldv_25351; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" 
((char *)"include/linux/scatterlist.h"), "i" (93), "i" (12UL)); ldv_25352: ; goto ldv_25352; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page___1(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { sg_assign_page___1(sg, page); sg->offset = offset; sg->length = len; return; } } __inline static struct page *sg_page___5(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_25362: ; goto ldv_25362; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_25363: ; goto ldv_25363; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } __inline static void sg_mark_end(struct scatterlist *sg ) { long tmp ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (193), "i" (12UL)); ldv_25378: ; goto ldv_25378; } else { } sg->page_link = sg->page_link | 2UL; sg->page_link = sg->page_link & 0xfffffffffffffffeUL; return; } } __inline static struct page *sg_page_iter_page___1(struct sg_page_iter *piter ) { struct page *tmp ; { tmp = sg_page___5(piter->sg); return ((struct page *)-24189255811072L + ((unsigned long )(((long )tmp + 24189255811072L) / 64L) + (unsigned long )piter->sg_pgoffset)); } } extern unsigned long 
/* Inlined uaccess/pagemap helpers: file_inode(), __copy_from_user() (with
 * __might_fault check), __copy_from_user_inatomic_nocache(),
 * mapping_gfp_mask()/mapping_set_gfp_mask() (gfp mask lives in the low 25
 * bits of mapping->flags), then fault_in_multipages_writeable(): probes each
 * 4096-byte page of [uaddr, uaddr+size) — plus the final page when `end`
 * falls in a different page — with a zero-byte __put_user so the pages are
 * faulted in; each probe is the STAC/CLAC-alternative put_user asm template
 * with a fixup/__ex_table entry, selected by a switch on the access width
 * (1/2/4/8 bytes; only the 1UL case is reachable here).  Returns the first
 * nonzero __put_user error, else 0.  The chunk ends with the (truncated)
 * fault_in_multipages_readable___0(), the analogous __get_user probe loop.
 * NOTE(review): the interior line breaks below fall inside asm string
 * literals, so no comments are interleaved; text is kept byte-identical. */
swiotlb_nr_tbl(void) ; __inline static struct inode *file_inode(struct file const *f ) { { return ((struct inode *)f->f_inode); } } extern unsigned long invalidate_mapping_pages(struct address_space * , unsigned long , unsigned long ) ; __inline static int __copy_from_user(void *dst , void const *src , unsigned int size ) { int tmp ; { __might_fault("./arch/x86/include/asm/uaccess_64.h", 97); tmp = __copy_from_user_nocheck(dst, src, size); return (tmp); } } extern long __copy_user_nocache(void * , void const * , unsigned int , int ) ; __inline static int __copy_from_user_inatomic_nocache(void *dst , void const *src , unsigned int size ) { long tmp ; { tmp = __copy_user_nocache(dst, src, size, 0); return ((int )tmp); } } __inline static gfp_t mapping_gfp_mask(struct address_space *mapping ) { { return ((gfp_t )mapping->flags & 33554431U); } } __inline static void mapping_set_gfp_mask(struct address_space *m , gfp_t mask ) { { m->flags = (m->flags & 0xfffffffffe000000UL) | (unsigned long )mask; return; } } __inline static int fault_in_multipages_writeable(char *uaddr , int size ) { int ret ; char *end ; long tmp ; int __pu_err ; int __pu_err___0 ; { ret = 0; end = uaddr + ((unsigned long )size + 0xffffffffffffffffUL); tmp = ldv__builtin_expect(size == 0, 0L); if (tmp != 0L) { return (ret); } else { } goto ldv_34226; ldv_34225: __pu_err = 0; switch (1UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 
6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "iq" (0), "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__pu_err)); goto ldv_34219; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (0), "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__pu_err)); goto ldv_34219; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - 
.\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "ir" (0), "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__pu_err)); goto ldv_34219; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err): "er" (0), "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__pu_err)); goto ldv_34219; default: __put_user_bad(); } ldv_34219: ret = __pu_err; if (ret != 0) { return (ret); } else { } uaddr = uaddr + 4096UL; ldv_34226: ; if ((unsigned long )uaddr <= (unsigned long )end) { goto ldv_34225; } else { } if ((((unsigned long )uaddr ^ (unsigned long )end) & 0xfffffffffffff000UL) == 0UL) { __pu_err___0 = 0; switch (1UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection 
.altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %b1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "iq" (0), "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__pu_err___0)); goto ldv_34230; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %w1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (0), "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__pu_err___0)); goto ldv_34230; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip 
-(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %k1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "ir" (0), "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__pu_err___0)); goto ldv_34230; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %1,%2\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__pu_err___0): "er" (0), "m" (*((struct __large_struct *)end)), "i" (-14), "0" 
(__pu_err___0)); goto ldv_34230; default: __put_user_bad(); } ldv_34230: ret = __pu_err___0; } else { } return (ret); } } __inline static int fault_in_multipages_readable___0(char const *uaddr , int size ) { char volatile c ; int ret ; char const *end ; long tmp ; int __gu_err ; unsigned long __gu_val ; int tmp___0 ; int __gu_err___0 ; unsigned long __gu_val___0 ; int tmp___1 ; { ret = 0; end = uaddr + ((unsigned long )size + 0xffffffffffffffffUL); tmp = ldv__builtin_expect(size == 0, 0L); if (tmp != 0L) { return (ret); } else { } goto ldv_34253; ldv_34252: __gu_err = 0; switch (1UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %2,%b1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorb %b1,%b1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=q" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34246; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 
0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %2,%w1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=r" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34246; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %2,%k1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorl %k1,%k1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=r" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34246; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 
663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err), "=r" (__gu_val): "m" (*((struct __large_struct *)uaddr)), "i" (-14), "0" (__gu_err)); goto ldv_34246; default: tmp___0 = __get_user_bad(); __gu_val = (unsigned long )tmp___0; } ldv_34246: c = (char )__gu_val; ret = __gu_err; if (ret != 0) { return (ret); } else { } uaddr = uaddr + 4096UL; ldv_34253: ; if ((unsigned long )uaddr <= (unsigned long )end) { goto ldv_34252; } else { } if ((((unsigned long )uaddr ^ (unsigned long )end) & 0xfffffffffffff000UL) == 0UL) { __gu_err___0 = 0; switch (1UL) { case 1UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovb %2,%b1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section 
.fixup,\"ax\"\n3:\tmov %3,%0\n\txorb %b1,%b1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=q" (__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34258; case 2UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovw %2,%w1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=r" (__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34258; case 4UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovl %2,%k1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 
663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorl %k1,%k1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=r" (__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34258; case 8UL: __asm__ volatile ("661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xcb\n6651:\n\t.popsection\n1:\tmovq %2,%1\n2: 661:\n\t\n662:\n.skip -(((6651f-6641f)-(662b-661b)) > 0) * ((6651f-6641f)-(662b-661b)),0x90\n663:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6641f - .\n .word ( 9*32+20)\n .byte 663b-661b\n .byte 6651f-6641f\n .byte 663b-662b\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6641:\n\t.byte 0x0f,0x01,0xca\n6651:\n\t.popsection\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .pushsection \"__ex_table\",\"a\"\n .balign 8\n .long (1b) - .\n .long (3b) - .\n .popsection\n": "=r" (__gu_err___0), "=r" (__gu_val___0): "m" (*((struct __large_struct *)end)), "i" (-14), "0" (__gu_err___0)); goto ldv_34258; default: tmp___1 = __get_user_bad(); __gu_val___0 = (unsigned long )tmp___1; } ldv_34258: c = (char )__gu_val___0; ret = __gu_err___0; } else { } return (ret); } } __inline static unsigned long drm_vma_node_size(struct drm_vma_offset_node *node ) { { return ((unsigned long )node->vm_node.size); } } __inline static bool drm_vma_node_has_offset(struct drm_vma_offset_node *node ) { bool tmp ; { tmp = drm_mm_node_allocated(& node->vm_node); return (tmp); } } __inline static __u64 drm_vma_node_offset_addr(struct 
drm_vma_offset_node *node ) { { return (node->vm_node.start << 12); } } __inline static void drm_vma_node_unmap(struct drm_vma_offset_node *node , struct address_space *file_mapping ) { unsigned long tmp ; __u64 tmp___0 ; bool tmp___1 ; { tmp___1 = drm_vma_node_has_offset(node); if ((int )tmp___1) { tmp = drm_vma_node_size(node); tmp___0 = drm_vma_node_offset_addr(node); unmap_mapping_range(file_mapping, (loff_t const )tmp___0, (loff_t const )(tmp << 12), 1); } else { } return; } } extern void drm_clflush_sg(struct sg_table * ) ; extern void drm_prime_gem_destroy(struct drm_gem_object * , struct sg_table * ) ; extern struct drm_dma_handle *drm_pci_alloc(struct drm_device * , size_t , size_t ) ; extern void drm_pci_free(struct drm_device * , struct drm_dma_handle * ) ; __inline static bool drm_can_sleep___2(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39651; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39651; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39651; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39651; default: __bad_percpu_size(); } ldv_39651: pscr_ret__ = pfo_ret__; goto ldv_39657; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39661; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39661; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39661; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39661; 
default: __bad_percpu_size(); } ldv_39661: pscr_ret__ = pfo_ret_____0; goto ldv_39657; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39670; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39670; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39670; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39670; default: __bad_percpu_size(); } ldv_39670: pscr_ret__ = pfo_ret_____1; goto ldv_39657; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39679; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39679; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39679; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39679; default: __bad_percpu_size(); } ldv_39679: pscr_ret__ = pfo_ret_____2; goto ldv_39657; default: __bad_size_call_parameter(); goto ldv_39657; } ldv_39657: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___7(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } __inline static u32 intel_ring_sync_index(struct intel_engine_cs *ring , struct intel_engine_cs *other ) { int idx ; { idx = (int )((unsigned int )(((long )other - (long )ring) / 4792L) + 4294967295U); if (idx < 0) { idx = idx + 5; } else { } return ((u32 )idx); } } void intel_stop_ring_buffer(struct intel_engine_cs *ring ) ; void intel_cleanup_ring_buffer(struct intel_engine_cs *ring ) ; int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request ) ; int intel_ring_idle(struct intel_engine_cs *ring ) ; void intel_ring_init_seqno(struct intel_engine_cs *ring , u32 seqno ) ; int 
intel_ring_flush_all_caches(struct intel_engine_cs *ring ) ; int intel_init_render_ring_buffer(struct drm_device *dev ) ; int intel_init_bsd_ring_buffer(struct drm_device *dev ) ; int intel_init_bsd2_ring_buffer(struct drm_device *dev ) ; int intel_init_blt_ring_buffer(struct drm_device *dev ) ; int intel_init_vebox_ring_buffer(struct drm_device *dev ) ; __inline static u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf ) { { return (ringbuf->tail); } } __inline static struct drm_i915_gem_request *intel_ring_get_request___0(struct intel_engine_cs *ring ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )ring->outstanding_lazy_request == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/intel_ringbuffer.h"), "i" (450), "i" (12UL)); ldv_40740: ; goto ldv_40740; } else { } return (ring->outstanding_lazy_request); } } int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request , struct intel_context *ctx ) ; void intel_logical_ring_stop(struct intel_engine_cs *ring ) ; void intel_logical_ring_cleanup(struct intel_engine_cs *ring ) ; int intel_logical_rings_init(struct drm_device *dev ) ; int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf , struct intel_context *ctx ) ; void intel_lr_context_unpin(struct intel_engine_cs *ring , struct intel_context *ctx ) ; int intel_sanitize_enable_execlists(struct drm_device *dev , int enable_execlists ) ; int intel_execlists_submission(struct drm_device *dev , struct drm_file *file , struct intel_engine_cs *ring , struct intel_context *ctx , struct drm_i915_gem_execbuffer2 *args , struct list_head *vmas , struct drm_i915_gem_object *batch_obj , u64 exec_start , u32 dispatch_flags ) ; void 
intel_execlists_retire_requests(struct intel_engine_cs *ring ) ; __inline static void i915_ppgtt_put___0(struct i915_hw_ppgtt *ppgtt ) { { if ((unsigned long )ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { kref_put___5(& ppgtt->ref, & i915_ppgtt_release); } else { } return; } } __inline static bool i915_ggtt_view_equal(struct i915_ggtt_view const *a , struct i915_ggtt_view const *b ) { int __ret_warn_on ; long tmp ; long tmp___0 ; int tmp___1 ; { __ret_warn_on = (unsigned long )a == (unsigned long )((struct i915_ggtt_view const *)0) || (unsigned long )b == (unsigned long )((struct i915_ggtt_view const *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_gem_gtt.h", 503); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (0); } else { } if ((unsigned int )a->type != (unsigned int )b->type) { return (0); } else { } if ((unsigned int )a->type == 2U) { tmp___1 = memcmp((void const *)(& a->params), (void const *)(& b->params), 16UL); return (tmp___1 == 0); } else { } return (1); } } extern bool intel_enable_gtt(void) ; extern void drm_gem_object_release(struct drm_gem_object * ) ; extern int drm_gem_object_init(struct drm_device * , struct drm_gem_object * , size_t ) ; __inline static void drm_gem_object_reference___2(struct drm_gem_object *obj ) { { kref_get___5(& obj->refcount); return; } } __inline static void drm_gem_object_unreference___5(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___5(& obj->refcount, & drm_gem_object_free); } else { } return; } } __inline static void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) { return; } else { } dev = obj->dev; tmp = 
kref_put_mutex___0(& obj->refcount, & drm_gem_object_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { lock_acquire(& dev->struct_mutex.dep_map, 0U, 0, 0, 1, (struct lockdep_map *)0, 0UL); lock_release(& dev->struct_mutex.dep_map, 0, 0UL); } return; } } extern int drm_gem_handle_create(struct drm_file * , struct drm_gem_object * , u32 * ) ; extern void drm_gem_free_mmap_offset(struct drm_gem_object * ) ; extern int drm_gem_create_mmap_offset(struct drm_gem_object * ) ; extern struct drm_gem_object *drm_gem_object_lookup(struct drm_device * , struct drm_file * , u32 ) ; void i915_gem_track_fb(struct drm_i915_gem_object *old , struct drm_i915_gem_object *new , unsigned int frontbuffer_bits ) ; int i915_gem_request_alloc(struct intel_engine_cs *ring , struct intel_context *ctx ) ; __inline static struct drm_i915_gem_request *i915_gem_request_reference___1(struct drm_i915_gem_request *req ) { { if ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0)) { kref_get___5(& req->ref); } else { } return (req); } } __inline static void i915_gem_request_unreference___0(struct drm_i915_gem_request *req ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& ((req->ring)->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2216, "WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); kref_put___5(& req->ref, & i915_gem_request_free); return; } } __inline static void i915_gem_request_unreference__unlocked___0(struct drm_i915_gem_request *req ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0)) { return; } else { } dev = (req->ring)->dev; tmp = kref_put_mutex___0(& req->ref, & 
i915_gem_request_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { } return; } } __inline static void i915_gem_request_assign___0(struct drm_i915_gem_request **pdst , struct drm_i915_gem_request *src ) { { if ((unsigned long )src != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_reference___1(src); } else { } if ((unsigned long )*pdst != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_unreference___0(*pdst); } else { } *pdst = src; return; } } void intel_fb_obj_flush(struct drm_i915_gem_object *obj , bool retire ) ; void intel_mark_busy(struct drm_device *dev ) ; void intel_mark_idle(struct drm_device *dev ) ; struct tracepoint __tracepoint_i915_gem_object_create ; __inline static void trace_i915_gem_object_create(struct drm_i915_gem_object *obj ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_308 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_310 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_object_create.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_create.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 108, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45213: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * ))it_func))(__data, obj); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45213; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_create.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 108, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_vma_bind ; __inline static void trace_i915_vma_bind(struct i915_vma *vma , unsigned int flags ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_312 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_314 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_vma_bind.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_vma_bind.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 134, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45266: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_vma * , unsigned int ))it_func))(__data, vma, flags); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45266; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_vma_bind.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 134, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_vma_unbind ; __inline static void trace_i915_vma_unbind(struct i915_vma *vma ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_316 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_318 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_vma_unbind.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_vma_unbind.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 156, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45320: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct i915_vma * ))it_func))(__data, vma); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45320; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_vma_unbind.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 156, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } __inline static void trace_i915_gem_object_change_domain___0(struct drm_i915_gem_object *obj , u32 old_read , u32 old_write ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_332___0 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_334___0 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_object_change_domain.key); if ((int )tmp___1) { 
rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_change_domain.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 279, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45583: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * , u32 , u32 ))it_func))(__data, obj, old_read, old_write); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45583; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_change_domain.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_object_pwrite (expanded from
 * i915_trace.h:299).  These lines also carry the RCU-unlock tail of the preceding
 * change_domain stub and the opening of the pread stub; their code is untouched.
 * Shape of every stub of this family: if the tracepoint's static key is enabled,
 * take the sched-RCU read lock, READ_ONCE the tracepoint's funcs array, emit a
 * lockdep "suspicious rcu_dereference_check()" warning once if the RCU-sched lock
 * is not held, then walk the NULL-terminated callback array invoking each
 * (data, obj, offset, len) handler.  Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 279, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_object_pwrite ; __inline static void trace_i915_gem_object_pwrite(struct drm_i915_gem_object *obj , u32 offset , u32 len ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_336 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_338 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_object_pwrite.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_pwrite.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 299, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45644: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * , u32 , u32 ))it_func))(__data, obj, offset, len); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45644; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_pwrite.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_object_pread (expanded from
 * i915_trace.h:319).  Lines also carry the tail of the pwrite stub and the opening
 * of the fault stub; code bytes untouched.  Pattern: static-key gate, sched-RCU
 * read lock, READ_ONCE of the funcs array with a one-shot lockdep RCU warning,
 * then invoke each registered (data, obj, offset, len) callback until a NULL
 * func terminates the array.  Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 299, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_object_pread ; __inline static void trace_i915_gem_object_pread(struct drm_i915_gem_object *obj , u32 offset , u32 len ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_340 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_342 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_object_pread.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_pread.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 319, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45705: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * , u32 , u32 ))it_func))(__data, obj, offset, len); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45705; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_pread.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_object_fault (expanded from
 * i915_trace.h:344).  Lines also carry the tail of the pread stub and the opening
 * of the clflush stub; code bytes untouched.  Same static-key/RCU-sched/READ_ONCE
 * pattern; callbacks are invoked as (data, obj, index, gtt, write) with the two
 * bools narrowed through (int) casts.  Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 319, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_object_fault ; __inline static void trace_i915_gem_object_fault(struct drm_i915_gem_object *obj , u32 index , bool gtt , bool write ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_344 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_346 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_object_fault.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_fault.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 344, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45768: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * , u32 , bool , bool ))it_func))(__data, obj, index, (int )gtt, (int )write); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45768; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_fault.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_object_clflush (expanded from
 * i915_trace.h:364).  Lines also carry the tail of the fault stub and the opening of
 * the destroy stub; code bytes untouched.  Same static-key/RCU-sched/READ_ONCE
 * pattern; callbacks are invoked as (data, obj).  Machine-generated for LDV -
 * do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 344, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_object_clflush ; __inline static void trace_i915_gem_object_clflush(struct drm_i915_gem_object *obj ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_348 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_350 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_object_clflush.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_clflush.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 364, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45828: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * ))it_func))(__data, obj); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45828; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_clflush.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_object_destroy (expanded from
 * i915_trace.h:369).  Lines also carry the tail of the clflush stub and the opening
 * of the ring_sync_to stub; code bytes untouched.  Same static-key/RCU-sched/
 * READ_ONCE pattern; callbacks are invoked as (data, obj).  Machine-generated for
 * LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 364, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_object_destroy ; __inline static void trace_i915_gem_object_destroy(struct drm_i915_gem_object *obj ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_352 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_354 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_object_destroy.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_destroy.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 369, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45879: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_object * ))it_func))(__data, obj); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45879; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_object_destroy.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_ring_sync_to (expanded from
 * i915_trace.h:450).  Lines also carry the tail of the destroy stub and the opening
 * of the request_add stub; code bytes untouched.  Same static-key/RCU-sched/
 * READ_ONCE pattern; callbacks are invoked as (data, from, to, req).
 * Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 369, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_ring_sync_to ; __inline static void trace_i915_gem_ring_sync_to(struct intel_engine_cs *from , struct intel_engine_cs *to , struct drm_i915_gem_request *req ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_368 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_370 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_ring_sync_to.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_ring_sync_to.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 450, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46102: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_engine_cs * , struct intel_engine_cs * , struct drm_i915_gem_request * ))it_func))(__data, from, to, req); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46102; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_ring_sync_to.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_request_add (expanded from
 * i915_trace.h:525).  Lines also carry the tail of the ring_sync_to stub and the
 * opening of the request_retire stub; code bytes untouched.  Same static-key/
 * RCU-sched/READ_ONCE pattern; callbacks are invoked as (data, req).
 * Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 450, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_request_add ; __inline static void trace_i915_gem_request_add(struct drm_i915_gem_request *req ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_380 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_382 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_request_add.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_add.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 525, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46276: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_request * ))it_func))(__data, req); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46276; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_add.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_request_retire (expanded from
 * i915_trace.h:550).  Lines also carry the tail of the request_add stub and the
 * opening of the request_wait_begin stub; code bytes untouched.  Same static-key/
 * RCU-sched/READ_ONCE pattern; callbacks are invoked as (data, req).
 * Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 525, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_request_retire ; __inline static void trace_i915_gem_request_retire(struct drm_i915_gem_request *req ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_388 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_390 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_request_retire.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_retire.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 550, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46378: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_request * ))it_func))(__data, req); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46378; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_retire.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_request_wait_begin (expanded
 * from i915_trace.h:587).  Lines also carry the tail of the request_retire stub and
 * the opening of the request_wait_end stub; code bytes untouched.  Same static-key/
 * RCU-sched/READ_ONCE pattern; callbacks are invoked as (data, req).
 * Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 550, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_request_wait_begin ; __inline static void trace_i915_gem_request_wait_begin(struct drm_i915_gem_request *req ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_396 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_398 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_request_wait_begin.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_wait_begin.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 587, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46480: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_request * ))it_func))(__data, req); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46480; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_wait_begin.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* CIL-flattened inline tracepoint stub trace_i915_gem_request_wait_end (expanded
 * from i915_trace.h:592).  Lines also carry the tail of the request_wait_begin stub;
 * code bytes untouched.  Same static-key/RCU-sched/READ_ONCE pattern; callbacks are
 * invoked as (data, req).  Machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 587, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_i915_gem_request_wait_end ; __inline static void trace_i915_gem_request_wait_end(struct drm_i915_gem_request *req ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_400 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_402 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___5(& __tracepoint_i915_gem_request_wait_end.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_wait_end.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___5(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 592, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46531: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_request * ))it_func))(__data, req); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46531; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_wait_end.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* Tail of the request_wait_end tracepoint stub, followed by the i915 GEM ioctl and
 * helper prototypes, then two CIL-expanded inline helpers:
 *  - i915_gem_object_pin_pages___1: BUG_ON-style trap (ud2 + __bug_table entry,
 *    then an infinite goto loop) if obj->pages is NULL, else increments
 *    obj->pages_pin_count.
 *  - i915_gem_object_unpin_pages___2: symmetric trap if pages_pin_count is already
 *    0, else decrements it.
 * The opening of i915_gem_request_completed___1 (NULL-req trap, then seqno compare)
 * also begins here and completes on the following line.  Code bytes untouched;
 * machine-generated for LDV - do not hand-edit. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___5(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 592, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } void i915_queue_hangcheck(struct drm_device *dev ) ; int i915_gem_create_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_pread_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_pwrite_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_mmap_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_mmap_gtt_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_set_domain_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_sw_finish_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_busy_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_get_caching_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_set_caching_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_throttle_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ; int i915_gem_madvise_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ; int i915_gem_init_userptr(struct drm_device *dev ) ; int i915_gem_get_aperture_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_wait_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; void i915_gem_load(struct drm_device *dev ) ; void i915_gem_object_free(struct drm_i915_gem_object *obj ) ; void i915_gem_vma_destroy(struct i915_vma *vma ) ; int i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj , struct i915_ggtt_view 
const *view , uint32_t alignment , uint64_t flags ) ; int i915_gem_object_put_pages(struct drm_i915_gem_object *obj ) ; void i915_gem_release_mmap(struct drm_i915_gem_object *obj ) ; __inline static void i915_gem_object_pin_pages___1(struct drm_i915_gem_object *obj ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )obj->pages == (unsigned long )((struct sg_table *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2773), "i" (12UL)); ldv_47363: ; goto ldv_47363; } else { } obj->pages_pin_count = obj->pages_pin_count + 1; return; } } __inline static void i915_gem_object_unpin_pages___2(struct drm_i915_gem_object *obj ) { long tmp ; { tmp = ldv__builtin_expect(obj->pages_pin_count == 0, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2778), "i" (12UL)); ldv_47367: ; goto ldv_47367; } else { } obj->pages_pin_count = obj->pages_pin_count - 1; return; } } __inline static bool i915_gem_request_completed___1(struct drm_i915_gem_request *req , bool lazy_coherency ) { u32 seqno ; long tmp ; bool tmp___0 ; { tmp = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2806), "i" (12UL)); ldv_47394: ; goto ldv_47394; } else { } seqno = 
/* Tail of i915_gem_request_completed___1: fetch the current seqno via the engine's
 * get_seqno vfunc and return i915_seqno_passed(seqno, req->seqno).  Then further
 * prototypes plus small CIL-expanded inline helpers, code bytes untouched:
 *  - i915_terminally_wedged: true if the sign bit of error->reset_counter is set.
 *  - i915_stop_ring_allow_ban: true if gpu_error.stop_rings is 0 or negative when
 *    read as signed.
 *  - i915_gem_context_reference___1 / _unreference___1: kref_get / kref_put (with
 *    i915_gem_context_free as release) on ctx->ref.
 *  - i915_gem_object_needs_bit17_swizzle: bit_6_swizzle_x == 7 combined with a
 *    bitfield read at byte offset 409 of the object (CIL-lowered flag access).
 *  - nsecs_to_jiffies_timeout (completes on the next line): min of a large clamp
 *    constant and nsecs_to_jiffies64(n) + 1.
 * Machine-generated for LDV - do not hand-edit. */
(*((req->ring)->get_seqno))(req->ring, (int )lazy_coherency); tmp___0 = i915_seqno_passed(seqno, req->seqno); return (tmp___0); } } int i915_gem_get_seqno(struct drm_device *dev , u32 *seqno ) ; struct drm_i915_gem_request *i915_gem_find_active_request(struct intel_engine_cs *ring ) ; int i915_gem_check_wedge(struct i915_gpu_error *error , bool interruptible ) ; int i915_gem_check_olr(struct drm_i915_gem_request *req ) ; __inline static bool i915_terminally_wedged(struct i915_gpu_error *error ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& error->reset_counter)); return (((long )tmp & (-0x7FFFFFFF-1)) != 0L); } } __inline static bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv ) { { return ((bool )(dev_priv->gpu_error.stop_rings == 0U || (int )dev_priv->gpu_error.stop_rings < 0)); } } int i915_gem_init(struct drm_device *dev ) ; int i915_gem_init_rings(struct drm_device *dev ) ; void i915_gem_cleanup_ringbuffer(struct drm_device *dev ) ; int __i915_wait_request(struct drm_i915_gem_request *req , unsigned int reset_counter , bool interruptible , s64 *timeout , struct intel_rps_client *rps ) ; int i915_wait_request(struct drm_i915_gem_request *req ) ; int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj , bool readonly ) ; int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj , u32 alignment , struct intel_engine_cs *pipelined , struct i915_ggtt_view const *view ) ; void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) ; int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj , int align ) ; int i915_gem_open(struct drm_device *dev , struct drm_file *file ) ; void i915_gem_release(struct drm_device *dev , struct drm_file *file ) ; uint32_t i915_gem_get_gtt_size(struct drm_device *dev , uint32_t size , int tiling_mode ) ; uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev , uint32_t size , int tiling_mode , bool fenced ) ; bool 
i915_gem_obj_bound_any(struct drm_i915_gem_object *o ) ; bool i915_gem_obj_bound(struct drm_i915_gem_object *o , struct i915_address_space *vm ) ; __inline static void i915_gem_context_reference___1(struct intel_context *ctx ) { { kref_get___5(& ctx->ref); return; } } __inline static void i915_gem_context_unreference___1(struct intel_context *ctx ) { { kref_put___5(& ctx->ref, & i915_gem_context_free); return; } } unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv ) ; void i915_gem_shrinker_init(struct drm_i915_private *dev_priv ) ; __inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; return ((bool )(dev_priv->mm.bit_6_swizzle_x == 7U && (unsigned int )*((unsigned char *)obj + 409UL) != 0U)); } } void i915_gem_detect_bit_6_swizzle(struct drm_device *dev ) ; void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj ) ; void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj ) ; __inline static unsigned long nsecs_to_jiffies_timeout(u64 const n ) { u64 __min1 ; u64 __min2 ; u64 tmp ; { __min1 = 4611686018427387902ULL; tmp = nsecs_to_jiffies64(n); __min2 = tmp + 1ULL; return ((unsigned long )(__min1 < __min2 ? 
/* Tail of nsecs_to_jiffies_timeout, shmem externs, then the start of i915_gem.c
 * proper, code bytes untouched:
 *  - shmem_read_mapping_page: wrapper that reads a page via
 *    shmem_read_mapping_page_gfp using the mapping's gfp mask.
 *  - cpu_cache_is_coherent: coherent if a flag byte at offset 46 of the device
 *    private is set (CIL-lowered bitfield, presumably has_llc - TODO confirm) or
 *    the cache level is not 0 (uncached).
 *  - cpu_write_needs_clflush: true when the cache is not coherent or the object
 *    is pinned for display.
 *  - i915_gem_object_fence_lost: drops the GTT mmap if a flag bitfield at object
 *    offset 409 is set, clears fence_dirty, sets fence_reg = -1.
 *  - i915_gem_info_add_obj: bumps object count/memory under object_stat_lock.
 *  - i915_gem_info_remove_obj: the symmetric decrement.
 *  - i915_gem_wait_for_error (completes on the next line): CIL expansion of a
 *    2500-jiffy wait_event_timeout on error->reset_queue until a reset is no
 *    longer in progress or the GPU is terminally wedged.
 * Machine-generated for LDV - do not hand-edit. */
__min1 : __min2)); } } extern void mark_page_accessed(struct page * ) ; extern struct page *shmem_read_mapping_page_gfp(struct address_space * , unsigned long , gfp_t ) ; extern void shmem_truncate_range(struct inode * , loff_t , loff_t ) ; __inline static struct page *shmem_read_mapping_page(struct address_space *mapping , unsigned long index ) { gfp_t tmp ; struct page *tmp___0 ; { tmp = mapping_gfp_mask(mapping); tmp___0 = shmem_read_mapping_page_gfp(mapping, index, tmp); return (tmp___0); } } static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj ) ; static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj ) ; static void i915_gem_object_retire__write(struct drm_i915_gem_object *obj ) ; static void i915_gem_object_retire__read(struct drm_i915_gem_object *obj , int ring ) ; static void i915_gem_write_fence(struct drm_device *dev , int reg , struct drm_i915_gem_object *obj ) ; static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj , struct drm_i915_fence_reg *fence , bool enable ) ; static bool cpu_cache_is_coherent(struct drm_device *dev , enum i915_cache_level level ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); return ((bool )((unsigned int )*((unsigned char *)__p + 46UL) != 0U || (unsigned int )level != 0U)); } } static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj ) { bool tmp ; int tmp___0 ; { tmp = cpu_cache_is_coherent(obj->base.dev, (enum i915_cache_level )obj->cache_level); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (1); } else { } return (obj->pin_display != 0U); } } __inline static void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj ) { { if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { i915_gem_release_mmap(obj); } else { } obj->fence_dirty = 0U; obj->fence_reg = -1; return; } } static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv , size_t size ) { { spin_lock(& 
dev_priv->mm.object_stat_lock); dev_priv->mm.object_count = dev_priv->mm.object_count + 1U; dev_priv->mm.object_memory = dev_priv->mm.object_memory + size; spin_unlock(& dev_priv->mm.object_stat_lock); return; } } static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv , size_t size ) { { spin_lock(& dev_priv->mm.object_stat_lock); dev_priv->mm.object_count = dev_priv->mm.object_count - 1U; dev_priv->mm.object_memory = dev_priv->mm.object_memory - size; spin_unlock(& dev_priv->mm.object_stat_lock); return; } } static int i915_gem_wait_for_error(struct i915_gpu_error *error ) { int ret ; bool tmp ; int tmp___0 ; bool tmp___1 ; long __ret ; wait_queue_t __wait ; long __ret___0 ; long __int ; long tmp___2 ; bool __cond ; bool tmp___3 ; int tmp___4 ; bool tmp___5 ; int tmp___6 ; bool __cond___0 ; bool tmp___7 ; int tmp___8 ; bool tmp___9 ; int tmp___10 ; { tmp = i915_reset_in_progress(error); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { tmp___1 = i915_terminally_wedged(error); if ((int )tmp___1) { return (0); } else { } } __ret = 2500L; __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 117, 0); tmp___7 = i915_reset_in_progress(error); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { tmp___10 = 1; } else { tmp___9 = i915_terminally_wedged(error); if ((int )tmp___9) { tmp___10 = 1; } else { tmp___10 = 0; } } __cond___0 = (bool )tmp___10; if ((int )__cond___0 && __ret == 0L) { __ret = 1L; } else { } if (((int )__cond___0 || __ret == 0L) == 0) { __ret___0 = 2500L; INIT_LIST_HEAD(& __wait.task_list); __wait.flags = 0U; ldv_49518: tmp___2 = prepare_to_wait_event(& error->reset_queue, & __wait, 1); __int = tmp___2; tmp___3 = i915_reset_in_progress(error); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { tmp___6 = 
1; } else { tmp___5 = i915_terminally_wedged(error); if ((int )tmp___5) { tmp___6 = 1; } else { tmp___6 = 0; } } __cond = (bool )tmp___6; if ((int )__cond && __ret___0 == 0L) { __ret___0 = 1L; } else { } if (((int )__cond || __ret___0 == 0L) != 0) { goto ldv_49517; } else { } if (__int != 0L) { __ret___0 = __int; goto ldv_49517; } else { } __ret___0 = schedule_timeout(__ret___0); goto ldv_49518; ldv_49517: finish_wait(& error->reset_queue, & __wait); __ret = __ret___0; } else { } ret = (int )__ret; if (ret == 0) { drm_err("Timed out waiting for the gpu reset to complete\n"); return (-5); } else if (ret < 0) { return (ret); } else { } return (0); } } int i915_mutex_lock_interruptible(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; int __ret_warn_on ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = i915_gem_wait_for_error(& dev_priv->gpu_error); if (ret != 0) { return (ret); } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } __ret_warn_on = 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 142, "WARN_ON(i915_verify_lists(dev))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (0); } } int i915_gem_get_aperture_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_get_aperture *args ; struct drm_i915_gem_object *obj ; size_t pinned ; struct list_head const *__mptr ; unsigned long tmp ; bool tmp___0 ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; args = (struct drm_i915_gem_get_aperture *)data; pinned = 0UL; mutex_lock_nested(& dev->struct_mutex, 0U); __mptr = (struct 
list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_49542; ldv_49541: tmp___0 = i915_gem_obj_is_pinned(obj); if ((int )tmp___0) { tmp = i915_gem_obj_ggtt_size(obj); pinned = tmp + pinned; } else { } __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_49542: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_49541; } else { } mutex_unlock(& dev->struct_mutex); args->aper_size = (__u64 )dev_priv->gtt.base.total; args->aper_available_size = args->aper_size - (unsigned long long )pinned; return (0); } } static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj ) { struct address_space *mapping ; struct inode *tmp ; char *vaddr ; struct sg_table *st ; struct scatterlist *sg ; int i ; int __ret_warn_on ; bool tmp___0 ; long tmp___1 ; long tmp___2 ; struct page *page ; char *src ; long tmp___3 ; bool tmp___4 ; void *tmp___5 ; void *tmp___6 ; int tmp___7 ; { tmp = file_inode((struct file const *)obj->base.filp); mapping = tmp->i_mapping; vaddr = (char *)(obj->__annonCompField84.phys_handle)->vaddr; tmp___0 = i915_gem_object_needs_bit17_swizzle(obj); __ret_warn_on = (int )tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 177, "WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (-22); } else { } i = 0; goto ldv_49557; ldv_49556: page = shmem_read_mapping_page(mapping, (unsigned long )i); tmp___4 = IS_ERR((void const *)page); if ((int )tmp___4) { tmp___3 = PTR_ERR((void const *)page); return ((int 
)tmp___3); } else { } tmp___5 = kmap_atomic(page); src = (char *)tmp___5; memcpy((void *)vaddr, (void const *)src, 4096UL); drm_clflush_virt_range((void *)vaddr, 4096UL); __kunmap_atomic((void *)src); put_page(page); vaddr = vaddr + 4096UL; i = i + 1; ldv_49557: ; if ((unsigned long )i < obj->base.size / 4096UL) { goto ldv_49556; } else { } i915_gem_chipset_flush(obj->base.dev); tmp___6 = kmalloc(16UL, 208U); st = (struct sg_table *)tmp___6; if ((unsigned long )st == (unsigned long )((struct sg_table *)0)) { return (-12); } else { } tmp___7 = sg_alloc_table(st, 1U, 208U); if (tmp___7 != 0) { kfree((void const *)st); return (-12); } else { } sg = st->sgl; sg->offset = 0U; sg->length = (unsigned int )obj->base.size; sg->dma_address = (obj->__annonCompField84.phys_handle)->busaddr; sg->dma_length = (unsigned int )obj->base.size; obj->pages = st; obj->has_dma_mapping = 1U; return (0); } } static void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj ) { int ret ; long tmp ; int __ret_warn_on ; long tmp___0 ; uint32_t tmp___1 ; struct address_space *mapping ; struct inode *tmp___2 ; char *vaddr ; int i ; struct page *page ; char *dst ; bool tmp___3 ; void *tmp___4 ; { tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)obj + 409UL) == 32U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (225), "i" (12UL)); ldv_49563: ; goto ldv_49563; } else { } ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret != 0) { __ret_warn_on = ret != -5; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 232, "WARN_ON(ret != -EIO)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = 1U; obj->base.write_domain = tmp___1; obj->base.read_domains = tmp___1; } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) == 16U) { obj->dirty = 0U; } else { } if ((unsigned int )*((unsigned char *)obj + 408UL) != 0U) { tmp___2 = file_inode((struct file const *)obj->base.filp); mapping = tmp___2->i_mapping; vaddr = (char *)(obj->__annonCompField84.phys_handle)->vaddr; i = 0; goto ldv_49573; ldv_49572: page = shmem_read_mapping_page(mapping, (unsigned long )i); tmp___3 = IS_ERR((void const *)page); if ((int )tmp___3) { goto ldv_49571; } else { } tmp___4 = kmap_atomic(page); dst = (char *)tmp___4; drm_clflush_virt_range((void *)vaddr, 4096UL); memcpy((void *)dst, (void const *)vaddr, 4096UL); __kunmap_atomic((void *)dst); set_page_dirty(page); if ((unsigned int )*((unsigned char *)obj + 409UL) == 0U) { mark_page_accessed(page); } else { } put_page(page); vaddr = vaddr + 4096UL; ldv_49571: i = i + 1; ldv_49573: ; if ((unsigned long )i < obj->base.size / 4096UL) { goto ldv_49572; } else { } obj->dirty = 0U; } else { } sg_free_table(obj->pages); kfree((void const *)obj->pages); obj->has_dma_mapping = 0U; return; } } static void i915_gem_object_release_phys(struct drm_i915_gem_object *obj ) { { drm_pci_free(obj->base.dev, obj->__annonCompField84.phys_handle); return; } } static struct drm_i915_gem_object_ops const i915_gem_phys_ops = {& i915_gem_object_get_pages_phys, & i915_gem_object_put_pages_phys, 0, & i915_gem_object_release_phys}; static int drop_pages(struct drm_i915_gem_object *obj ) { struct i915_vma *vma ; struct i915_vma *next ; int ret ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; struct 
list_head const *__mptr___1 ; { drm_gem_object_reference___2(& obj->base); __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; __mptr___0 = (struct list_head const *)vma->vma_link.next; next = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; goto ldv_49593; ldv_49592: tmp = i915_vma_unbind(vma); if (tmp != 0) { goto ldv_49591; } else { } vma = next; __mptr___1 = (struct list_head const *)next->vma_link.next; next = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; ldv_49593: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_49592; } else { } ldv_49591: ret = i915_gem_object_put_pages(obj); drm_gem_object_unreference___5(& obj->base); return (ret); } } int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj , int align ) { drm_dma_handle_t *phys ; int ret ; struct drm_dma_handle *tmp ; int tmp___0 ; { if ((unsigned long )obj->__annonCompField84.phys_handle != (unsigned long )((struct drm_dma_handle *)0)) { if (((unsigned long )(obj->__annonCompField84.phys_handle)->vaddr & (unsigned long )(align + -1)) != 0UL) { return (-16); } else { } return (0); } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { return (-14); } else { } if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { return (-22); } else { } ret = drop_pages(obj); if (ret != 0) { return (ret); } else { } tmp = drm_pci_alloc(obj->base.dev, obj->base.size, (size_t )align); phys = tmp; if ((unsigned long )phys == (unsigned long )((drm_dma_handle_t *)0)) { return (-12); } else { } obj->__annonCompField84.phys_handle = phys; obj->ops = & i915_gem_phys_ops; tmp___0 = i915_gem_object_get_pages(obj); return (tmp___0); } } static int i915_gem_phys_pwrite(struct drm_i915_gem_object *obj , struct drm_i915_gem_pwrite *args , struct drm_file *file_priv ) { struct drm_device *dev ; void *vaddr ; char *user_data ; void *tmp ; int ret ; unsigned long unwritten ; int 
tmp___0 ; { dev = obj->base.dev; vaddr = (obj->__annonCompField84.phys_handle)->vaddr + args->offset; tmp = to_user_ptr(args->data_ptr); user_data = (char *)tmp; ret = 0; ret = i915_gem_object_wait_rendering(obj, 0); if (ret != 0) { return (ret); } else { } intel_fb_obj_invalidate(obj, (struct intel_engine_cs *)0, 1); tmp___0 = __copy_from_user_inatomic_nocache(vaddr, (void const *)user_data, (unsigned int )args->size); if (tmp___0 != 0) { mutex_unlock(& dev->struct_mutex); unwritten = copy_from_user(vaddr, (void const *)user_data, (unsigned long )args->size); mutex_lock_nested(& dev->struct_mutex, 0U); if (unwritten != 0UL) { ret = -14; goto out; } else { } } else { } drm_clflush_virt_range(vaddr, (unsigned long )args->size); i915_gem_chipset_flush(dev); out: intel_fb_obj_flush(obj, 0); return (ret); } } void *i915_gem_object_alloc(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; void *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = kmem_cache_zalloc(dev_priv->objects, 208U); return (tmp); } } void i915_gem_object_free(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; kmem_cache_free(dev_priv->objects, (void *)obj); return; } } static int i915_gem_create(struct drm_file *file , struct drm_device *dev , uint64_t size , uint32_t *handle_p ) { struct drm_i915_gem_object *obj ; int ret ; u32 handle ; unsigned long __y ; { __y = 4096UL; size = ((((unsigned long long )__y + size) - 1ULL) / (unsigned long long )__y) * (unsigned long long )__y; if (size == 0ULL) { return (-22); } else { } obj = i915_gem_alloc_object(dev, (size_t )size); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return (-12); } else { } ret = drm_gem_handle_create(file, & obj->base, & handle); drm_gem_object_unreference_unlocked(& obj->base); if (ret != 0) { return (ret); } else { } *handle_p = handle; return (0); } } int 
i915_gem_dumb_create(struct drm_file *file , struct drm_device *dev , struct drm_mode_create_dumb *args ) { int tmp ; { args->pitch = (args->width * ((args->bpp + 7U) / 8U) + 63U) & 4294967232U; args->size = (uint64_t )(args->pitch * args->height); tmp = i915_gem_create(file, dev, args->size, & args->handle); return (tmp); } } int i915_gem_create_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_create *args ; int tmp ; { args = (struct drm_i915_gem_create *)data; tmp = i915_gem_create(file, dev, args->size, & args->handle); return (tmp); } } __inline static int __copy_to_user_swizzled(char *cpu_vaddr , char const *gpu_vaddr , int gpu_offset , int length ) { int ret ; int cpu_offset ; int cacheline_end ; int this_length ; int _min1 ; int _min2 ; int swizzled_gpu_offset ; { cpu_offset = 0; goto ldv_49656; ldv_49655: cacheline_end = (gpu_offset + 64) & -64; _min1 = cacheline_end - gpu_offset; _min2 = length; this_length = _min1 < _min2 ? _min1 : _min2; swizzled_gpu_offset = gpu_offset ^ 64; ret = __copy_to_user((void *)cpu_vaddr + (unsigned long )cpu_offset, (void const *)gpu_vaddr + (unsigned long )swizzled_gpu_offset, (unsigned int )this_length); if (ret != 0) { return (ret + length); } else { } cpu_offset = cpu_offset + this_length; gpu_offset = gpu_offset + this_length; length = length - this_length; ldv_49656: ; if (length > 0) { goto ldv_49655; } else { } return (0); } } __inline static int __copy_from_user_swizzled(char *gpu_vaddr , int gpu_offset , char const *cpu_vaddr , int length ) { int ret ; int cpu_offset ; int cacheline_end ; int this_length ; int _min1 ; int _min2 ; int swizzled_gpu_offset ; { cpu_offset = 0; goto ldv_49673; ldv_49672: cacheline_end = (gpu_offset + 64) & -64; _min1 = cacheline_end - gpu_offset; _min2 = length; this_length = _min1 < _min2 ? 
_min1 : _min2; swizzled_gpu_offset = gpu_offset ^ 64; ret = __copy_from_user((void *)gpu_vaddr + (unsigned long )swizzled_gpu_offset, (void const *)cpu_vaddr + (unsigned long )cpu_offset, (unsigned int )this_length); if (ret != 0) { return (ret + length); } else { } cpu_offset = cpu_offset + this_length; gpu_offset = gpu_offset + this_length; length = length - this_length; ldv_49673: ; if (length > 0) { goto ldv_49672; } else { } return (0); } } int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj , int *needs_clflush ) { int ret ; bool tmp ; int tmp___0 ; { *needs_clflush = 0; if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { return (-22); } else { } if ((obj->base.read_domains & 1U) == 0U) { tmp = cpu_cache_is_coherent(obj->base.dev, (enum i915_cache_level )obj->cache_level); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } *needs_clflush = tmp___0; ret = i915_gem_object_wait_rendering(obj, 1); if (ret != 0) { return (ret); } else { } } else { } ret = i915_gem_object_get_pages(obj); if (ret != 0) { return (ret); } else { } i915_gem_object_pin_pages___1(obj); return (ret); } } static int shmem_pread_fast(struct page *page , int shmem_page_offset , int page_length , char *user_data , bool page_do_bit17_swizzling , bool needs_clflush ) { char *vaddr ; int ret ; long tmp ; void *tmp___0 ; { tmp = ldv__builtin_expect((long )page_do_bit17_swizzling, 0L); if (tmp != 0L) { return (-22); } else { } tmp___0 = kmap_atomic(page); vaddr = (char *)tmp___0; if ((int )needs_clflush) { drm_clflush_virt_range((void *)vaddr + (unsigned long )shmem_page_offset, (unsigned long )page_length); } else { } ret = __copy_to_user_inatomic((void *)user_data, (void const *)vaddr + (unsigned long )shmem_page_offset, (unsigned int )page_length); __kunmap_atomic((void *)vaddr); return (ret != 0 ? 
-14 : 0); } } static void shmem_clflush_swizzled_range(char *addr , unsigned long length , bool swizzled ) { unsigned long start ; unsigned long end ; long tmp ; { tmp = ldv__builtin_expect((long )swizzled, 0L); if (tmp != 0L) { start = (unsigned long )addr; end = (unsigned long )addr + length; start = start & 0xffffffffffffff80UL; end = ((end - 1UL) | 127UL) + 1UL; drm_clflush_virt_range((void *)start, end - start); } else { drm_clflush_virt_range((void *)addr, length); } return; } } static int shmem_pread_slow(struct page *page , int shmem_page_offset , int page_length , char *user_data , bool page_do_bit17_swizzling , bool needs_clflush ) { char *vaddr ; int ret ; void *tmp ; { tmp = kmap(page); vaddr = (char *)tmp; if ((int )needs_clflush) { shmem_clflush_swizzled_range(vaddr + (unsigned long )shmem_page_offset, (unsigned long )page_length, (int )page_do_bit17_swizzling); } else { } if ((int )page_do_bit17_swizzling) { ret = __copy_to_user_swizzled(user_data, (char const *)vaddr, shmem_page_offset, page_length); } else { ret = __copy_to_user((void *)user_data, (void const *)vaddr + (unsigned long )shmem_page_offset, (unsigned int )page_length); } kunmap(page); return (ret != 0 ? 
-14 : 0); } } static int i915_gem_shmem_pread(struct drm_device *dev , struct drm_i915_gem_object *obj , struct drm_i915_gem_pread *args , struct drm_file *file ) { char *user_data ; ssize_t remain ; loff_t offset ; int shmem_page_offset ; int page_length ; int ret ; int obj_do_bit17_swizzling ; int page_do_bit17_swizzling ; int prefaulted ; int needs_clflush ; struct sg_page_iter sg_iter ; void *tmp ; bool tmp___0 ; struct page *page ; struct page *tmp___1 ; long tmp___2 ; bool tmp___3 ; { ret = 0; prefaulted = 0; needs_clflush = 0; tmp = to_user_ptr(args->data_ptr); user_data = (char *)tmp; remain = (ssize_t )args->size; tmp___0 = i915_gem_object_needs_bit17_swizzle(obj); obj_do_bit17_swizzling = (int )tmp___0; ret = i915_gem_obj_prepare_shmem_read(obj, & needs_clflush); if (ret != 0) { return (ret); } else { } offset = (loff_t )args->offset; __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, (unsigned long )(offset >> 12)); goto ldv_49729; ldv_49728: tmp___1 = sg_page_iter_page___1(& sg_iter); page = tmp___1; if (remain <= 0L) { goto ldv_49725; } else { } shmem_page_offset = (int )offset & 4095; page_length = (int )remain; if ((unsigned int )(shmem_page_offset + page_length) > 4096U) { page_length = (int )(4096U - (unsigned int )shmem_page_offset); } else { } page_do_bit17_swizzling = obj_do_bit17_swizzling != 0 && (((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) & 131072ULL) != 0ULL; ret = shmem_pread_fast(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling != 0, needs_clflush != 0); if (ret == 0) { goto next_page; } else { } mutex_unlock(& dev->struct_mutex); tmp___2 = ldv__builtin_expect((long )(! 
i915.prefault_disable), 1L); if (tmp___2 != 0L && prefaulted == 0) { ret = fault_in_multipages_writeable(user_data, (int )remain); prefaulted = 1; } else { } ret = shmem_pread_slow(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling != 0, needs_clflush != 0); mutex_lock_nested(& dev->struct_mutex, 0U); if (ret != 0) { goto out; } else { } next_page: remain = remain - (ssize_t )page_length; user_data = user_data + (unsigned long )page_length; offset = (loff_t )page_length + offset; ldv_49729: tmp___3 = __sg_page_iter_next(& sg_iter); if ((int )tmp___3) { goto ldv_49728; } else { } ldv_49725: ; out: i915_gem_object_unpin_pages___2(obj); return (ret); } } int i915_gem_pread_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_pread *args ; struct drm_i915_gem_object *obj ; int ret ; struct thread_info *tmp ; void *tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp___4 ; { args = (struct drm_i915_gem_pread *)data; ret = 0; if (args->size == 0ULL) { return (0); } else { } tmp = current_thread_info(); tmp___0 = to_user_ptr(args->data_ptr); tmp___1 = __chk_range_not_ok((unsigned long )tmp___0, (unsigned long )args->size, tmp->addr_limit.seg); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } tmp___3 = ldv__builtin_expect((long )tmp___2, 1L); if (tmp___3 == 0L) { return (-14); } else { } ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } tmp___4 = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp___4; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } if (args->offset > (unsigned long long )obj->base.size || args->size > (unsigned long long )obj->base.size - args->offset) { ret = -22; goto out; } else { } if ((unsigned long )obj->base.filp == (unsigned long 
)((struct file *)0)) { ret = -22; goto out; } else { } trace_i915_gem_object_pread(obj, (u32 )args->offset, (u32 )args->size); ret = i915_gem_shmem_pread(dev, obj, args, file); out: drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); return (ret); } } __inline static int fast_user_write(struct io_mapping *mapping , loff_t page_base , int page_offset___0 , char *user_data , int length ) { void *vaddr_atomic ; void *vaddr ; unsigned long unwritten ; int tmp ; { vaddr_atomic = io_mapping_map_atomic_wc(mapping, (unsigned long )page_base); vaddr = vaddr_atomic + (unsigned long )page_offset___0; tmp = __copy_from_user_inatomic_nocache(vaddr, (void const *)user_data, (unsigned int )length); unwritten = (unsigned long )tmp; __kunmap_atomic(vaddr_atomic); return ((int )unwritten); } } static int i915_gem_gtt_pwrite_fast(struct drm_device *dev , struct drm_i915_gem_object *obj , struct drm_i915_gem_pwrite *args , struct drm_file *file ) { struct drm_i915_private *dev_priv ; ssize_t remain ; loff_t offset ; loff_t page_base ; char *user_data ; int page_offset___0 ; int page_length ; int ret ; void *tmp ; unsigned long tmp___0 ; int tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = i915_gem_obj_ggtt_pin(obj, 0U, 3U); if (ret != 0) { goto out; } else { } ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret != 0) { goto out_unpin; } else { } ret = i915_gem_object_put_fence(obj); if (ret != 0) { goto out_unpin; } else { } tmp = to_user_ptr(args->data_ptr); user_data = (char *)tmp; remain = (ssize_t )args->size; tmp___0 = i915_gem_obj_ggtt_offset(obj); offset = (loff_t )((unsigned long long )tmp___0 + args->offset); intel_fb_obj_invalidate(obj, (struct intel_engine_cs *)0, 0); goto ldv_49771; ldv_49770: page_base = offset & -4096LL; page_offset___0 = (int )offset & 4095; page_length = (int )remain; if ((unsigned long )((ssize_t )page_offset___0 + remain) > 4096UL) { page_length = (int )(4096U - (unsigned int 
)page_offset___0); } else { } tmp___1 = fast_user_write(dev_priv->gtt.mappable, page_base, page_offset___0, user_data, page_length); if (tmp___1 != 0) { ret = -14; goto out_flush; } else { } remain = remain - (ssize_t )page_length; user_data = user_data + (unsigned long )page_length; offset = (loff_t )page_length + offset; ldv_49771: ; if (remain > 0L) { goto ldv_49770; } else { } out_flush: intel_fb_obj_flush(obj, 0); out_unpin: i915_gem_object_ggtt_unpin(obj); out: ; return (ret); } } static int shmem_pwrite_fast(struct page *page , int shmem_page_offset , int page_length , char *user_data , bool page_do_bit17_swizzling , bool needs_clflush_before , bool needs_clflush_after ) { char *vaddr ; int ret ; long tmp ; void *tmp___0 ; { tmp = ldv__builtin_expect((long )page_do_bit17_swizzling, 0L); if (tmp != 0L) { return (-22); } else { } tmp___0 = kmap_atomic(page); vaddr = (char *)tmp___0; if ((int )needs_clflush_before) { drm_clflush_virt_range((void *)vaddr + (unsigned long )shmem_page_offset, (unsigned long )page_length); } else { } ret = __copy_from_user_inatomic((void *)vaddr + (unsigned long )shmem_page_offset, (void const *)user_data, (unsigned int )page_length); if ((int )needs_clflush_after) { drm_clflush_virt_range((void *)vaddr + (unsigned long )shmem_page_offset, (unsigned long )page_length); } else { } __kunmap_atomic((void *)vaddr); return (ret != 0 ? 
-14 : 0); } } static int shmem_pwrite_slow(struct page *page , int shmem_page_offset , int page_length , char *user_data , bool page_do_bit17_swizzling , bool needs_clflush_before , bool needs_clflush_after ) { char *vaddr ; int ret ; void *tmp ; long tmp___0 ; { tmp = kmap(page); vaddr = (char *)tmp; tmp___0 = ldv__builtin_expect((long )((int )needs_clflush_before || (int )page_do_bit17_swizzling), 0L); if (tmp___0 != 0L) { shmem_clflush_swizzled_range(vaddr + (unsigned long )shmem_page_offset, (unsigned long )page_length, (int )page_do_bit17_swizzling); } else { } if ((int )page_do_bit17_swizzling) { ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, (char const *)user_data, page_length); } else { ret = __copy_from_user((void *)vaddr + (unsigned long )shmem_page_offset, (void const *)user_data, (unsigned int )page_length); } if ((int )needs_clflush_after) { shmem_clflush_swizzled_range(vaddr + (unsigned long )shmem_page_offset, (unsigned long )page_length, (int )page_do_bit17_swizzling); } else { } kunmap(page); return (ret != 0 ? 
-14 : 0); } } static int i915_gem_shmem_pwrite(struct drm_device *dev , struct drm_i915_gem_object *obj , struct drm_i915_gem_pwrite *args , struct drm_file *file ) { ssize_t remain ; loff_t offset ; char *user_data ; int shmem_page_offset ; int page_length ; int ret ; int obj_do_bit17_swizzling ; int page_do_bit17_swizzling ; int hit_slowpath ; int needs_clflush_after ; int needs_clflush_before ; struct sg_page_iter sg_iter ; void *tmp ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; int tmp___3 ; struct page *page ; struct page *tmp___4 ; int partial_cacheline_write ; bool tmp___5 ; bool tmp___6 ; { ret = 0; hit_slowpath = 0; needs_clflush_after = 0; needs_clflush_before = 0; tmp = to_user_ptr(args->data_ptr); user_data = (char *)tmp; remain = (ssize_t )args->size; tmp___0 = i915_gem_object_needs_bit17_swizzle(obj); obj_do_bit17_swizzling = (int )tmp___0; if (obj->base.write_domain != 1U) { tmp___1 = cpu_write_needs_clflush(obj); needs_clflush_after = (int )tmp___1; ret = i915_gem_object_wait_rendering(obj, 0); if (ret != 0) { return (ret); } else { } } else { } if ((obj->base.read_domains & 1U) == 0U) { tmp___2 = cpu_cache_is_coherent(dev, (enum i915_cache_level )obj->cache_level); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } needs_clflush_before = tmp___3; } else { } ret = i915_gem_object_get_pages(obj); if (ret != 0) { return (ret); } else { } intel_fb_obj_invalidate(obj, (struct intel_engine_cs *)0, 1); i915_gem_object_pin_pages___1(obj); offset = (loff_t )args->offset; obj->dirty = 1U; __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, (unsigned long )(offset >> 12)); goto ldv_49819; ldv_49818: tmp___4 = sg_page_iter_page___1(& sg_iter); page = tmp___4; if (remain <= 0L) { goto ldv_49815; } else { } shmem_page_offset = (int )offset & 4095; page_length = (int )remain; if ((unsigned int )(shmem_page_offset + page_length) > 4096U) { page_length = (int )(4096U - (unsigned int )shmem_page_offset); } else { } partial_cacheline_write = 
needs_clflush_before != 0 && ((shmem_page_offset | page_length) & ((int )boot_cpu_data.x86_clflush_size + -1)) != 0; page_do_bit17_swizzling = obj_do_bit17_swizzling != 0 && (((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) & 131072ULL) != 0ULL; ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling != 0, partial_cacheline_write != 0, needs_clflush_after != 0); if (ret == 0) { goto next_page; } else { } hit_slowpath = 1; mutex_unlock(& dev->struct_mutex); ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling != 0, partial_cacheline_write != 0, needs_clflush_after != 0); mutex_lock_nested(& dev->struct_mutex, 0U); if (ret != 0) { goto out; } else { } next_page: remain = remain - (ssize_t )page_length; user_data = user_data + (unsigned long )page_length; offset = (loff_t )page_length + offset; ldv_49819: tmp___5 = __sg_page_iter_next(& sg_iter); if ((int )tmp___5) { goto ldv_49818; } else { } ldv_49815: ; out: i915_gem_object_unpin_pages___2(obj); if (hit_slowpath != 0) { if (needs_clflush_after == 0 && obj->base.write_domain != 1U) { tmp___6 = i915_gem_clflush_object(obj, obj->pin_display != 0U); if ((int )tmp___6) { i915_gem_chipset_flush(dev); } else { } } else { } } else { } if (needs_clflush_after != 0) { i915_gem_chipset_flush(dev); } else { } intel_fb_obj_flush(obj, 0); return (ret); } } int i915_gem_pwrite_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_pwrite *args ; struct drm_i915_gem_object *obj ; int ret ; struct thread_info *tmp ; void *tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; void *tmp___4 ; long tmp___5 ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp___6 ; bool tmp___7 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; args = (struct drm_i915_gem_pwrite *)data; if (args->size == 0ULL) { return (0); } else { } tmp = 
current_thread_info(); tmp___0 = to_user_ptr(args->data_ptr); tmp___1 = __chk_range_not_ok((unsigned long )tmp___0, (unsigned long )args->size, tmp->addr_limit.seg); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } tmp___3 = ldv__builtin_expect((long )tmp___2, 1L); if (tmp___3 == 0L) { return (-14); } else { } tmp___5 = ldv__builtin_expect((long )(! i915.prefault_disable), 1L); if (tmp___5 != 0L) { tmp___4 = to_user_ptr(args->data_ptr); ret = fault_in_multipages_readable___0((char const *)tmp___4, (int )args->size); if (ret != 0) { return (-14); } else { } } else { } intel_runtime_pm_get(dev_priv); ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { goto put_rpm; } else { } tmp___6 = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp___6; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } if (args->offset > (unsigned long long )obj->base.size || args->size > (unsigned long long )obj->base.size - args->offset) { ret = -22; goto out; } else { } if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { ret = -22; goto out; } else { } trace_i915_gem_object_pwrite(obj, (u32 )args->offset, (u32 )args->size); ret = -14; if ((unsigned int )*((unsigned char *)obj + 409UL) == 0U && obj->base.write_domain != 1U) { tmp___7 = cpu_write_needs_clflush(obj); if ((int )tmp___7) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); } else { } } else { } if (ret == -14 || ret == -28) { if ((unsigned long )obj->__annonCompField84.phys_handle != (unsigned long )((struct drm_dma_handle *)0)) { ret = i915_gem_phys_pwrite(obj, args, file); } else { ret = i915_gem_shmem_pwrite(dev, obj, args, file); } } else { } out: drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); put_rpm: intel_runtime_pm_put(dev_priv); return (ret); } } int i915_gem_check_wedge(struct i915_gpu_error 
*error , bool interruptible ) { bool tmp ; bool tmp___0 ; { tmp___0 = i915_reset_in_progress(error); if ((int )tmp___0) { if (! interruptible) { return (-5); } else { } tmp = i915_terminally_wedged(error); if ((int )tmp) { return (-5); } else { } if (! error->reload_in_reset) { return (-11); } else { } } else { } return (0); } } int i915_gem_check_olr(struct drm_i915_gem_request *req ) { int ret ; int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& ((req->ring)->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 1160, "WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = 0; if ((unsigned long )(req->ring)->outstanding_lazy_request == (unsigned long )req) { ret = __i915_add_request(req->ring, (struct drm_file *)0, (struct drm_i915_gem_object *)0); } else { } return (ret); } } static void fake_irq(unsigned long data ) { { wake_up_process((struct task_struct *)data); return; } } static bool missed_irq(struct drm_i915_private *dev_priv , struct intel_engine_cs *ring ) { int tmp ; { tmp = variable_test_bit((long )ring->id, (unsigned long const volatile *)(& dev_priv->gpu_error.missed_irq_rings)); return (tmp != 0); } } static int __i915_spin_request(struct drm_i915_gem_request *req ) { unsigned long timeout ; struct intel_engine_cs *tmp ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; { tmp = i915_gem_request_get_ring(req); if (tmp->irq_refcount != 0U) { return (-16); } else { } timeout = (unsigned long )jiffies + 1UL; goto ldv_49864; ldv_49863: tmp___0 = i915_gem_request_completed___1(req, 1); if ((int )tmp___0) { return (0); } else { } if ((long )((unsigned long )jiffies 
- timeout) >= 0L) { goto ldv_49862; } else { } cpu_relax(); ldv_49864: tmp___1 = need_resched(); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_49863; } else { } ldv_49862: tmp___3 = i915_gem_request_completed___1(req, 0); if ((int )tmp___3) { return (0); } else { } return (-11); } } int __i915_wait_request(struct drm_i915_gem_request *req , unsigned int reset_counter , bool interruptible , s64 *timeout , struct intel_rps_client *rps ) { struct intel_engine_cs *ring ; struct intel_engine_cs *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool irq_test_in_progress ; unsigned int __var ; unsigned int tmp___0 ; wait_queue_t wait ; struct task_struct *tmp___1 ; unsigned long timeout_expire ; s64 before ; s64 now ; int ret ; int __ret_warn_on ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; int tmp___5 ; bool tmp___6 ; unsigned long tmp___7 ; struct drm_i915_private *__p ; u64 tmp___8 ; int __ret_warn_on___0 ; bool tmp___9 ; int tmp___10 ; long tmp___11 ; long tmp___12 ; struct timer_list timer ; int tmp___13 ; bool tmp___14 ; struct task_struct *tmp___15 ; int tmp___16 ; unsigned long expire ; struct task_struct *tmp___17 ; bool tmp___18 ; bool tmp___19 ; u64 tmp___20 ; s64 tres ; unsigned int tmp___21 ; { tmp = i915_gem_request_get_ring(req); ring = tmp; dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __var = 0U; tmp___0 = intel_ring_flag(ring); irq_test_in_progress = ((unsigned int )*((unsigned int volatile *)(& dev_priv->gpu_error.test_irq_rings)) & tmp___0) != 0U; tmp___1 = get_current(); wait.flags = 0U; wait.private = (void *)tmp___1; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; tmp___2 = intel_irqs_enabled(dev_priv); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } __ret_warn_on = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 1236, "IRQs disabled"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___5 = list_empty((struct list_head const *)(& req->list)); if (tmp___5 != 0) { return (0); } else { } tmp___6 = i915_gem_request_completed___1(req, 1); if ((int )tmp___6) { return (0); } else { } if ((unsigned long )timeout != (unsigned long )((s64 *)0LL)) { tmp___7 = nsecs_to_jiffies_timeout((unsigned long long )*timeout); timeout_expire = tmp___7 + (unsigned long )jiffies; } else { timeout_expire = 0UL; } __p = dev_priv; if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); } else { } trace_i915_gem_request_wait_begin(req); tmp___8 = ktime_get_raw_ns(); before = (s64 )tmp___8; ret = __i915_spin_request(req); if (ret == 0) { goto out; } else { } if (! irq_test_in_progress) { tmp___9 = (*(ring->irq_get))(ring); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } __ret_warn_on___0 = tmp___10; tmp___11 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___11 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 1259, "WARN_ON(!ring->irq_get(ring))"); } else { } tmp___12 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___12 != 0L) { ret = -19; goto out; } else { } } else { } ldv_49903: prepare_to_wait(& ring->irq_queue, & wait, (int )interruptible ? 
1 : 2); tmp___13 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); if ((unsigned int )tmp___13 != reset_counter) { ret = i915_gem_check_wedge(& dev_priv->gpu_error, (int )interruptible); if (ret == 0) { ret = -11; } else { } goto ldv_49895; } else { } tmp___14 = i915_gem_request_completed___1(req, 0); if ((int )tmp___14) { ret = 0; goto ldv_49895; } else { } if ((int )interruptible) { tmp___15 = get_current(); tmp___16 = signal_pending(tmp___15); if (tmp___16 != 0) { ret = -512; goto ldv_49895; } else { } } else { } if ((unsigned long )timeout != (unsigned long )((s64 *)0LL) && (long )((unsigned long )jiffies - timeout_expire) >= 0L) { ret = -62; goto ldv_49895; } else { } timer.function = (void (*)(unsigned long ))0; if ((unsigned long )timeout != (unsigned long )((s64 *)0LL)) { tmp___17 = get_current(); reg_timer_21(& timer, & fake_irq, (unsigned long )tmp___17); tmp___18 = missed_irq(dev_priv, ring); expire = (int )tmp___18 ? (unsigned long )jiffies + 1UL : timeout_expire; ldv_mod_timer_296(& timer, expire); } else { tmp___19 = missed_irq(dev_priv, ring); if ((int )tmp___19) { tmp___17 = get_current(); reg_timer_21(& timer, & fake_irq, (unsigned long )tmp___17); tmp___18 = missed_irq(dev_priv, ring); expire = (int )tmp___18 ? (unsigned long )jiffies + 1UL : timeout_expire; ldv_mod_timer_296(& timer, expire); } else { } } io_schedule(); if ((unsigned long )timer.function != (unsigned long )((void (*)(unsigned long ))0)) { ldv_del_timer_sync_297(& timer); destroy_timer_on_stack(& timer); } else { } goto ldv_49903; ldv_49895: ; if (! irq_test_in_progress) { (*(ring->irq_put))(ring); } else { } finish_wait(& ring->irq_queue, & wait); out: tmp___20 = ktime_get_raw_ns(); now = (s64 )tmp___20; trace_i915_gem_request_wait_end(req); if ((unsigned long )timeout != (unsigned long )((s64 *)0LL)) { tres = *timeout + (before - now); *timeout = 0LL > tres ? 
0LL : tres; if (ret == -62) { tmp___21 = jiffies_to_usecs(1UL); if (*timeout < (s64 )(tmp___21 * 1000U)) { *timeout = 0LL; } else { } } else { } } else { } return (ret); } } __inline static void i915_gem_request_remove_from_client(struct drm_i915_gem_request *request ) { struct drm_i915_file_private *file_priv ; { file_priv = request->file_priv; if ((unsigned long )file_priv == (unsigned long )((struct drm_i915_file_private *)0)) { return; } else { } spin_lock(& file_priv->mm.lock); list_del(& request->client_list); request->file_priv = (struct drm_i915_file_private *)0; spin_unlock(& file_priv->mm.lock); return; } } static void i915_gem_request_retire(struct drm_i915_gem_request *request ) { { trace_i915_gem_request_retire(request); (request->ringbuf)->last_retired_head = request->postfix; list_del_init(& request->list); i915_gem_request_remove_from_client(request); put_pid(request->pid); i915_gem_request_unreference___0(request); return; } } static void __i915_gem_request_retire__upto(struct drm_i915_gem_request *req ) { struct intel_engine_cs *engine ; struct drm_i915_gem_request *tmp ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; struct list_head const *__mptr ; int __ret_warn_on___0 ; long tmp___4 ; { engine = req->ring; if (debug_locks != 0) { tmp___0 = lock_is_held(& (engine->dev)->struct_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 1382, "WARN_ON(debug_locks && !lockdep_is_held(&engine->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = list_empty((struct list_head const *)(& req->list)); if (tmp___3 != 0) { 
return; } else { } ldv_49921: __mptr = (struct list_head const *)engine->request_list.next; tmp = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; i915_gem_request_retire(tmp); if ((unsigned long )tmp != (unsigned long )req) { goto ldv_49921; } else { } __ret_warn_on___0 = 0; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 1394, "WARN_ON(i915_verify_lists(engine->dev))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return; } } int i915_wait_request(struct drm_i915_gem_request *req ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool interruptible ; int ret ; long tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; { tmp = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (1409), "i" (12UL)); ldv_49932: ; goto ldv_49932; } else { } dev = (req->ring)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; interruptible = dev_priv->mm.interruptible; tmp___0 = mutex_is_locked(& dev->struct_mutex); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (1415), "i" (12UL)); ldv_49933: ; goto ldv_49933; } else { } ret = i915_gem_check_wedge(& dev_priv->gpu_error, (int )interruptible); if (ret != 0) { return (ret); } else { } ret = i915_gem_check_olr(req); if (ret != 0) { return (ret); } else { } tmp___2 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); ret = __i915_wait_request(req, (unsigned int )tmp___2, (int )interruptible, (s64 *)0LL, (struct intel_rps_client *)0); if (ret != 0) { return (ret); } else { } __i915_gem_request_retire__upto(req); return (0); } } int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj , bool readonly ) { int ret ; int i ; { if ((unsigned int )*((unsigned char *)obj + 408UL) == 0U) { return (0); } else { } if ((int )readonly) { if ((unsigned long )obj->last_write_req != (unsigned long )((struct drm_i915_gem_request *)0)) { ret = i915_wait_request(obj->last_write_req); if (ret != 0) { return (ret); } else { } i = (int )((obj->last_write_req)->ring)->id; if ((unsigned long )obj->last_read_req[i] == (unsigned long )obj->last_write_req) { i915_gem_object_retire__read(obj, i); } else { i915_gem_object_retire__write(obj); } } else { } } else { i = 0; goto ldv_49942; ldv_49941: ; if ((unsigned long )obj->last_read_req[i] == (unsigned long )((struct drm_i915_gem_request *)0)) { goto ldv_49940; } else { } ret = i915_wait_request(obj->last_read_req[i]); if (ret != 0) { return (ret); } else { } i915_gem_object_retire__read(obj, i); ldv_49940: i = i + 1; ldv_49942: ; if (i <= 4) { goto ldv_49941; } else { } } return (0); } } static void i915_gem_object_retire_request(struct drm_i915_gem_object *obj , struct drm_i915_gem_request *req ) { int ring ; { ring = (int )(req->ring)->id; if ((unsigned long )obj->last_read_req[ring] == (unsigned long )req) 
{ i915_gem_object_retire__read(obj, ring); } else if ((unsigned long )obj->last_write_req == (unsigned long )req) { i915_gem_object_retire__write(obj); } else { } __i915_gem_request_retire__upto(req); return; } } static int i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj , struct intel_rps_client *rps , bool readonly ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_request *requests[5U] ; unsigned int reset_counter ; int ret ; int i ; int n ; int tmp ; long tmp___0 ; long tmp___1 ; int tmp___2 ; struct drm_i915_gem_request *req ; int tmp___3 ; struct drm_i915_gem_request *req___0 ; int tmp___4 ; { dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; n = 0; tmp = mutex_is_locked(& dev->struct_mutex); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (1505), "i" (12UL)); ldv_49961: ; goto ldv_49961; } else { } tmp___1 = ldv__builtin_expect((long )(! 
dev_priv->mm.interruptible), 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (1506), "i" (12UL)); ldv_49962: ; goto ldv_49962; } else { } if ((unsigned int )*((unsigned char *)obj + 408UL) == 0U) { return (0); } else { } ret = i915_gem_check_wedge(& dev_priv->gpu_error, 1); if (ret != 0) { return (ret); } else { } tmp___2 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); reset_counter = (unsigned int )tmp___2; if ((int )readonly) { req = obj->last_write_req; if ((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0)) { return (0); } else { } ret = i915_gem_check_olr(req); if (ret != 0) { goto err; } else { } tmp___3 = n; n = n + 1; requests[tmp___3] = i915_gem_request_reference___1(req); } else { i = 0; goto ldv_49968; ldv_49967: req___0 = obj->last_read_req[i]; if ((unsigned long )req___0 == (unsigned long )((struct drm_i915_gem_request *)0)) { goto ldv_49966; } else { } ret = i915_gem_check_olr(req___0); if (ret != 0) { goto err; } else { } tmp___4 = n; n = n + 1; requests[tmp___4] = i915_gem_request_reference___1(req___0); ldv_49966: i = i + 1; ldv_49968: ; if (i <= 4) { goto ldv_49967; } else { } } mutex_unlock(& dev->struct_mutex); i = 0; goto ldv_49971; ldv_49970: ret = __i915_wait_request(requests[i], reset_counter, 1, (s64 *)0LL, rps); i = i + 1; ldv_49971: ; if (ret == 0 && i < n) { goto ldv_49970; } else { } mutex_lock_nested(& dev->struct_mutex, 0U); err: i = 0; goto ldv_49974; ldv_49973: ; if (ret == 0) { i915_gem_object_retire_request(obj, requests[i]); } else { } i915_gem_request_unreference___0(requests[i]); i = i + 1; ldv_49974: ; if (i < n) { goto ldv_49973; } else { } 
return (ret); } } static struct intel_rps_client *to_rps_client(struct drm_file *file ) { struct drm_i915_file_private *fpriv ; { fpriv = (struct drm_i915_file_private *)file->driver_priv; return (& fpriv->rps); } } int i915_gem_set_domain_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_set_domain *args ; struct drm_i915_gem_object *obj ; uint32_t read_domains ; uint32_t write_domain ; int ret ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; struct intel_rps_client *tmp___0 ; { args = (struct drm_i915_gem_set_domain *)data; read_domains = args->read_domains; write_domain = args->write_domain; if ((write_domain & 62U) != 0U) { return (-22); } else { } if ((read_domains & 62U) != 0U) { return (-22); } else { } if (write_domain != 0U && read_domains != write_domain) { return (-22); } else { } ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } tmp = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } tmp___0 = to_rps_client(file); ret = i915_gem_object_wait_rendering__nonblocking(obj, tmp___0, write_domain == 0U); if (ret != 0) { goto unref; } else { } if ((read_domains & 64U) != 0U) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0U); } else { ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0U); } unref: drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); return (ret); } } int i915_gem_sw_finish_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_sw_finish *args ; struct drm_i915_gem_object *obj ; int ret ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; { args = (struct drm_i915_gem_sw_finish *)data; ret = 0; ret = i915_mutex_lock_interruptible(dev); if 
(ret != 0) { return (ret); } else { } tmp = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } if (obj->pin_display != 0U) { i915_gem_object_flush_cpu_write_domain(obj); } else { } drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); return (ret); } } int i915_gem_mmap_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_mmap *args ; struct drm_gem_object *obj ; unsigned long addr ; int tmp ; struct mm_struct *mm ; struct task_struct *tmp___0 ; struct vm_area_struct *vma ; pgprot_t tmp___1 ; bool tmp___2 ; { args = (struct drm_i915_gem_mmap *)data; if ((args->flags & 0xfffffffffffffffeULL) != 0ULL) { return (-22); } else { } if ((int )args->flags & 1) { tmp = constant_test_bit(16L, (unsigned long const volatile *)(& boot_cpu_data.x86_capability)); if (tmp == 0) { return (-19); } else { } } else { } obj = drm_gem_object_lookup(dev, file, args->handle); if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) { return (-2); } else { } if ((unsigned long )obj->filp == (unsigned long )((struct file *)0)) { drm_gem_object_unreference_unlocked(obj); return (-22); } else { } addr = vm_mmap(obj->filp, 0UL, (unsigned long )args->size, 3UL, 1UL, (unsigned long )args->offset); if ((int )args->flags & 1) { tmp___0 = get_current(); mm = tmp___0->mm; down_write(& mm->mmap_sem); vma = find_vma(mm, addr); if ((unsigned long )vma != (unsigned long )((struct vm_area_struct *)0)) { tmp___1 = vm_get_page_prot(vma->vm_flags); vma->vm_page_prot = pgprot_writecombine(tmp___1); } else { addr = 0xfffffffffffffff4UL; } up_write(& mm->mmap_sem); } else { } drm_gem_object_unreference_unlocked(obj); tmp___2 = IS_ERR((void const *)addr); if ((int )tmp___2) { return ((int )addr); } else { } args->addr_ptr = 
(unsigned long long )addr; return (0); } } int i915_gem_fault(struct vm_area_struct *vma , struct vm_fault *vmf ) { struct drm_i915_gem_object *obj ; struct drm_gem_object const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct i915_ggtt_view view ; unsigned long page_offset___0 ; unsigned long pfn ; int ret ; bool write ; struct drm_i915_private *__p ; unsigned int chunk_size ; unsigned long __x ; unsigned int __min1 ; unsigned int __min2 ; unsigned long tmp ; unsigned long base ; unsigned int i ; unsigned long size ; unsigned long __min1___0 ; unsigned long __min2___0 ; int i___0 ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { __mptr = (struct drm_gem_object const *)vma->vm_private_data; obj = (struct drm_i915_gem_object *)__mptr; dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; view = i915_ggtt_view_normal; ret = 0; write = ((int )vmf->flags & 1) != 0; intel_runtime_pm_get(dev_priv); page_offset___0 = ((unsigned long )vmf->virtual_address - vma->vm_start) >> 12; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { goto out; } else { } trace_i915_gem_object_fault(obj, (u32 )page_offset___0, 1, (int )write); ret = i915_gem_object_wait_rendering__nonblocking(obj, (struct intel_rps_client *)0, (int )((bool )(! 
((int )write != 0)))); if (ret != 0) { goto unlock; } else { } if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { ret = -14; goto unlock; } else { } } else { } if (obj->base.size >= dev_priv->gtt.mappable_end && (unsigned int )*((unsigned char *)obj + 409UL) == 0U) { chunk_size = 256U; memset((void *)(& view), 0, 56UL); view.type = 2; __x = page_offset___0; view.params.partial.offset = __x - __x % (unsigned long )chunk_size; __min1 = chunk_size; __min2 = (unsigned int )((vma->vm_end - vma->vm_start) / 4096UL) - (unsigned int )view.params.partial.offset; view.params.partial.size = __min1 < __min2 ? __min1 : __min2; } else { } ret = i915_gem_object_ggtt_pin(obj, (struct i915_ggtt_view const *)(& view), 0U, 1ULL); if (ret != 0) { goto unlock; } else { } ret = i915_gem_object_set_to_gtt_domain(obj, (int )write); if (ret != 0) { goto unpin; } else { } ret = i915_gem_object_get_fence(obj); if (ret != 0) { goto unpin; } else { } tmp = i915_gem_obj_ggtt_offset_view(obj, (struct i915_ggtt_view const *)(& view)); pfn = (unsigned long )(dev_priv->gtt.mappable_base + (unsigned long long )tmp); pfn = pfn >> 12; tmp___0 = ldv__builtin_expect((unsigned int )view.type == 2U, 0L); if (tmp___0 != 0L) { base = vma->vm_start + (view.params.partial.offset << 12); i = 0U; goto ldv_50048; ldv_50047: ret = vm_insert_pfn(vma, (unsigned long )i * 4096UL + base, (unsigned long )i + pfn); if (ret != 0) { goto ldv_50046; } else { } i = i + 1U; ldv_50048: ; if (view.params.partial.size > i) { goto ldv_50047; } else { } ldv_50046: obj->fault_mappable = 1U; } else if ((unsigned int )*((unsigned char *)obj + 410UL) == 0U) { __min1___0 = vma->vm_end - vma->vm_start; __min2___0 = obj->base.size; size = __min1___0 < __min2___0 ? 
__min1___0 : __min2___0; i___0 = 0; goto ldv_50056; ldv_50055: ret = vm_insert_pfn(vma, vma->vm_start + (unsigned long )i___0 * 4096UL, (unsigned long )i___0 + pfn); if (ret != 0) { goto ldv_50054; } else { } i___0 = i___0 + 1; ldv_50056: ; if ((unsigned long )i___0 < size >> 12) { goto ldv_50055; } else { } ldv_50054: obj->fault_mappable = 1U; } else { ret = vm_insert_pfn(vma, (unsigned long )vmf->virtual_address, pfn + page_offset___0); } unpin: i915_gem_object_ggtt_unpin_view(obj, (struct i915_ggtt_view const *)(& view)); unlock: mutex_unlock(& dev->struct_mutex); out: ; switch (ret) { case -5: tmp___1 = i915_terminally_wedged(& dev_priv->gpu_error); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { ret = 2; goto ldv_50058; } else { } case -11: ; case 0: ; case -512: ; case -4: ; case -16: ret = 256; goto ldv_50058; case -12: ret = 1; goto ldv_50058; case -28: ; case -14: ret = 2; goto ldv_50058; default: __ret_warn_once = ret != 0; tmp___5 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___5 != 0L) { __ret_warn_on = ! 
__warned; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 1891, "unhandled error in i915_gem_fault: %i\n", ret); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); ret = 2; goto ldv_50058; } ldv_50058: intel_runtime_pm_put(dev_priv); return (ret); } } void i915_gem_release_mmap(struct drm_i915_gem_object *obj ) { { if ((unsigned int )*((unsigned char *)obj + 410UL) == 0U) { return; } else { } drm_vma_node_unmap(& obj->base.vma_node, ((obj->base.dev)->anon_inode)->i_mapping); obj->fault_mappable = 0U; return; } } void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv ) { struct drm_i915_gem_object *obj ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_50085; ldv_50084: i915_gem_release_mmap(obj); __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_50085: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_50084; } else { } return; } } uint32_t i915_gem_get_gtt_size(struct drm_device *dev , uint32_t size , int tiling_mode ) { uint32_t gtt_size ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U || tiling_mode == 0) { return (size); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 3U) { gtt_size = 
/* LDV/CIL-generated flattening of drivers/gpu/drm/i915/i915_gem.c (see the
 * embedded source paths below). Loops are rendered as ldv_* labels + gotos,
 * BUG() as an inline ud2 + self-loop, and some struct fields are read through
 * raw byte offsets. Code is kept byte-for-byte; only comments are added. */
/* (tail of i915_gem_get_gtt_size: rounds gtt_size up by doubling until it
 * covers the object size — function begins before this chunk) */
1048576U; } else { gtt_size = 524288U; } goto ldv_50106; ldv_50105: gtt_size = gtt_size << 1; ldv_50106: ; if (gtt_size < size) { goto ldv_50105; } else { } return (gtt_size); } }
/* Minimum GTT alignment for a mapping of @size with @tiling_mode. Gen > 3,
 * the unfenced case when the flag at byte 44 of dev_priv is set (a hardware
 * capability bit — TODO confirm which against struct drm_i915_private), and
 * untiled objects (tiling_mode == 0) need only page (4096) alignment;
 * otherwise a fence requires alignment to the fence size computed by
 * i915_gem_get_gtt_size(). */
uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev , uint32_t size , int tiling_mode , bool fenced ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { return (4096U); } else if (! fenced) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { return (4096U); } else { goto _L; } } else _L: /* CIL Label */ if (tiling_mode == 0) { return (4096U); } else { } tmp = i915_gem_get_gtt_size(dev, size, tiling_mode); return (tmp); } }
/* Allocate the fake mmap offset for GTT mapping; no-op when the vma node
 * already has one. When drm_gem_create_mmap_offset() returns -28 (-ENOSPC),
 * reclaim progressively harder: a bounded i915_gem_shrink() of size>>12
 * pages, retry, then i915_gem_shrink_all() and a final retry.
 * shrinker_no_lock_stealing is held set across the attempts — presumably so
 * our own reclaim cannot steal the lock; confirm against upstream. */
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; int ret ; bool tmp ; { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; tmp = drm_vma_node_has_offset(& obj->base.vma_node); if ((int )tmp) { return (0); } else { } dev_priv->mm.shrinker_no_lock_stealing = 1; ret = drm_gem_create_mmap_offset(& obj->base); if (ret != -28) { goto out; } else { } i915_gem_shrink(dev_priv, (long )(obj->base.size >> 12), 7U); ret = drm_gem_create_mmap_offset(& obj->base); if (ret != -28) { goto out; } else { } i915_gem_shrink_all(dev_priv); ret = drm_gem_create_mmap_offset(& obj->base); out: dev_priv->mm.shrinker_no_lock_stealing = 0; return (ret); } }
/* Release the fake mmap offset; thin wrapper over drm_gem_free_mmap_offset(). */
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj ) { { drm_gem_free_mmap_offset(& obj->base); return; } }
/* Look up @handle under struct_mutex and return its fake mmap offset through
 * *offset. Returns -2 (-ENOENT) for an unknown handle and -14 (-EFAULT) when
 * the byte-409 flag is nonzero (object marked purgeable, per the debug
 * message — byte 409 appears to hold the madv state; TODO confirm offset). */
int i915_gem_mmap_gtt(struct drm_file *file , struct drm_device *dev , uint32_t handle , uint64_t *offset ) { struct drm_i915_gem_object *obj ; int ret ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; long tmp___0 ; { ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { }
tmp = drm_gem_object_lookup(dev, file, handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_gem_mmap_gtt", "Attempting to mmap a purgeable buffer\n"); } else { } ret = -14; goto out; } else { } ret = i915_gem_object_create_mmap_offset(obj); if (ret != 0) { goto out; } else { } *offset = drm_vma_node_offset_addr(& obj->base.vma_node); out: drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); return (ret); } }
/* DRM_IOCTL_I915_GEM_MMAP_GTT entry point: unpack args, forward to
 * i915_gem_mmap_gtt(). */
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_mmap_gtt *args ; int tmp ; { args = (struct drm_i915_gem_mmap_gtt *)data; tmp = i915_gem_mmap_gtt(file, dev, args->handle, & args->offset); return (tmp); } }
/* Drop the object's backing storage: free the mmap offset, truncate the
 * whole shmem file (when one exists) and set madv = 2 (purged). */
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj ) { struct inode *tmp ; { i915_gem_object_free_mmap_offset(obj); if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { return; } else { } tmp = file_inode((struct file const *)obj->base.filp); shmem_truncate_range(tmp, 0LL, -1LL); obj->madv = 2U; return; } }
/* madv == 1: truncate (note the deliberate fallthrough into case 2's
 * return); madv == 2: already purged, nothing to do; any other state drops
 * clean page-cache pages of the shmem mapping instead. */
static void i915_gem_object_invalidate(struct drm_i915_gem_object *obj ) { struct address_space *mapping ; struct inode *tmp ; { switch ((int )obj->madv) { case 1: i915_gem_object_truncate(obj); case 2: ; return; } if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { return; } else { } tmp = file_inode((struct file const *)obj->base.filp); mapping = tmp->i_mapping; invalidate_mapping_pages(mapping, 0UL, 0xffffffffffffffffUL); return; } }
/* Release obj->pages back to shmem. BUGs (ud2 + ldv self-loop) when byte 409
 * equals 32 — presumably the already-purged madv state, TODO confirm. Makes a
 * best-effort move to the CPU domain first (WARN unless it failed with
 * -5/-EIO, in which case clflush and force the CPU domain by hand), saves
 * bit-17 swizzle state when required, then marks each page dirty/accessed as
 * appropriate, drops the page references and (continuing on the next source
 * line) frees the sg_table. */
static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj ) { struct sg_page_iter sg_iter ; int ret ; long tmp ;
int __ret_warn_on ; long tmp___0 ; uint32_t tmp___1 ; bool tmp___2 ; struct page *page ; struct page *tmp___3 ; bool tmp___4 ; { tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)obj + 409UL) == 32U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (2130), "i" (12UL)); ldv_50168: ; goto ldv_50168; } else { } ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret != 0) { __ret_warn_on = ret != -5; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 2137, "WARN_ON(ret != -EIO)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); i915_gem_clflush_object(obj, 1); tmp___1 = 1U; obj->base.write_domain = tmp___1; obj->base.read_domains = tmp___1; } else { } tmp___2 = i915_gem_object_needs_bit17_swizzle(obj); if ((int )tmp___2) { i915_gem_object_save_bit_17_swizzle(obj); } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) == 16U) { obj->dirty = 0U; } else { } __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, 0UL); goto ldv_50173; ldv_50172: tmp___3 = sg_page_iter_page___1(& sg_iter); page = tmp___3; if ((unsigned int )*((unsigned char *)obj + 408UL) != 0U) { set_page_dirty(page); } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) == 0U) { mark_page_accessed(page); } else { } put_page(page); ldv_50173: tmp___4 = __sg_page_iter_next(& sg_iter); if ((int )tmp___4) { goto ldv_50172; } else { } obj->dirty = 0U;
/* (continuation of i915_gem_object_put_pages_gtt: free the scatter-gather
 * table now that every page reference has been dropped) */
sg_free_table(obj->pages); kfree((void const *)obj->pages); return; } }
/* Generic page-release entry point: 0 when there are no pages, -16 (-EBUSY)
 * while pages_pin_count is nonzero, BUG if the object is still bound in any
 * address space; otherwise unlink from the global list, let ops->put_pages
 * do the real work, clear obj->pages and invalidate the backing store. */
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj ) { struct drm_i915_gem_object_ops const *ops ; bool tmp ; long tmp___0 ; { ops = obj->ops; if ((unsigned long )obj->pages == (unsigned long )((struct sg_table *)0)) { return (0); } else { } if (obj->pages_pin_count != 0) { return (-16); } else { } tmp = i915_gem_obj_bound_any(obj); tmp___0 = ldv__builtin_expect((long )tmp, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (2176), "i" (12UL)); ldv_50179: ; goto ldv_50179; } else { } list_del(& obj->global_list); (*(ops->put_pages))(obj); obj->pages = (struct sg_table *)0; i915_gem_object_invalidate(obj); return (0); } }
/* Populate obj->pages from shmem: BUG if any GPU read/write domain bit
 * (mask 62U) is still set, allocate an sg_table for size/4096 pages, then
 * read each page — retrying after i915_gem_shrink() and, failing that,
 * i915_gem_shrink_all() when allocation fails. Adjacent pages are coalesced
 * into a single sg entry unless SWIOTLB is active (then strictly one page
 * per entry). The pfn is computed arithmetically from the page pointer
 * (mem_map layout constants baked in by CIL); WARNs when a GFP_DMA32
 * allocation still produced a pfn above 4G. On error every gathered page is
 * released and -28 (-ENOSPC) is reported to the caller as -12 (-ENOMEM). */
static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; int page_count___0 ; int i ; struct address_space *mapping ; struct sg_table *st ; struct scatterlist *sg ; struct sg_page_iter sg_iter ; struct page *page ; unsigned long last_pfn ; gfp_t gfp ; long tmp ; long tmp___0 ; void *tmp___1 ; int tmp___2 ; struct inode *tmp___3 ; bool tmp___4 ; bool tmp___5 ; bool tmp___6 ; unsigned long tmp___7 ; int __ret_warn_on ; long tmp___8 ; unsigned long tmp___9 ; bool tmp___10 ; struct page *tmp___11 ; bool tmp___12 ; long tmp___13 ; long tmp___14 ; { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; last_pfn = 0UL; tmp = ldv__builtin_expect((obj->base.read_domains & 62U) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (2208), "i" (12UL)); ldv_50193: ; goto ldv_50193; } else { } tmp___0 = ldv__builtin_expect((obj->base.write_domain & 62U) != 0U, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (2209), "i" (12UL)); ldv_50194: ; goto ldv_50194; } else { } tmp___1 = kmalloc(16UL, 208U); st = (struct sg_table *)tmp___1; if ((unsigned long )st == (unsigned long )((struct sg_table *)0)) { return (-12); } else { } page_count___0 = (int )(obj->base.size / 4096UL); tmp___2 = sg_alloc_table(st, (unsigned int )page_count___0, 208U); if (tmp___2 != 0) { kfree((void const *)st); return (-12); } else { } tmp___3 = file_inode((struct file const *)obj->base.filp); mapping = tmp___3->i_mapping; gfp = mapping_gfp_mask(mapping); gfp = gfp | 4198912U; gfp = gfp & 4294967215U; sg = st->sgl; st->nents = 0U; i = 0; goto ldv_50200; ldv_50199: page = shmem_read_mapping_page_gfp(mapping, (unsigned long )i, gfp); tmp___4 = IS_ERR((void const *)page); if ((int )tmp___4) { i915_gem_shrink(dev_priv, (long )page_count___0, 7U); page = shmem_read_mapping_page_gfp(mapping, (unsigned long )i, gfp); } else { } tmp___6 = IS_ERR((void const *)page); if ((int )tmp___6) { i915_gem_shrink_all(dev_priv); page = shmem_read_mapping_page(mapping, (unsigned long )i); tmp___5 = IS_ERR((void const *)page); if ((int )tmp___5) { goto err_pages; } else { } } else { } tmp___7 = swiotlb_nr_tbl(); if (tmp___7 != 0UL) { st->nents = st->nents + 1U; sg_set_page___1(sg, page,
4096U, 0U); sg = sg_next(sg); goto ldv_50196; } else { } if (i == 0 || (unsigned long )(((long )page + 24189255811072L) / 64L) != last_pfn + 1UL) { if (i != 0) { sg = sg_next(sg); } else { } st->nents = st->nents + 1U; sg_set_page___1(sg, page, 4096U, 0U); } else { sg->length = sg->length + 4096U; } last_pfn = (unsigned long )(((long )page + 24189255811072L) / 64L); __ret_warn_on = (gfp & 4U) != 0U && last_pfn > 1048575UL; tmp___8 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 2271, "WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ldv_50196: i = i + 1; ldv_50200: ; if (i < page_count___0) { goto ldv_50199; } else { } tmp___9 = swiotlb_nr_tbl(); if (tmp___9 == 0UL) { sg_mark_end(sg); } else { } obj->pages = st; tmp___10 = i915_gem_object_needs_bit17_swizzle(obj); if ((int )tmp___10) { i915_gem_object_do_bit_17_swizzle(obj); } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U && (dev_priv->quirks & 32UL) != 0UL) { i915_gem_object_pin_pages___1(obj); } else { } return (0); err_pages: sg_mark_end(sg); __sg_page_iter_start(& sg_iter, st->sgl, st->nents, 0UL); goto ldv_50203; ldv_50202: tmp___11 = sg_page_iter_page___1(& sg_iter); put_page(tmp___11); ldv_50203: tmp___12 = __sg_page_iter_next(& sg_iter); if ((int )tmp___12) { goto ldv_50202; } else { } sg_free_table(st); kfree((void const *)st); tmp___14 = PTR_ERR((void const *)page); if (tmp___14 == -28L) { return (-12); } else { tmp___13 = PTR_ERR((void const *)page); return ((int )tmp___13); } } }
/* Top-level page acquisition via ops->get_pages: no-op when pages exist,
 * -14 (-EFAULT) for purgeable objects, BUG on a nonzero pin count; on
 * success links the object onto mm.unbound_list and resets the get_page
 * iterator (body continues on the next source line). */
int i915_gem_object_get_pages(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_object_ops const *ops ; int ret ; long tmp ; long tmp___0 ; { dev_priv
/* (continuation of i915_gem_object_get_pages) */
= (struct drm_i915_private *)(obj->base.dev)->dev_private; ops = obj->ops; if ((unsigned long )obj->pages != (unsigned long )((struct sg_table *)0)) { return (0); } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_object_get_pages", "Attempting to obtain a purgeable object\n"); } else { } return (-14); } else { } tmp___0 = ldv__builtin_expect(obj->pages_pin_count != 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (2331), "i" (12UL)); ldv_50212: ; goto ldv_50212; } else { } ret = (*(ops->get_pages))(obj); if (ret != 0) { return (ret); } else { } list_add_tail(& obj->global_list, & dev_priv->mm.unbound_list); obj->get_page.sg = (obj->pages)->sgl; obj->get_page.last = 0; return (0); } }
/* Mark @vma active on @ring: take a GEM reference on first activation (the
 * byte-408 mask appears to be the per-ring active bitfield — TODO confirm),
 * OR the ring's flag into obj->active, move the per-ring list entry onto the
 * ring's active list, record the ring's current request in
 * last_read_req[ring->id] and move the vma onto its VM's active list. */
void i915_vma_move_to_active(struct i915_vma *vma , struct intel_engine_cs *ring ) { struct drm_i915_gem_object *obj ; unsigned int tmp ; struct drm_i915_gem_request *tmp___0 ; { obj = vma->obj; if ((unsigned int )*((unsigned char *)obj + 408UL) == 0U) { drm_gem_object_reference___2(& obj->base); } else { } tmp = intel_ring_flag(ring); obj->active = (unsigned char )((int )obj->active | (int )((unsigned char )tmp)); list_move_tail((struct list_head *)(& obj->ring_list) + (unsigned long )ring->id, & ring->active_list); tmp___0 = intel_ring_get_request___0(ring); i915_gem_request_assign___0((struct drm_i915_gem_request **)(& obj->last_read_req) + (unsigned long )ring->id, tmp___0); list_move_tail(& vma->mm_list, & (vma->vm)->active_list); return; } }
/* Write-side retirement: drop last_write_req and flush frontbuffer tracking
 * for this object. */
static void i915_gem_object_retire__write(struct drm_i915_gem_object
*obj ) { { i915_gem_request_assign___0(& obj->last_write_req, (struct drm_i915_gem_request *)0); intel_fb_obj_flush(obj, 1); return; } }
/* Read-side retirement for @ring: unlink the per-ring list entry, clear
 * last_read_req[ring], retire the pending write too if it was issued on the
 * same ring, and clear the ring's bit in obj->active. Once no ring remains
 * active (byte-408 mask reads 0... note the early return when it is still
 * nonzero), move each vma with a non-empty mm_list onto its VM's inactive
 * list, drop last_fenced_req and release the reference taken in
 * i915_vma_move_to_active(). */
static void i915_gem_object_retire__read(struct drm_i915_gem_object *obj , int ring ) { struct i915_vma *vma ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { list_del_init((struct list_head *)(& obj->ring_list) + (unsigned long )ring); i915_gem_request_assign___0((struct drm_i915_gem_request **)(& obj->last_read_req) + (unsigned long )ring, (struct drm_i915_gem_request *)0); if ((unsigned long )obj->last_write_req != (unsigned long )((struct drm_i915_gem_request *)0) && (unsigned int )((obj->last_write_req)->ring)->id == (unsigned int )ring) { i915_gem_object_retire__write(obj); } else { } obj->active = (unsigned char )((int )((signed char )obj->active) & ~ ((int )((signed char )(1 << ring)))); if ((unsigned int )*((unsigned char *)obj + 408UL) != 0U) { return; } else { } __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_50231; ldv_50230: tmp = list_empty((struct list_head const *)(& vma->mm_list)); if (tmp == 0) { list_move_tail(& vma->mm_list, & (vma->vm)->inactive_list); } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_50231: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_50230; } else { } i915_gem_request_assign___0(& obj->last_fenced_req, (struct drm_i915_gem_request *)0); drm_gem_object_unreference___5(& obj->base); return; } }
/* Wait for every initialized ring (indices 0..4) to idle, retire all
 * requests, then program @seqno into each ring and zero its four
 * semaphore.sync_seqno[] slots. */
static int i915_gem_init_seqno(struct drm_device *dev , u32 seqno ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int i ; int j ; bool tmp ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_50243; ldv_50242: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp =
intel_ring_initialized(ring); if ((int )tmp) { ret = intel_ring_idle(ring); if (ret != 0) { return (ret); } else { } } else { } i = i + 1; ldv_50243: ; if (i <= 4) { goto ldv_50242; } else { } i915_gem_retire_requests(dev); i = 0; goto ldv_50251; ldv_50250: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { intel_ring_init_seqno(ring, seqno); j = 0; goto ldv_50248; ldv_50247: ring->semaphore.sync_seqno[j] = 0U; j = j + 1; ldv_50248: ; if ((unsigned int )j <= 3U) { goto ldv_50247; } else { } } else { } i = i + 1; ldv_50251: ; if (i <= 4) { goto ldv_50250; } else { } return (0); } }
/* Force the driver's seqno counter: rejects 0 (-22/-EINVAL), initializes
 * hardware state to seqno-1 first, then sets next_seqno = seqno and
 * last_seqno = seqno-1, stepping past 0 on wraparound. */
int i915_gem_set_seqno(struct drm_device *dev , u32 seqno ) { struct drm_i915_private *dev_priv ; int ret ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (seqno == 0U) { return (-22); } else { } ret = i915_gem_init_seqno(dev, seqno - 1U); if (ret != 0) { return (ret); } else { } dev_priv->next_seqno = seqno; dev_priv->last_seqno = seqno - 1U; if (dev_priv->last_seqno == 0U) { dev_priv->last_seqno = dev_priv->last_seqno - 1U; } else { } return (0); } }
/* Hand out the next seqno through *seqno, re-seeding via
 * i915_gem_init_seqno(0) when next_seqno has wrapped to 0. */
int i915_gem_get_seqno(struct drm_device *dev , u32 *seqno ) { struct drm_i915_private *dev_priv ; int ret ; int tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (dev_priv->next_seqno == 0U) { tmp = i915_gem_init_seqno(dev, 0U); ret = tmp; if (ret != 0) { return (ret); } else { } dev_priv->next_seqno = 1U; } else { } tmp___1 = dev_priv->next_seqno; dev_priv->next_seqno = dev_priv->next_seqno + 1U; tmp___0 = tmp___1; dev_priv->last_seqno = tmp___0; *seqno = tmp___0; return (0); } }
/* Emit ring->outstanding_lazy_request (WARN + -12 when there is none):
 * flush caches and add the request via the execlists or legacy path, record
 * head/postfix/tail, attach ctx/file/pid bookkeeping, then kick hangcheck
 * and the delayed retire worker. Declarations continue on the next line. */
int __i915_add_request(struct intel_engine_cs *ring , struct drm_file *file , struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_request *request ; struct intel_ringbuffer *ringbuf ; u32 request_start ; int ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; struct
/* (continuation of __i915_add_request's local declarations and body) */
drm_i915_file_private *file_priv ; struct task_struct *tmp___1 ; struct pid *tmp___2 ; unsigned long tmp___3 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; request = ring->outstanding_lazy_request; __ret_warn_on = (unsigned long )request == (unsigned long )((struct drm_i915_gem_request *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 2480, "WARN_ON(request == NULL)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-12); } else { } if (i915.enable_execlists != 0) { ringbuf = (request->ctx)->engine[(unsigned int )ring->id].ringbuf; } else { ringbuf = ring->buffer; } request_start = intel_ring_get_tail(ringbuf); if (i915.enable_execlists != 0) { ret = logical_ring_flush_all_caches(ringbuf, request->ctx); if (ret != 0) { return (ret); } else { } } else { ret = intel_ring_flush_all_caches(ring); if (ret != 0) { return (ret); } else { } } request->postfix = intel_ring_get_tail(ringbuf); if (i915.enable_execlists != 0) { ret = (*(ring->emit_request))(ringbuf, request); if (ret != 0) { return (ret); } else { } } else { ret = (*(ring->add_request))(ring); if (ret != 0) { return (ret); } else { } request->tail = intel_ring_get_tail(ringbuf); } request->head = request_start; request->batch_obj = obj; if (i915.enable_execlists == 0) { request->ctx = ring->last_context; if ((unsigned long )request->ctx != (unsigned long )((struct intel_context *)0)) { i915_gem_context_reference___1(request->ctx); } else { } } else { } request->emitted_jiffies = jiffies; list_add_tail(& request->list, & ring->request_list); request->file_priv = (struct drm_i915_file_private *)0; if ((unsigned long )file != (unsigned long )((struct drm_file *)0)) { file_priv = (struct
drm_i915_file_private *)file->driver_priv; spin_lock(& file_priv->mm.lock); request->file_priv = file_priv; list_add_tail(& request->client_list, & file_priv->mm.request_list); spin_unlock(& file_priv->mm.lock); tmp___1 = get_current(); tmp___2 = task_pid(tmp___1); request->pid = get_pid(tmp___2); } else { } trace_i915_gem_request_add(request); ring->outstanding_lazy_request = (struct drm_i915_gem_request *)0; i915_queue_hangcheck(ring->dev); tmp___3 = round_jiffies_up_relative(250UL); queue_delayed_work___0(dev_priv->wq, & dev_priv->mm.retire_work, tmp___3); intel_mark_busy(dev_priv->dev); return (0); } }
/* Ban heuristic after a hang: an already-banned context stays banned; a new
 * hang within ban_period_seconds of the previous one bans the context — with
 * a debug message for non-default contexts, or an error (subject to the
 * stop-rings settings) for the default one. */
static bool i915_context_is_banned(struct drm_i915_private *dev_priv , struct intel_context const *ctx ) { unsigned long elapsed ; unsigned long tmp ; long tmp___0 ; bool tmp___1 ; bool tmp___2 ; bool tmp___3 ; int tmp___4 ; { tmp = get_seconds(); elapsed = tmp - (unsigned long )ctx->hang_stats.guilty_ts; if ((int )ctx->hang_stats.banned) { return (1); } else { } if ((unsigned long )ctx->hang_stats.ban_period_seconds != 0UL && (unsigned long )ctx->hang_stats.ban_period_seconds >= elapsed) { tmp___3 = i915_gem_context_is_default(ctx); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_context_is_banned", "context hanging too fast, banning!\n"); } else { } return (1); } else { tmp___2 = i915_stop_ring_allow_ban(dev_priv); if ((int )tmp___2) { tmp___1 = i915_stop_ring_allow_warn(dev_priv); if ((int )tmp___1) { drm_err("gpu hanging too fast, banning!\n"); } else { } return (1); } else { } } } else { } return (0); } }
/* Attribute reset blame to @ctx (WARN and bail when NULL): a guilty context
 * gets the ban evaluation, a batch_active bump and a fresh guilty
 * timestamp; an innocent one only counts batch_pending. */
static void i915_set_reset_status(struct drm_i915_private *dev_priv , struct intel_context *ctx , bool const guilty ) { struct i915_ctx_hang_stats *hs ; int __ret_warn_on ; long tmp ; long tmp___0 ; { __ret_warn_on = (unsigned long )ctx == (unsigned long )((struct intel_context *)0); tmp = ldv__builtin_expect(__ret_warn_on !=
0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 2604, "WARN_ON(!ctx)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } hs = & ctx->hang_stats; if ((int )guilty) { hs->banned = i915_context_is_banned(dev_priv, (struct intel_context const *)ctx); hs->batch_active = hs->batch_active + 1U; hs->guilty_ts = get_seconds(); } else { hs->batch_pending = hs->batch_pending + 1U; } return; } }
/* kref release callback for a request: with execlists, unpin the LR context
 * unless it is the ring's default context; drop the ctx reference and return
 * the request to its slab cache. */
void i915_gem_request_free(struct kref *req_ref ) { struct drm_i915_gem_request *req ; struct kref const *__mptr ; struct intel_context *ctx ; struct intel_engine_cs *ring ; { __mptr = (struct kref const *)req_ref; req = (struct drm_i915_gem_request *)__mptr; ctx = req->ctx; if ((unsigned long )ctx != (unsigned long )((struct intel_context *)0)) { if (i915.enable_execlists != 0) { ring = req->ring; if ((unsigned long )ring->default_context != (unsigned long )ctx) { intel_lr_context_unpin(ring, ctx); } else { } } else { } i915_gem_context_unreference___1(ctx); } else { } kmem_cache_free((req->i915)->requests, (void *)req); return; } }
/* Allocate and initialize ring->outstanding_lazy_request (no-op when one
 * already exists): zeroed slab object, kref, a fresh seqno, then the
 * execlists/legacy extras; the request is freed on any failure. Body
 * continues on the next source line. */
int i915_gem_request_alloc(struct intel_engine_cs *ring , struct intel_context *ctx ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct drm_i915_gem_request *req ; int ret ; void *tmp___0 ; { tmp = to_i915((struct drm_device const *)ring->dev); dev_priv = tmp; if ((unsigned long )ring->outstanding_lazy_request != (unsigned long )((struct drm_i915_gem_request *)0)) { return (0); } else { } tmp___0 = kmem_cache_zalloc(dev_priv->requests, 208U); req = (struct drm_i915_gem_request *)tmp___0; if ((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0)) { return (-12); } else { } kref_init(& req->ref); req->i915 = dev_priv; ret =
/* (continuation of i915_gem_request_alloc) */
i915_gem_get_seqno(ring->dev, & req->seqno); if (ret != 0) { goto err; } else { } req->ring = ring; if (i915.enable_execlists != 0) { ret = intel_logical_ring_alloc_request_extras(req, ctx); } else { ret = intel_ring_alloc_request_extras(req); } if (ret != 0) { goto err; } else { } ring->outstanding_lazy_request = req; return (0); err: kmem_cache_free(dev_priv->requests, (void *)req); return (ret); } }
/* Return the first request on @ring's request list that has not yet
 * completed (the one presumed executing when a hang occurred), or NULL when
 * everything is complete. */
struct drm_i915_gem_request *i915_gem_find_active_request(struct intel_engine_cs *ring ) { struct drm_i915_gem_request *request ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)ring->request_list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; goto ldv_50318; ldv_50317: tmp = i915_gem_request_completed___1(request, 0); if ((int )tmp) { goto ldv_50316; } else { } return (request); ldv_50316: __mptr___0 = (struct list_head const *)request->list.next; request = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffffb8UL; ldv_50318: ; if ((unsigned long )(& request->list) != (unsigned long )(& ring->request_list)) { goto ldv_50317; } else { } return ((struct drm_i915_gem_request *)0); } }
/* Blame the hang on @ring: the active request's context is guilty iff
 * hangcheck.score > 30; every request queued after it is marked innocent. */
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv , struct intel_engine_cs *ring ) { struct drm_i915_gem_request *request ; bool ring_hung ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { request = i915_gem_find_active_request(ring); if ((unsigned long )request == (unsigned long )((struct drm_i915_gem_request *)0)) { return; } else { } ring_hung = ring->hangcheck.score > 30; i915_set_reset_status(dev_priv, request->ctx, (int )ring_hung); __mptr = (struct list_head const *)request->list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; goto ldv_50331; ldv_50330: i915_set_reset_status(dev_priv, request->ctx, 0); __mptr___0 = (struct list_head const *)request->list.next; request = (struct drm_i915_gem_request
*)__mptr___0 + 0xffffffffffffffb8UL; ldv_50331: ; if ((unsigned long )(& request->list) != (unsigned long )(& ring->request_list)) { goto ldv_50330; } else { } return; } }
/* Tear a ring down after reset: retire every object on the active list,
 * drop each queued execlists submission (unpinning non-default contexts),
 * retire all outstanding requests and clear the outstanding lazy request. */
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv , struct intel_engine_cs *ring ) { struct drm_i915_gem_object *obj ; struct list_head const *__mptr ; int tmp ; struct drm_i915_gem_request *submit_req ; struct list_head const *__mptr___0 ; int tmp___0 ; struct drm_i915_gem_request *request ; struct list_head const *__mptr___1 ; int tmp___1 ; { goto ldv_50341; ldv_50340: __mptr = (struct list_head const *)ring->active_list.next; obj = (struct drm_i915_gem_object *)__mptr + - ((unsigned long )ring->id * 16UL + 296UL); i915_gem_object_retire__read(obj, (int )ring->id); ldv_50341: tmp = list_empty((struct list_head const *)(& ring->active_list)); if (tmp == 0) { goto ldv_50340; } else { } goto ldv_50347; ldv_50346: __mptr___0 = (struct list_head const *)ring->execlist_queue.next; submit_req = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffff88UL; list_del(& submit_req->execlist_link); if ((unsigned long )submit_req->ctx != (unsigned long )ring->default_context) { intel_lr_context_unpin(ring, submit_req->ctx); } else { } i915_gem_request_unreference___0(submit_req); ldv_50347: tmp___0 = list_empty((struct list_head const *)(& ring->execlist_queue)); if (tmp___0 == 0) { goto ldv_50346; } else { } goto ldv_50353; ldv_50352: __mptr___1 = (struct list_head const *)ring->request_list.next; request = (struct drm_i915_gem_request *)__mptr___1 + 0xffffffffffffffb8UL; i915_gem_request_retire(request); ldv_50353: tmp___1 = list_empty((struct list_head const *)(& ring->request_list)); if (tmp___1 == 0) { goto ldv_50352; } else { } i915_gem_request_assign___0(& ring->outstanding_lazy_request, (struct drm_i915_gem_request *)0); return; } }
/* Rewrite every fence register (0..num_fence_regs-1) after reset/resume:
 * registers with an attached object are re-programmed (the third argument is
 * a flag read from byte 409 of the object — tiling/madv state, presumably;
 * TODO confirm), the rest are cleared. */
void i915_gem_restore_fences(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; struct drm_i915_fence_reg *reg ; {
dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_50362; ldv_50361: reg = (struct drm_i915_fence_reg *)(& dev_priv->fence_regs) + (unsigned long )i; if ((unsigned long )reg->obj != (unsigned long )((struct drm_i915_gem_object *)0)) { i915_gem_object_update_fence(reg->obj, reg, (unsigned int )*((unsigned char *)reg->obj + 409UL) != 0U); } else { i915_gem_write_fence(dev, i, (struct drm_i915_gem_object *)0); } i = i + 1; ldv_50362: ; if (dev_priv->num_fence_regs > i) { goto ldv_50361; } else { } return; } }
/* Full GPU-reset handling: record per-ring blame first (before cleanup
 * destroys the evidence), then clean each ring up, reset contexts and
 * restore fences. The WARN_ON(i915_verify_lists(dev)) was compiled down to
 * a constant 0 in this harness. */
void i915_gem_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int i ; bool tmp ; bool tmp___0 ; int __ret_warn_on ; long tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_50371; ldv_50370: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { i915_gem_reset_ring_status(dev_priv, ring); } else { } i = i + 1; ldv_50371: ; if (i <= 4) { goto ldv_50370; } else { } i = 0; goto ldv_50374; ldv_50373: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { i915_gem_reset_ring_cleanup(dev_priv, ring); } else { } i = i + 1; ldv_50374: ; if (i <= 4) { goto ldv_50373; } else { } i915_gem_context_reset(dev); i915_gem_restore_fences(dev); __ret_warn_on = 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 2805, "WARN_ON(i915_verify_lists(dev))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } }
/* Retire completed requests on @ring, then retire objects on the active
 * list whose last read request has been unlinked; declarations and body
 * continue on the next source line. */
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring ) { int __ret_warn_on ; long tmp ; struct drm_i915_gem_request *request ; struct list_head const
/* (continuation of i915_gem_retire_requests_ring) */
*__mptr ; bool tmp___0 ; int tmp___1 ; int tmp___2 ; struct drm_i915_gem_object *obj ; struct list_head const *__mptr___0 ; int tmp___3 ; int tmp___4 ; bool tmp___5 ; int tmp___6 ; long tmp___7 ; int __ret_warn_on___0 ; long tmp___8 ; { __ret_warn_on = 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 2814, "WARN_ON(i915_verify_lists(ring->dev))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto ldv_50388; ldv_50387: __mptr = (struct list_head const *)ring->request_list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; tmp___0 = i915_gem_request_completed___1(request, 1); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto ldv_50386; } else { } i915_gem_request_retire(request); ldv_50388: tmp___2 = list_empty((struct list_head const *)(& ring->request_list)); if (tmp___2 == 0) { goto ldv_50387; } else { } ldv_50386: ; goto ldv_50394; ldv_50393: __mptr___0 = (struct list_head const *)ring->active_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + - ((unsigned long )ring->id * 16UL + 296UL); tmp___3 = list_empty((struct list_head const *)(& (obj->last_read_req[(unsigned int )ring->id])->list)); if (tmp___3 == 0) { goto ldv_50392; } else { } i915_gem_object_retire__read(obj, (int )ring->id); ldv_50394: tmp___4 = list_empty((struct list_head const *)(& ring->active_list)); if (tmp___4 == 0) { goto ldv_50393; } else { } ldv_50392: ; if ((unsigned long )ring->trace_irq_req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp___5 = i915_gem_request_completed___1(ring->trace_irq_req, 1); if ((int )tmp___5) { tmp___6 = 1; } else { tmp___6 = 0; } } else { tmp___6 = 0; } tmp___7 = ldv__builtin_expect((long )tmp___6, 0L); if (tmp___7 != 0L) {
(*(ring->irq_put))(ring); i915_gem_request_assign___0(& ring->trace_irq_req, (struct drm_i915_gem_request *)0); } else { } __ret_warn_on___0 = 0; tmp___8 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 2857, "WARN_ON(i915_verify_lists(ring->dev))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return; } }
/* Retire completed requests on all five rings. Returns true — and schedules
 * the idle worker ~100ms out — when every ring's request list and, with
 * execlists enabled, its execlist queue (sampled under execlist_lock) are
 * empty. */
bool i915_gem_retire_requests(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; bool idle ; int i ; int tmp ; unsigned long flags ; raw_spinlock_t *tmp___0 ; int tmp___1 ; bool tmp___2 ; unsigned long tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; idle = 1; i = 0; goto ldv_50409; ldv_50408: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___2 = intel_ring_initialized(ring); if ((int )tmp___2) { i915_gem_retire_requests_ring(ring); tmp = list_empty((struct list_head const *)(& ring->request_list)); idle = ((int )idle & tmp) != 0; if (i915.enable_execlists != 0) { tmp___0 = spinlock_check(& ring->execlist_lock); flags = _raw_spin_lock_irqsave(tmp___0); tmp___1 = list_empty((struct list_head const *)(& ring->execlist_queue)); idle = ((int )idle & tmp___1) != 0; spin_unlock_irqrestore(& ring->execlist_lock, flags); intel_execlists_retire_requests(ring); } else { } } else { } i = i + 1; ldv_50409: ; if (i <= 4) { goto ldv_50408; } else { } if ((int )idle) { tmp___3 = msecs_to_jiffies(100U); mod_delayed_work(dev_priv->wq, & dev_priv->mm.idle_work, tmp___3); } else { } return (idle); } }
/* Delayed-work body (mm.retire_work): opportunistically (mutex_trylock)
 * retire requests; re-queues itself ~250ms out while not idle. The __mptr
 * offset recovers dev_priv from the embedded work_struct (container_of). */
static void i915_gem_retire_work_handler(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct drm_device *dev ; bool idle ; int tmp ; unsigned long tmp___0 ; { __mptr = (struct
work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff5420UL; dev = dev_priv->dev; idle = 0; tmp = mutex_trylock(& dev->struct_mutex); if (tmp != 0) { idle = i915_gem_retire_requests(dev); mutex_unlock(& dev->struct_mutex); } else { } if (! idle) { tmp___0 = round_jiffies_up_relative(250UL); queue_delayed_work___0(dev_priv->wq, & dev_priv->mm.retire_work, tmp___0); } else { } return; } }
/* Idle-work body (mm.idle_work): bail if any initialized ring still has
 * queued requests; otherwise mark the device idle and, when struct_mutex is
 * immediately available, release each ring's batch pool. */
static void i915_gem_idle_work_handler(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct drm_device *dev ; struct intel_engine_cs *ring ; int i ; int tmp ; bool tmp___0 ; struct intel_engine_cs *ring___0 ; int i___0 ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff5340UL; dev = dev_priv->dev; i = 0; goto ldv_50429; ldv_50428: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(ring); if ((int )tmp___0) { tmp = list_empty((struct list_head const *)(& ring->request_list)); if (tmp == 0) { return; } else { } } else { } i = i + 1; ldv_50429: ; if (i <= 4) { goto ldv_50428; } else { } intel_mark_idle(dev); tmp___2 = mutex_trylock(& dev->struct_mutex); if (tmp___2 != 0) { i___0 = 0; goto ldv_50434; ldv_50433: ring___0 = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i___0; tmp___1 = intel_ring_initialized(ring___0); if ((int )tmp___1) { i915_gem_batch_pool_fini(& ring___0->batch_pool); } else { } i___0 = i___0 + 1; ldv_50434: ; if (i___0 <= 4) { goto ldv_50433; } else { } mutex_unlock(& dev->struct_mutex); } else { } return; } }
/* Flush @obj's outstanding rendering: for each ring with a last_read_req,
 * check the out-standing-lazy-request path (i915_gem_check_olr) and retire
 * reads that have completed; body continues on the next source line. */
static int i915_gem_object_flush_active(struct drm_i915_gem_object *obj ) { int ret ; int i ; struct drm_i915_gem_request *req ; int tmp ; bool tmp___0 ; { if ((unsigned int )*((unsigned char *)obj + 408UL) == 0U) { return (0); } else { } i = 0; goto ldv_50445; ldv_50444: req = obj->last_read_req[i]; if ((unsigned long
/* (continuation of i915_gem_object_flush_active) */
)req == (unsigned long )((struct drm_i915_gem_request *)0)) { goto ldv_50442; } else { } tmp = list_empty((struct list_head const *)(& req->list)); if (tmp != 0) { goto retire; } else { } ret = i915_gem_check_olr(req); if (ret != 0) { return (ret); } else { } tmp___0 = i915_gem_request_completed___1(req, 1); if ((int )tmp___0) { __i915_gem_request_retire__upto(req); retire: i915_gem_object_retire__read(obj, i); } else { } ldv_50442: i = i + 1; ldv_50445: ; if (i <= 4) { goto ldv_50444; } else { } return (0); } }
/* DRM_IOCTL_I915_GEM_WAIT: flags must be 0 (-22/-EINVAL). Looks up the BO
 * under struct_mutex, flushes active rendering, returns -62 (-ETIME) when
 * timeout_ns == 0 and the object is still busy; otherwise snapshots the
 * per-ring read requests (with references), drops the mutex and waits on
 * each — args->timeout_ns is updated in place when a timeout was given. */
int i915_gem_wait_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_wait *args ; struct drm_i915_gem_object *obj ; struct drm_i915_gem_request *req[5U] ; unsigned int reset_counter ; int i ; int n ; int ret ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; int tmp___0 ; int tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; args = (struct drm_i915_gem_wait *)data; n = 0; if (args->flags != 0U) { return (-22); } else { } ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } tmp = drm_gem_object_lookup(dev, file, args->bo_handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { mutex_unlock(& dev->struct_mutex); return (-2); } else { } ret = i915_gem_object_flush_active(obj); if (ret != 0) { goto out; } else { } if ((unsigned int )*((unsigned char *)obj + 408UL) == 0U) { goto out; } else { } if (args->timeout_ns == 0LL) { ret = -62; goto out; } else { } drm_gem_object_unreference___5(& obj->base); tmp___0 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); reset_counter = (unsigned int )tmp___0; i = 0; goto ldv_50465; ldv_50464: ; if ((unsigned long )obj->last_read_req[i] == (unsigned long )((struct drm_i915_gem_request *)0)) { goto ldv_50463; } else { } tmp___1 = n; n = n + 1;
req[tmp___1] = i915_gem_request_reference___1(obj->last_read_req[i]); ldv_50463: i = i + 1; ldv_50465: ; if (i <= 4) { goto ldv_50464; } else { } mutex_unlock(& dev->struct_mutex); i = 0; goto ldv_50468; ldv_50467: ; if (ret == 0) { ret = __i915_wait_request(req[i], reset_counter, 1, args->timeout_ns > 0LL ? & args->timeout_ns : (s64 *)0LL, (struct intel_rps_client *)file->driver_priv); } else { } i915_gem_request_unreference__unlocked___0(req[i]); i = i + 1; ldv_50468: ; if (i < n) { goto ldv_50467; } else { } return (ret); out: drm_gem_object_unreference___5(& obj->base); mutex_unlock(& dev->struct_mutex); return (ret); } }
/* Order @req against engine @to: nothing to do when it was issued on the
 * same ring or has already completed. Without hardware semaphores the CPU
 * waits for the request (then retires it on the object); with semaphores a
 * sync is emitted unless @from's sync_seqno[] already covers the request's
 * seqno. */
static int __i915_gem_object_sync(struct drm_i915_gem_object *obj , struct intel_engine_cs *to , struct drm_i915_gem_request *req ) { struct intel_engine_cs *from ; int ret ; bool tmp ; struct drm_i915_private *i915___0 ; struct drm_i915_private *tmp___0 ; int tmp___1 ; int idx ; u32 tmp___2 ; u32 seqno ; uint32_t tmp___3 ; bool tmp___4 ; int tmp___5 ; { from = i915_gem_request_get_ring(req); if ((unsigned long )to == (unsigned long )from) { return (0); } else { } tmp = i915_gem_request_completed___1(req, 1); if ((int )tmp) { return (0); } else { } ret = i915_gem_check_olr(req); if (ret != 0) { return (ret); } else { } tmp___4 = i915_semaphore_is_enabled(obj->base.dev); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 1; } if (tmp___5) { tmp___0 = to_i915((struct drm_device const *)obj->base.dev); i915___0 = tmp___0; tmp___1 = atomic_read((atomic_t const *)(& i915___0->gpu_error.reset_counter)); ret = __i915_wait_request(req, (unsigned int )tmp___1, (int )i915___0->mm.interruptible, (s64 *)0LL, & i915___0->rps.semaphores); if (ret != 0) { return (ret); } else { } i915_gem_object_retire_request(obj, req); } else { tmp___2 = intel_ring_sync_index(from, to); idx = (int )tmp___2; tmp___3 = i915_gem_request_get_seqno(req); seqno = tmp___3; if (from->semaphore.sync_seqno[idx] >= seqno) { return (0); } else { } trace_i915_gem_ring_sync_to(from, to,
req); ret = (*(to->semaphore.sync_to))(to, from, seqno); if (ret != 0) { return (ret); } else { } from->semaphore.sync_seqno[idx] = i915_gem_request_get_seqno(obj->last_read_req[(unsigned int )from->id]); } return (0); } }
/* Synchronize @obj for use on engine @to (NULL @to means a CPU wait via
 * i915_gem_object_wait_rendering): collect last_write_req when only reads
 * are pending (pending_write_domain == 0), or all last_read_req[] otherwise,
 * and run __i915_gem_object_sync() on each. */
int i915_gem_object_sync(struct drm_i915_gem_object *obj , struct intel_engine_cs *to ) { bool readonly ; struct drm_i915_gem_request *req[5U] ; int ret ; int i ; int n ; int tmp ; int tmp___0 ; int tmp___1 ; { readonly = obj->base.pending_write_domain == 0U; if ((unsigned int )*((unsigned char *)obj + 408UL) == 0U) { return (0); } else { } if ((unsigned long )to == (unsigned long )((struct intel_engine_cs *)0)) { tmp = i915_gem_object_wait_rendering(obj, (int )readonly); return (tmp); } else { } n = 0; if ((int )readonly) { if ((unsigned long )obj->last_write_req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp___0 = n; n = n + 1; req[tmp___0] = obj->last_write_req; } else { } } else { i = 0; goto ldv_50490; ldv_50489: ; if ((unsigned long )obj->last_read_req[i] != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp___1 = n; n = n + 1; req[tmp___1] = obj->last_read_req[i]; } else { } i = i + 1; ldv_50490: ; if (i <= 4) { goto ldv_50489; } else { } } i = 0; goto ldv_50493; ldv_50492: ret = __i915_gem_object_sync(obj, to, req[i]); if (ret != 0) { return (ret); } else { } i = i + 1; ldv_50493: ; if (i < n) { goto ldv_50492; } else { } return (0); } }
/* Drop the object's GTT domain: release CPU mmaps, and when read_domains
 * includes the GTT bit (64U), fence memory traffic (mfence), clear GTT from
 * both domain masks and trace the transition. */
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj ) { u32 old_write_domain ; u32 old_read_domains ; { i915_gem_release_mmap(obj); if ((obj->base.read_domains & 64U) == 0U) { return; } else { } __asm__ volatile ("mfence": : : "memory"); old_read_domains = obj->base.read_domains; old_write_domain = obj->base.write_domain; obj->base.read_domains = obj->base.read_domains & 4294967231U; obj->base.write_domain = obj->base.write_domain & 4294967231U; trace_i915_gem_object_change_domain___0(obj, old_read_domains, old_write_domain); return; } }
/* (head of i915_vma_unbind — definition continues past this chunk) */
int i915_vma_unbind(struct
i915_vma *vma ) { struct drm_i915_gem_object *obj ; struct drm_i915_private *dev_priv ; int ret ; int tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; bool tmp___4 ; int tmp___5 ; { obj = vma->obj; dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; tmp = list_empty((struct list_head const *)(& vma->vma_link)); if (tmp != 0) { return (0); } else { } tmp___0 = drm_mm_node_allocated(& vma->node); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { i915_gem_vma_destroy(vma); return (0); } else { } if ((unsigned int )*((unsigned char *)vma + 232UL) != 0U) { return (-16); } else { } tmp___2 = ldv__builtin_expect((unsigned long )obj->pages == (unsigned long )((struct sg_table *)0), 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (3209), "i" (12UL)); ldv_50506: ; goto ldv_50506; } else { } ret = i915_gem_object_wait_rendering(obj, 0); if (ret != 0) { return (ret); } else { } tmp___3 = i915_is_ggtt(vma->vm); if ((int )tmp___3 && (unsigned int )vma->ggtt_view.type == 0U) { i915_gem_object_finish_gtt(obj); ret = i915_gem_object_put_fence(obj); if (ret != 0) { return (ret); } else { } } else { } trace_i915_vma_unbind(vma); (*((vma->vm)->unbind_vma))(vma); vma->bound = 0U; list_del_init(& vma->mm_list); tmp___4 = i915_is_ggtt(vma->vm); if ((int )tmp___4) { if ((unsigned int )vma->ggtt_view.type == 0U) { obj->map_and_fenceable = 0U; } else if ((unsigned long )vma->ggtt_view.pages != (unsigned long )((struct sg_table *)0)) { sg_free_table(vma->ggtt_view.pages); kfree((void const *)vma->ggtt_view.pages); } else { } vma->ggtt_view.pages = (struct sg_table *)0; } else { } drm_mm_remove_node(& 
vma->node); i915_gem_vma_destroy(vma); tmp___5 = list_empty((struct list_head const *)(& obj->vma_list)); if (tmp___5 != 0) { i915_gem_gtt_finish_object(obj); list_move_tail(& obj->global_list, & dev_priv->mm.unbound_list); } else { } i915_gem_object_unpin_pages___2(obj); return (0); } } int i915_gpu_idle(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int i ; bool tmp ; int __ret_warn_on ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_50515; ldv_50514: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { if (i915.enable_execlists == 0) { ret = i915_switch_context(ring, ring->default_context); if (ret != 0) { return (ret); } else { } } else { } ret = intel_ring_idle(ring); if (ret != 0) { return (ret); } else { } } else { } i = i + 1; ldv_50515: ; if (i <= 4) { goto ldv_50514; } else { } __ret_warn_on = 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3283, "WARN_ON(i915_verify_lists(dev))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (0); } } static void i965_write_fence_reg(struct drm_device *dev , int reg , struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; int fence_reg ; int fence_pitch_shift ; struct drm_i915_private *__p ; u32 size ; unsigned long tmp ; uint64_t val ; uint32_t row_size ; unsigned long tmp___0 ; unsigned long tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { fence_reg = 1048576; fence_pitch_shift = 32; } else { fence_reg = 12288; fence_pitch_shift = 2; } 
fence_reg = reg * 8 + fence_reg; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )fence_reg, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )fence_reg, 0); if ((unsigned long )obj != (unsigned long )((struct drm_i915_gem_object *)0)) { tmp = i915_gem_obj_ggtt_size(obj); size = (u32 )tmp; if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { row_size = obj->stride * ((unsigned int )*((unsigned char *)obj + 409UL) == 128U ? 32U : 8U); size = (size / row_size) * row_size; } else { } tmp___0 = i915_gem_obj_ggtt_offset(obj); val = ((unsigned long long )((tmp___0 + (unsigned long )size) - 4096UL) & 4294963200ULL) << 32; tmp___1 = i915_gem_obj_ggtt_offset(obj); val = ((unsigned long long )tmp___1 & 4294963200ULL) | val; val = ((unsigned long long )(obj->stride / 128U - 1U) << fence_pitch_shift) | val; if ((unsigned int )*((unsigned char *)obj + 409UL) == 128U) { val = val | 2ULL; } else { } val = val | 1ULL; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(fence_reg + 4), (uint32_t )(val >> 32), 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(fence_reg + 4), 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )fence_reg, (uint32_t )val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )fence_reg, 0); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(fence_reg + 4), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(fence_reg + 4), 0); } return; } } static void i915_write_fence_reg(struct drm_device *dev , int reg , struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; u32 val ; u32 size ; unsigned long tmp ; int pitch_val ; int tile_width ; int __ret_warn_on ; unsigned long tmp___0 ; unsigned long tmp___1 ; int tmp___2 ; unsigned long tmp___3 ; long tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int tmp___5 ; unsigned long tmp___6 ; int tmp___7 ; { dev_priv = (struct drm_i915_private 
*)dev->dev_private; if ((unsigned long )obj != (unsigned long )((struct drm_i915_gem_object *)0)) { tmp = i915_gem_obj_ggtt_size(obj); size = (u32 )tmp; tmp___0 = i915_gem_obj_ggtt_offset(obj); if ((tmp___0 & 0xfffffffff00fffffUL) != 0UL || (- size & size) != size) { tmp___2 = 1; } else { tmp___1 = i915_gem_obj_ggtt_offset(obj); if ((tmp___1 & (unsigned long )(size - 1U)) != 0UL) { tmp___2 = 1; } else { tmp___2 = 0; } } __ret_warn_on = tmp___2; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { tmp___3 = i915_gem_obj_ggtt_offset(obj); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3361, "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", tmp___3, (int )obj->map_and_fenceable, size); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned int )*((unsigned char *)obj + 409UL) == 128U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) != 9618U) { tile_width = 128; } else { tile_width = 512; } } else { tile_width = 512; } } else { tile_width = 512; } } else { tile_width = 512; } pitch_val = (int )(obj->stride / (uint32_t )tile_width); tmp___5 = ffs(pitch_val); pitch_val = tmp___5 + -1; tmp___6 = i915_gem_obj_ggtt_offset(obj); val = (u32 )tmp___6; if ((unsigned int )*((unsigned char *)obj + 409UL) == 128U) { val = val | 4096U; } else { } tmp___7 = ffs((int )(size >> 20)); val = (u32 )((tmp___7 + -1) << 8) | val; val = (u32 )(pitch_val << 4) | val; val = val | 1U; } else { val = 0U; } if (reg <= 7) { reg = (reg + 2048) * 4; } else { reg = (reg + 3064) 
* 4; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } static void i830_write_fence_reg(struct drm_device *dev , int reg , struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; uint32_t val ; u32 size ; unsigned long tmp ; uint32_t pitch_val ; int __ret_warn_on ; unsigned long tmp___0 ; unsigned long tmp___1 ; int tmp___2 ; unsigned long tmp___3 ; long tmp___4 ; int tmp___5 ; unsigned long tmp___6 ; int tmp___7 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )obj != (unsigned long )((struct drm_i915_gem_object *)0)) { tmp = i915_gem_obj_ggtt_size(obj); size = (u32 )tmp; tmp___0 = i915_gem_obj_ggtt_offset(obj); if ((tmp___0 & 0xfffffffff807ffffUL) != 0UL || (- size & size) != size) { tmp___2 = 1; } else { tmp___1 = i915_gem_obj_ggtt_offset(obj); if ((tmp___1 & (unsigned long )(size - 1U)) != 0UL) { tmp___2 = 1; } else { tmp___2 = 0; } } __ret_warn_on = tmp___2; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { tmp___3 = i915_gem_obj_ggtt_offset(obj); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3404, "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", tmp___3, size); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); pitch_val = obj->stride / 128U; tmp___5 = ffs((int )pitch_val); pitch_val = (uint32_t )(tmp___5 + -1); tmp___6 = i915_gem_obj_ggtt_offset(obj); val = (uint32_t )tmp___6; if ((unsigned int )*((unsigned char *)obj + 409UL) == 128U) { val = val | 4096U; } else { } tmp___7 = ffs((int )(size >> 19)); val = (uint32_t )((tmp___7 + -1) << 8) | val; val = (pitch_val << 4) | val; val = val | 1U; } else { val = 0U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((reg + 2048) * 4), val, 
1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((reg + 2048) * 4), 0); return; } } __inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj ) { { return ((bool )((unsigned long )obj != (unsigned long )((struct drm_i915_gem_object *)0) && (obj->base.read_domains & 64U) != 0U)); } } static void i915_gem_write_fence(struct drm_device *dev , int reg , struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; bool tmp ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj); if ((int )tmp) { __asm__ volatile ("mfence": : : "memory"); } else { } __ret_warn_on = (unsigned long )obj != (unsigned long )((struct drm_i915_gem_object *)0) && (obj->stride == 0U || (unsigned int )*((unsigned char *)obj + 409UL) == 0U); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3440, "bogus fence setup with stride: 0x%x, tiling mode: %i\n", obj->stride, (int )obj->tiling_mode); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 2U) { i830_write_fence_reg(dev, reg, obj); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 3U) { i915_write_fence_reg(dev, reg, obj); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { i965_write_fence_reg(dev, reg, obj); } else { } } } tmp___1 = i915_gem_object_needs_mb(obj); if ((int )tmp___1) { __asm__ 
volatile ("mfence": : : "memory"); } else { } return; } } __inline static int fence_number(struct drm_i915_private *dev_priv , struct drm_i915_fence_reg *fence ) { { return ((int )(((long )fence - (long )(& dev_priv->fence_regs)) / 32L)); } } static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj , struct drm_i915_fence_reg *fence , bool enable ) { struct drm_i915_private *dev_priv ; int reg ; int tmp ; { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; tmp = fence_number(dev_priv, fence); reg = tmp; i915_gem_write_fence(obj->base.dev, reg, (int )enable ? obj : (struct drm_i915_gem_object *)0); if ((int )enable) { obj->fence_reg = (signed char )reg; fence->obj = obj; list_move_tail(& fence->lru_list, & dev_priv->mm.fence_list); } else { obj->fence_reg = -1; fence->obj = (struct drm_i915_gem_object *)0; list_del_init(& fence->lru_list); } obj->fence_dirty = 0U; return; } } static int i915_gem_object_wait_fence(struct drm_i915_gem_object *obj ) { int ret ; int tmp ; { if ((unsigned long )obj->last_fenced_req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp = i915_wait_request(obj->last_fenced_req); ret = tmp; if (ret != 0) { return (ret); } else { } i915_gem_request_assign___0(& obj->last_fenced_req, (struct drm_i915_gem_request *)0); } else { } return (0); } } int i915_gem_object_put_fence(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; struct drm_i915_fence_reg *fence ; int ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; ret = i915_gem_object_wait_fence(obj); if (ret != 0) { return (ret); } else { } if ((unsigned int )*((unsigned short *)obj + 204UL) == 4032U) { return (0); } else { } fence = (struct drm_i915_fence_reg *)(& dev_priv->fence_regs) + (unsigned long )obj->fence_reg; __ret_warn_on = fence->pin_count != 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3513, "WARN_ON(fence->pin_count)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-16); } else { } i915_gem_object_fence_lost(obj); i915_gem_object_update_fence(obj, fence, 0); return (0); } } static struct drm_i915_fence_reg *i915_find_fence_reg(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_fence_reg *reg ; struct drm_i915_fence_reg *avail ; int i ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; void *tmp ; bool tmp___0 ; void *tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; avail = (struct drm_i915_fence_reg *)0; i = dev_priv->fence_reg_start; goto ldv_50637; ldv_50636: reg = (struct drm_i915_fence_reg *)(& dev_priv->fence_regs) + (unsigned long )i; if ((unsigned long )reg->obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return (reg); } else { } if (reg->pin_count == 0) { avail = reg; } else { } i = i + 1; ldv_50637: ; if (dev_priv->num_fence_regs > i) { goto ldv_50636; } else { } if ((unsigned long )avail == (unsigned long )((struct drm_i915_fence_reg *)0)) { goto deadlock; } else { } __mptr = (struct list_head const *)dev_priv->mm.fence_list.next; reg = (struct drm_i915_fence_reg *)__mptr; goto ldv_50646; ldv_50645: ; if (reg->pin_count != 0) { goto ldv_50644; } else { } return (reg); ldv_50644: __mptr___0 = (struct list_head const *)reg->lru_list.next; reg = (struct drm_i915_fence_reg *)__mptr___0; ldv_50646: ; if ((unsigned long )(& reg->lru_list) != (unsigned long )(& dev_priv->mm.fence_list)) { goto ldv_50645; } else { } deadlock: tmp___0 = intel_has_pending_fb_unpin(dev); if ((int )tmp___0) { tmp = ERR_PTR(-11L); return ((struct drm_i915_fence_reg *)tmp); } else { } tmp___1 = ERR_PTR(-35L); 
return ((struct drm_i915_fence_reg *)tmp___1); } } int i915_gem_object_get_fence(struct drm_i915_gem_object *obj ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool enable ; struct drm_i915_fence_reg *reg ; int ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; bool tmp___2 ; struct drm_i915_gem_object *old ; { dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; enable = (unsigned int )*((unsigned char *)obj + 409UL) != 0U; if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { ret = i915_gem_object_wait_fence(obj); if (ret != 0) { return (ret); } else { } } else { } if ((unsigned int )*((unsigned short *)obj + 204UL) != 4032U) { reg = (struct drm_i915_fence_reg *)(& dev_priv->fence_regs) + (unsigned long )obj->fence_reg; if ((unsigned int )*((unsigned char *)obj + 410UL) == 0U) { list_move_tail(& reg->lru_list, & dev_priv->mm.fence_list); return (0); } else { } } else if ((int )enable) { __ret_warn_on = (unsigned int )*((unsigned char *)obj + 410UL) == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3600, "WARN_ON(!obj->map_and_fenceable)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-22); } else { } reg = i915_find_fence_reg(dev); tmp___2 = IS_ERR((void const *)reg); if ((int )tmp___2) { tmp___1 = PTR_ERR((void const *)reg); return ((int )tmp___1); } else { } if ((unsigned long )reg->obj != (unsigned long )((struct drm_i915_gem_object *)0)) { old = reg->obj; ret = i915_gem_object_wait_fence(old); if (ret != 0) { return (ret); } else { } i915_gem_object_fence_lost(old); } else { } } else { return (0); } i915_gem_object_update_fence(obj, reg, (int )enable); return (0); } } static bool 
i915_gem_valid_gtt_space(struct i915_vma *vma , unsigned long cache_level ) { struct drm_mm_node *gtt_space ; struct drm_mm_node *other ; bool tmp ; int tmp___0 ; int tmp___1 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { gtt_space = & vma->node; if ((unsigned long )(vma->vm)->mm.color_adjust == (unsigned long )((void (*)(struct drm_mm_node * , unsigned long , u64 * , u64 * ))0)) { return (1); } else { } tmp = drm_mm_node_allocated(gtt_space); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (1); } else { } tmp___1 = list_empty((struct list_head const *)(& gtt_space->node_list)); if (tmp___1 != 0) { return (1); } else { } __mptr = (struct list_head const *)gtt_space->node_list.prev; other = (struct drm_mm_node *)__mptr; if (((unsigned int )*((unsigned char *)other + 32UL) != 0U && (unsigned int )*((unsigned char *)other + 32UL) == 0U) && other->color != cache_level) { return (0); } else { } __mptr___0 = (struct list_head const *)gtt_space->node_list.next; other = (struct drm_mm_node *)__mptr___0; if (((unsigned int )*((unsigned char *)other + 32UL) != 0U && (unsigned int )*((unsigned char *)gtt_space + 32UL) == 0U) && other->color != cache_level) { return (0); } else { } return (1); } } static struct i915_vma *i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj , struct i915_address_space *vm , struct i915_ggtt_view const *ggtt_view , unsigned int alignment , uint64_t flags ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 size ; u32 fence_size ; u32 fence_alignment ; u32 unfenced_alignment ; unsigned long start ; unsigned long end ; struct i915_vma *vma ; int ret ; u32 view_size ; void *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; size_t tmp___2 ; bool tmp___3 ; long tmp___4 ; void *tmp___5 ; long tmp___6 ; void *tmp___7 ; void *tmp___8 ; struct i915_vma *tmp___9 ; struct i915_vma *tmp___10 ; bool tmp___11 ; int __ret_warn_on___0 ; bool tmp___12 ; int tmp___13 ; long tmp___14 ; 
long tmp___15 ; void *tmp___16 ; { dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; start = (flags & 8ULL) != 0ULL ? (unsigned long )flags & 0xfffffffffffff000UL : 0UL; end = (int )flags & 1 ? dev_priv->gtt.mappable_end : vm->total; tmp___3 = i915_is_ggtt(vm); if ((int )tmp___3) { __ret_warn_on = (unsigned long )ggtt_view == (unsigned long )((struct i915_ggtt_view const *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3681, "WARN_ON(!ggtt_view)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { tmp = ERR_PTR(-22L); return ((struct i915_vma *)tmp); } else { } tmp___2 = i915_ggtt_view_size(obj, ggtt_view); view_size = (u32 )tmp___2; fence_size = i915_gem_get_gtt_size(dev, view_size, (int )obj->tiling_mode); fence_alignment = i915_gem_get_gtt_alignment(dev, view_size, (int )obj->tiling_mode, 1); unfenced_alignment = i915_gem_get_gtt_alignment(dev, view_size, (int )obj->tiling_mode, 0); size = (int )flags & 1 ? fence_size : view_size; } else { fence_size = i915_gem_get_gtt_size(dev, (uint32_t )obj->base.size, (int )obj->tiling_mode); fence_alignment = i915_gem_get_gtt_alignment(dev, (uint32_t )obj->base.size, (int )obj->tiling_mode, 1); unfenced_alignment = i915_gem_get_gtt_alignment(dev, (uint32_t )obj->base.size, (int )obj->tiling_mode, 0); size = (int )flags & 1 ? fence_size : (u32 )obj->base.size; } if (alignment == 0U) { alignment = (int )flags & 1 ? 
fence_alignment : unfenced_alignment; } else { } if ((int )flags & 1 && ((fence_alignment - 1U) & alignment) != 0U) { tmp___4 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("i915_gem_object_bind_to_vm", "Invalid object (view type=%u) alignment requested %u\n", (unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0) ? (unsigned int )ggtt_view->type : 0U, alignment); } else { } tmp___5 = ERR_PTR(-22L); return ((struct i915_vma *)tmp___5); } else { } if ((unsigned long )size > end) { tmp___6 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_gem_object_bind_to_vm", "Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n", (unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0) ? (unsigned int )ggtt_view->type : 0U, size, (int )flags & 1 ? (char *)"mappable" : (char *)"total", end); } else { } tmp___7 = ERR_PTR(-7L); return ((struct i915_vma *)tmp___7); } else { } ret = i915_gem_object_get_pages(obj); if (ret != 0) { tmp___8 = ERR_PTR((long )ret); return ((struct i915_vma *)tmp___8); } else { } i915_gem_object_pin_pages___1(obj); if ((unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0)) { tmp___9 = i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view); vma = tmp___9; } else { tmp___10 = i915_gem_obj_lookup_or_create_vma(obj, vm); vma = tmp___10; } tmp___11 = IS_ERR((void const *)vma); if ((int )tmp___11) { goto err_unpin; } else { } search_free: ret = drm_mm_insert_node_in_range_generic(& vm->mm, & vma->node, (u64 )size, alignment, (unsigned long )obj->cache_level, (u64 )start, (u64 )end, 0, 0); if (ret != 0) { ret = i915_gem_evict_something(dev, vm, (int )size, alignment, (unsigned int )obj->cache_level, start, end, (unsigned int )flags); if (ret == 0) { goto search_free; } else { } goto err_free_vma; } else { } tmp___12 = 
i915_gem_valid_gtt_space(vma, (unsigned long )obj->cache_level); if (tmp___12) { tmp___13 = 0; } else { tmp___13 = 1; } __ret_warn_on___0 = tmp___13; tmp___14 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___14 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 3766, "WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))"); } else { } tmp___15 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___15 != 0L) { ret = -22; goto err_remove_node; } else { } ret = i915_gem_gtt_prepare_object(obj); if (ret != 0) { goto err_remove_node; } else { } trace_i915_vma_bind(vma, (unsigned int )flags); ret = i915_vma_bind(vma, (enum i915_cache_level )obj->cache_level, (u32 )flags); if (ret != 0) { goto err_finish_gtt; } else { } list_move_tail(& obj->global_list, & dev_priv->mm.bound_list); list_add_tail(& vma->mm_list, & vm->inactive_list); return (vma); err_finish_gtt: i915_gem_gtt_finish_object(obj); err_remove_node: drm_mm_remove_node(& vma->node); err_free_vma: i915_gem_vma_destroy(vma); tmp___16 = ERR_PTR((long )ret); vma = (struct i915_vma *)tmp___16; err_unpin: i915_gem_object_unpin_pages___2(obj); return (vma); } } bool i915_gem_clflush_object(struct drm_i915_gem_object *obj , bool force ) { bool tmp ; { if ((unsigned long )obj->pages == (unsigned long )((struct sg_table *)0)) { return (0); } else { } if ((unsigned long )obj->stolen != (unsigned long )((struct drm_mm_node *)0) || (unsigned long )obj->__annonCompField84.phys_handle != (unsigned long )((struct drm_dma_handle *)0)) { return (0); } else { } if (! 
/* NOTE(review): CIL v1.5.1-flattened LDV verification dump of drivers/gpu/drm/i915/i915_gem.c (see the paths embedded in warn_slowpath_fmt calls). Machine-generated; comments only, logic untouched. The `+ 0xffffffffffffff58UL` pointer adjustments are presumably CIL's expansion of container_of(..., struct i915_vma, vma_link) (offset -168) -- TODO confirm against the struct layout. */
/* Tail of i915_gem_clflush_object() (its definition starts before this chunk): issues drm_clflush_sg() on obj->pages, clears obj->cache_dirty, returns 1 when a flush was done; the coherent-cache fast path above returns 0 after marking cache_dirty. */
force) { tmp = cpu_cache_is_coherent(obj->base.dev, (enum i915_cache_level )obj->cache_level); if ((int )tmp) { obj->cache_dirty = 1U; return (0); } else { } } else { } trace_i915_gem_object_clflush(obj); drm_clflush_sg(obj->pages); obj->cache_dirty = 0U; return (1); } }
/* Flush a pending GTT write domain (write_domain == 0x40): sfence, clear write_domain, notify frontbuffer tracking and tracing. No-op if the object has no GTT write domain. */
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj ) { uint32_t old_write_domain ; { if (obj->base.write_domain != 64U) { return; } else { } __asm__ volatile ("sfence": : : "memory"); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0U; intel_fb_obj_flush(obj, 0); trace_i915_gem_object_change_domain___0(obj, obj->base.read_domains, old_write_domain); return; } }
/* Flush a pending CPU write domain (write_domain == 0x1): clflush the object (forced while pin_display != 0), then chipset-flush if anything was actually flushed. */
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj ) { uint32_t old_write_domain ; bool tmp ; { if (obj->base.write_domain != 1U) { return; } else { } tmp = i915_gem_clflush_object(obj, obj->pin_display != 0U); if ((int )tmp) { i915_gem_chipset_flush(obj->base.dev); } else { } old_write_domain = obj->base.write_domain; obj->base.write_domain = 0U; intel_fb_obj_flush(obj, 0); trace_i915_gem_object_change_domain___0(obj, obj->base.read_domains, old_write_domain); return; } }
/* Move the object into the GTT read (and optionally write) domain. Waits for rendering (fully when `write`), pins pages, flushes any CPU writes, then updates read/write domains. The ud2 asm + ldv_50717 self-loop is the flattened BUG_ON(write_domain & ~GTT) from i915_gem.c:3933. The `*((unsigned char *)obj + 408UL)` read is presumably a CIL-expanded bitfield test on obj (active flag?) -- TODO confirm offset. */
int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj , bool write ) { uint32_t old_write_domain ; uint32_t old_read_domains ; struct i915_vma *vma ; int ret ; long tmp ; struct drm_i915_private *tmp___0 ; bool tmp___1 ; { if (obj->base.write_domain == 64U) { return (0); } else { } ret = i915_gem_object_wait_rendering(obj, (int )((bool )(! ((int )write != 0)))); if (ret != 0) { return (ret); } else { } ret = i915_gem_object_get_pages(obj); if (ret != 0) { return (ret); } else { } i915_gem_object_flush_cpu_write_domain(obj); if ((obj->base.read_domains & 64U) == 0U) { __asm__ volatile ("mfence": : : "memory"); } else { } old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; tmp = ldv__builtin_expect((obj->base.write_domain & 4294967231U) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (3933), "i" (12UL)); ldv_50717: ; goto ldv_50717; } else { } obj->base.read_domains = obj->base.read_domains | 64U; if ((int )write) { obj->base.read_domains = 64U; obj->base.write_domain = 64U; obj->dirty = 1U; } else { } if ((int )write) { intel_fb_obj_invalidate(obj, (struct intel_engine_cs *)0, 0); } else { } trace_i915_gem_object_change_domain___0(obj, old_read_domains, old_write_domain); vma = i915_gem_obj_to_ggtt(obj); if ((unsigned long )vma != (unsigned long )((struct i915_vma *)0)) { tmp___1 = drm_mm_node_allocated(& vma->node); if ((int )tmp___1) { if ((unsigned int )*((unsigned char *)obj + 408UL) == 0U) { tmp___0 = to_i915((struct drm_device const *)obj->base.dev); list_move_tail(& vma->mm_list, & tmp___0->gtt.base.inactive_list); } else { } } else { } } else { } return (0); } }
/* Change the object's cache level. Rejects pinned objects (-16 == -EBUSY). Three flattened list_for_each loops over obj->vma_list: (1) ldv_50733/50734 unbinds VMAs whose GTT space is invalid for the new level; (2) ldv_50746/50747 rebinds allocated nodes via i915_vma_bind(..., 32U); (3) ldv_50753/50754 recolors every node. Finally clflushes if a CPU write-back is needed. */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj , enum i915_cache_level cache_level ) { struct drm_device *dev ; struct i915_vma *vma ; struct i915_vma *next ; int ret ; long tmp ; bool tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; bool tmp___1 ; int tmp___2 ; struct list_head const *__mptr___1 ; struct drm_i915_private *__p ; struct list_head const *__mptr___2 ; bool tmp___3 ; struct list_head const *__mptr___3 ; bool tmp___4 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; bool tmp___5 ; bool tmp___6 ; { dev = obj->base.dev; if ((unsigned int )obj->cache_level == (unsigned int )cache_level) { return (0); } else { } tmp___0 = i915_gem_obj_is_pinned(obj); if ((int )tmp___0) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_object_set_cache_level", "can not change the cache level of pinned objects\n"); } else { } return (-16); } else { } __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; __mptr___0 = (struct list_head const *)vma->vma_link.next; next = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; goto ldv_50734; ldv_50733: tmp___1 = i915_gem_valid_gtt_space(vma, (unsigned long )cache_level); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { ret = i915_vma_unbind(vma); if (ret != 0) { return (ret); } else { } } else { } vma = next; __mptr___1 = (struct list_head const *)next->vma_link.next; next = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; ldv_50734: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_50733; } else { } tmp___4 = i915_gem_obj_bound_any(obj); if ((int )tmp___4) { ret = i915_gem_object_wait_rendering(obj, 0); if (ret != 0) { return (ret); } else { } i915_gem_object_finish_gtt(obj); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { ret = i915_gem_object_put_fence(obj); if (ret != 0) { return (ret); } else { } } else { } __mptr___2 = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr___2 + 0xffffffffffffff58UL; goto ldv_50747; ldv_50746: tmp___3 = drm_mm_node_allocated(& vma->node); if ((int )tmp___3) { ret = i915_vma_bind(vma, cache_level, 32U); if (ret != 0) { return (ret); } else { } } else { } __mptr___3 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___3 + 0xffffffffffffff58UL; ldv_50747: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_50746; } else { } } else { } __mptr___4 = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr___4 + 0xffffffffffffff58UL; goto ldv_50754; ldv_50753: vma->node.color = (unsigned long )cache_level; __mptr___5 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___5 + 0xffffffffffffff58UL; ldv_50754: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_50753; } else { } obj->cache_level = (unsigned char )cache_level; if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U && obj->base.write_domain != 1U) { tmp___6 = cpu_write_needs_clflush(obj); if ((int )tmp___6) { tmp___5 = i915_gem_clflush_object(obj, 1); if ((int )tmp___5) { i915_gem_chipset_flush(obj->base.dev); } else { } } else { } } else { } return (0); } }
/* GET_CACHING ioctl: map internal cache_level (1,2 -> 1; 3 -> 2; else 0) into args->caching. Returns -2 (-ENOENT) on bad handle. */
int i915_gem_get_caching_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_caching *args ; struct drm_i915_gem_object *obj ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; { args = (struct drm_i915_gem_caching *)data; tmp = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { return (-2); } else { } switch ((int )obj->cache_level) { case 1: ; case 2: args->caching = 1U; goto ldv_50767; case 3: args->caching = 2U; goto ldv_50767; default: args->caching = 0U; goto ldv_50767; } ldv_50767: drm_gem_object_unreference_unlocked(& obj->base); return (0); } }
/* SET_CACHING ioctl (continues on the next chunk): translate args->caching to a cache level (eLLC path when ellc_size != 0), then call i915_gem_object_set_cache_level under struct_mutex. */
int i915_gem_set_caching_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_caching *args ; struct
/* NOTE(review): CIL-flattened machine-generated code (i915_gem.c, LDV harness); comments only. */
/* Continuation of i915_gem_set_caching_ioctl() (declaration starts on the previous chunk): the `*((unsigned char *)__p + 45UL)` reads are presumably CIL-expanded platform bitfield tests (HAS_LLC/IS_* checks) -- TODO confirm offsets; level 3 is chosen only when ellc_size != 0. Errors: -22 (-EINVAL) for unknown caching mode, -2 (-ENOENT) for bad handle. */
drm_i915_gem_object *obj ; enum i915_cache_level level ; int ret ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; { args = (struct drm_i915_gem_caching *)data; switch (args->caching) { case 0U: level = 0; goto ldv_50780; case 1U: level = 1; goto ldv_50780; case 2U: __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 8U) { _L: /* CIL Label */ __p___6 = to_i915((struct drm_device const *)dev); if (__p___6->ellc_size != 0UL) { level = 3; } else { level = 0; } } else { level = 0; } } else { level = 0; } } goto ldv_50780; default: ; return (-22); } ldv_50780: ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } tmp = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } ret = i915_gem_object_set_cache_level(obj, level); drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); return (ret); } }
/* Pin an object for scanout: sync against `pipelined`, bump pin_display, pick a display cache level (same eLLC selection pattern as set_caching above), GGTT-pin it, flush CPU writes and add the GTT read domain. pin_display is decremented on every error path (err_unpin_display). */
int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj , u32 alignment , struct intel_engine_cs *pipelined , struct i915_ggtt_view const *view ) { u32 old_read_domains ; u32 old_write_domain ; int ret ; int tmp ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { ret = i915_gem_object_sync(obj, pipelined); if (ret != 0) { return (ret); } else { } obj->pin_display = obj->pin_display + 1U; __p___3 = to_i915((struct drm_device const *)obj->base.dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)obj->base.dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)obj->base.dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 8U) { _L: /* CIL Label */ __p___6 = to_i915((struct drm_device const *)obj->base.dev); if (__p___6->ellc_size != 0UL) { tmp = 3; } else { tmp = 0; } } else { tmp = 0; } } else { tmp = 0; } } ret = i915_gem_object_set_cache_level(obj, tmp); if (ret != 0) { goto err_unpin_display; } else { } ret = i915_gem_object_ggtt_pin(obj, view, alignment, (unsigned int )view->type == 0U); if (ret != 0) { goto err_unpin_display; } else { } i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; obj->base.write_domain = 0U; obj->base.read_domains = obj->base.read_domains | 64U; trace_i915_gem_object_change_domain___0(obj, old_read_domains, old_write_domain); return (0); err_unpin_display: obj->pin_display = obj->pin_display - 1U; return (ret); } }
/* Inverse of pin_to_display_plane: WARN (flattened WARN_ON, i915_gem.c:4162) and bail if pin_display is already 0, else unpin the view and decrement pin_display. */
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) { int __ret_warn_on ; long tmp ; long tmp___0 ; { __ret_warn_on = obj->pin_display == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4162, "WARN_ON(obj->pin_display == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } i915_gem_object_ggtt_unpin_view(obj, view); obj->pin_display = obj->pin_display - 1U; return; } }
/* Move the object into the CPU read (and optionally write) domain: wait for rendering, flush GTT writes, clflush if the CPU read domain is missing, then set domains. The ud2/ldv_50858 loop is the flattened BUG_ON(write_domain & ~CPU) from i915_gem.c:4204. */
int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj , bool write ) { uint32_t old_write_domain ; uint32_t old_read_domains ; int ret ; long tmp ; { if (obj->base.write_domain == 1U) { return (0); } else { } ret = i915_gem_object_wait_rendering(obj, (int )((bool )(! ((int )write != 0)))); if (ret != 0) { return (ret); } else { } i915_gem_object_flush_gtt_write_domain(obj); old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; if ((obj->base.read_domains & 1U) == 0U) { i915_gem_clflush_object(obj, 0); obj->base.read_domains = obj->base.read_domains | 1U; } else { } tmp = ldv__builtin_expect((obj->base.write_domain & 4294967294U) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (4204), "i" (12UL)); ldv_50858: ; goto ldv_50858; } else { } if ((int )write) { obj->base.read_domains = 1U; obj->base.write_domain = 1U; } else { } if ((int )write) { intel_fb_obj_invalidate(obj, (struct intel_engine_cs *)0, 1); } else { } trace_i915_gem_object_change_domain___0(obj, old_read_domains, old_write_domain); return (0); } }
/* Throttle: walk this file's request list (ldv_50881/50882 loop; `+ 0xffffffffffffffa0UL` is presumably container_of(..., struct drm_i915_gem_request, client_list), offset -96 -- TODO confirm) to find the newest request older than jiffies - 20ms, reference it under mm.lock, wait on it outside the lock, and kick retire_work on success. */
static int i915_gem_ring_throttle(struct drm_device *dev , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_file_private *file_priv ; unsigned long recent_enough ; unsigned long tmp ; struct drm_i915_gem_request *request ; struct drm_i915_gem_request *target ; unsigned int reset_counter ; int ret ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; file_priv = (struct drm_i915_file_private *)file->driver_priv; tmp = msecs_to_jiffies(20U); recent_enough = (unsigned long )jiffies - tmp; target = (struct drm_i915_gem_request *)0; ret = i915_gem_wait_for_error(& dev_priv->gpu_error); if (ret != 0) { return (ret); } else { } ret = i915_gem_check_wedge(& dev_priv->gpu_error, 0); if (ret != 0) { return (ret); } else { } spin_lock(& file_priv->mm.lock); __mptr = (struct list_head const *)file_priv->mm.request_list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffa0UL; goto ldv_50882; ldv_50881: ; if ((long )(request->emitted_jiffies - recent_enough) >= 0L) { goto ldv_50880; } else { } target = request; __mptr___0 = (struct list_head const *)request->client_list.next; request = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffffa0UL; ldv_50882: ; if ((unsigned long )(& request->client_list) != (unsigned long )(& file_priv->mm.request_list)) { goto ldv_50881; } else { } ldv_50880: tmp___0 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); reset_counter = (unsigned int )tmp___0; if ((unsigned long )target != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_reference___1(target); } else { } spin_unlock(& file_priv->mm.lock); if ((unsigned long )target == (unsigned long )((struct drm_i915_gem_request *)0)) { return (0); } else { } ret = __i915_wait_request(target, reset_counter, 1, (s64 *)0LL, (struct intel_rps_client *)0); if (ret == 0) { queue_delayed_work___0(dev_priv->wq, & dev_priv->mm.retire_work, 0UL); } else { } i915_gem_request_unreference__unlocked___0(target); return (ret); } }
/* True if a bound VMA violates the pin request: misaligned start, PIN_MAPPABLE (flags & 1) requested but the obj bitfield at offset 410 (presumably map_and_fenceable -- TODO confirm) is clear, or PIN_OFFSET_BIAS (flags & 8) with start below the page-masked bias. */
static bool i915_vma_misplaced(struct i915_vma *vma , uint32_t alignment , uint64_t flags ) { struct drm_i915_gem_object *obj ; { obj = vma->obj; if (alignment != 0U && (vma->node.start & (u64 )(alignment - 1U)) != 0ULL) { return (1); } else { } if ((int )flags & 1 && (unsigned int )*((unsigned char *)obj + 410UL) == 0U) { return (1); } else { } if ((flags & 8ULL) != 0ULL && vma->node.start < (flags & 0xfffffffffffff000ULL)) { return (1); } else { } return (0); } }
/* NOTE(review): CIL-flattened machine-generated code (i915_gem.c, LDV harness); comments only. */
/* Core pin path. Sanity WARNs (flattened WARN_ON, source lines 4307/4310/4313/4316): vm must not be the aliasing ppgtt (-19 == -ENODEV); PIN_GLOBAL|PIN_MAPPABLE (flags & 5) require a GGTT vm; PIN_MAPPABLE alone is invalid; ggtt_view must be given iff vm is GGTT (-22 == -EINVAL). Then: look up the existing VMA (by view for GGTT, by vm otherwise); if it is misplaced, log via WARN and unbind it; bind or create a VMA as needed; for the normal GGTT view, recompute fence_size/fence_alignment and the map_and_fenceable bit when the bound flags changed; finally increment vma->pin_count. `*((unsigned char *)vma + 232UL)` is presumably the CIL expansion of the vma->pin_count bitfield (15U == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT per the WARN text) -- offsets from CIL layout. */
static int i915_gem_object_do_pin(struct drm_i915_gem_object *obj , struct i915_address_space *vm , struct i915_ggtt_view const *ggtt_view , uint32_t alignment , uint64_t flags ) { struct drm_i915_private *dev_priv ; struct i915_vma *vma ; unsigned int bound ; int ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; int __ret_warn_on___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; int __ret_warn_on___1 ; long tmp___6 ; long tmp___7 ; int __ret_warn_on___2 ; bool tmp___8 ; long tmp___9 ; long tmp___10 ; struct i915_vma *tmp___11 ; struct i915_vma *tmp___12 ; long tmp___13 ; bool tmp___14 ; int __ret_warn_on___3 ; long tmp___15 ; long tmp___16 ; unsigned long offset ; unsigned long tmp___17 ; unsigned long tmp___18 ; int __ret_warn_on___4 ; long tmp___19 ; bool tmp___20 ; long tmp___21 ; bool tmp___22 ; bool tmp___23 ; int tmp___24 ; bool mappable ; bool fenceable ; u32 fence_size ; u32 fence_alignment ; int __ret_warn_on___5 ; long tmp___25 ; { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; __ret_warn_on = (unsigned long )(& (dev_priv->mm.aliasing_ppgtt)->base) == (unsigned long )vm; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4307, "WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-19); } else { } if ((flags & 5ULL) != 0ULL) { tmp___1 = i915_is_ggtt(vm); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp___3 = 1; } else { tmp___3 = 0; } } else { tmp___3 = 0; } __ret_warn_on___0 = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4310, "WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { return (-22); } else { } __ret_warn_on___1 = (flags & 5ULL) == 1ULL; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4313, "WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)"); } else { } tmp___7 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___7 != 0L) { return (-22); } else { } tmp___8 = i915_is_ggtt(vm); __ret_warn_on___2 = (int )tmp___8 != ((unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0)); tmp___9 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4316, "WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)"); } else { } tmp___10 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___10 != 0L) { return (-22); } else { } if ((unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0)) { tmp___11 = i915_gem_obj_to_ggtt_view(obj, ggtt_view); vma = tmp___11; } else { tmp___12 = i915_gem_obj_to_vma(obj, vm); vma = tmp___12; } tmp___14 = IS_ERR((void const *)vma); if ((int )tmp___14) { tmp___13 = PTR_ERR((void const *)vma); return ((int )tmp___13); } else { } if ((unsigned long )vma != (unsigned long )((struct i915_vma *)0)) { __ret_warn_on___3 = (unsigned int )*((unsigned char *)vma + 232UL) == 15U; tmp___15 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___15 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4326, "WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)"); } else { } tmp___16 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___16 != 0L) { return (-16); } else { } tmp___20 = i915_vma_misplaced(vma, alignment, flags); if ((int )tmp___20) { if ((unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0)) { tmp___17 = i915_gem_obj_ggtt_offset_view(obj, ggtt_view); offset = tmp___17; } else { tmp___18 = i915_gem_obj_offset(obj, vm); offset = tmp___18; } __ret_warn_on___4 = (unsigned int )*((unsigned char *)vma + 232UL) != 0U; tmp___19 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___19 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4341, "bo is already pinned in %s with incorrect alignment: offset=%lx, req.alignment=%x, req.map_and_fenceable=%d, obj->map_and_fenceable=%d\n", (unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0) ? (char *)"ggtt" : (char *)"ppgtt", offset, alignment, (int )flags & 1, (int )obj->map_and_fenceable); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); ret = i915_vma_unbind(vma); if (ret != 0) { return (ret); } else { } vma = (struct i915_vma *)0; } else { } } else { } bound = (unsigned long )vma != (unsigned long )((struct i915_vma *)0) ? (unsigned int )vma->bound : 0U; if ((unsigned long )vma == (unsigned long )((struct i915_vma *)0)) { goto _L; } else { tmp___23 = drm_mm_node_allocated(& vma->node); if (tmp___23) { tmp___24 = 0; } else { tmp___24 = 1; } if (tmp___24) { _L: /* CIL Label */ vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment, flags); tmp___22 = IS_ERR((void const *)vma); if ((int )tmp___22) { tmp___21 = PTR_ERR((void const *)vma); return ((int )tmp___21); } else { } } else { ret = i915_vma_bind(vma, (enum i915_cache_level )obj->cache_level, (u32 )flags); if (ret != 0) { return (ret); } else { } } } if (((unsigned long )ggtt_view != (unsigned long )((struct i915_ggtt_view const *)0) && (unsigned int )ggtt_view->type == 0U) && (int )((unsigned int )vma->bound ^ bound) & 1) { fence_size = i915_gem_get_gtt_size(obj->base.dev, (uint32_t )obj->base.size, (int )obj->tiling_mode); fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, (uint32_t )obj->base.size, (int )obj->tiling_mode, 1); fenceable = (bool )(vma->node.size == (u64 )fence_size && (vma->node.start & (u64 )(fence_alignment - 1U)) == 0ULL); mappable = vma->node.start + (u64 )fence_size <= (unsigned long long )dev_priv->gtt.mappable_end; obj->map_and_fenceable = (unsigned char )((int )mappable && (int )fenceable); __ret_warn_on___5 = (int )flags & 1 && (unsigned int )*((unsigned char *)obj + 410UL) == 0U; tmp___25 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___25 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4383, "WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable)"); } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); } else { } vma->pin_count = (unsigned char )((int )vma->pin_count + 1); return (0); } }
/* i915_gem_object_pin: thin wrapper over do_pin (body continues on the next chunk) -- passes the normal GGTT view when vm is the GGTT. */
int i915_gem_object_pin(struct drm_i915_gem_object *obj , struct i915_address_space *vm ,
/* NOTE(review): CIL-flattened machine-generated code (i915_gem.c, LDV harness); comments only. This chunk begins mid-signature of i915_gem_object_pin (declared on the previous chunk). */
uint32_t alignment , uint64_t flags ) { bool tmp ; int tmp___0 ; { tmp = i915_is_ggtt(vm); tmp___0 = i915_gem_object_do_pin(obj, vm, (int )tmp ? & i915_ggtt_view_normal : (struct i915_ggtt_view const *)0, alignment, flags); return (tmp___0); } }
/* GGTT pin with an explicit view: WARN_ON_ONCE(!view) pattern (the __warned static guards one-shot reporting, source line 4407), then delegate to do_pin against the device's GGTT base with PIN_GLOBAL (flags | 4) forced. */
int i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view , uint32_t alignment , uint64_t flags ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; { __ret_warn_once = (unsigned long )view == (unsigned long )((struct i915_ggtt_view const *)0); tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4407, "no view specified"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { return (-22); } else { } tmp___3 = i915_gem_object_do_pin(obj, & ((struct drm_i915_private *)(obj->base.dev)->dev_private)->gtt.base, view, alignment, flags | 4ULL); return (tmp___3); } }
/* Unpin a GGTT view: BUG (ud2 + ldv_50941 self-loop, source line 4420) if no VMA exists, WARN if pin_count is already 0 or the view is not bound, then decrement pin_count. The `*((unsigned char *)vma + 232UL)` read is presumably the pin_count bitfield (CIL layout) -- TODO confirm. */
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) { struct i915_vma *vma ; struct i915_vma *tmp ; long tmp___0 ; int __ret_warn_on ; long tmp___1 ; int __ret_warn_on___0 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; { tmp = i915_gem_obj_to_ggtt_view(obj, view); vma = tmp; tmp___0 = ldv__builtin_expect((unsigned long )vma == (unsigned long )((struct i915_vma *)0), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (4420), "i" (12UL)); ldv_50941: ; goto ldv_50941; } else { } __ret_warn_on = (unsigned int )*((unsigned char *)vma + 232UL) == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4421, "WARN_ON(vma->pin_count == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = i915_gem_obj_ggtt_bound_view(obj, view); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } __ret_warn_on___0 = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4422, "WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); vma->pin_count = (unsigned char )((int )vma->pin_count - 1); return; } }
/* Pin the object's fence register: the `*((unsigned short *)obj + 204UL) != 4032U` test is presumably the CIL expansion of obj->fence_reg != I915_FENCE_REG_NONE -- TODO confirm. WARNs (source line 4436) if there is no GGTT VMA or the fence pin_count would exceed the VMA pin_count; returns 1 when a fence was pinned, 0 otherwise. */
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; struct i915_vma *ggtt_vma ; struct i915_vma *tmp ; int __ret_warn_on ; long tmp___0 ; { if ((unsigned int )*((unsigned short *)obj + 204UL) != 4032U) { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; tmp = i915_gem_obj_to_ggtt(obj); ggtt_vma = tmp; __ret_warn_on = (unsigned long )ggtt_vma == (unsigned long )((struct i915_vma *)0) || dev_priv->fence_regs[(int )obj->fence_reg].pin_count > (int )ggtt_vma->pin_count; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4436, "WARN_ON(!ggtt_vma || dev_priv->fence_regs[obj->fence_reg].pin_count > ggtt_vma->pin_count)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); dev_priv->fence_regs[(int )obj->fence_reg].pin_count = dev_priv->fence_regs[(int )obj->fence_reg].pin_count + 1; return (1); } else { return (0); } } }
/* Unpin the object's fence register; WARN (source line 4448) if pin_count is already <= 0. */
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; long tmp ; { if ((unsigned int )*((unsigned short *)obj + 204UL) != 4032U) { dev_priv = (struct drm_i915_private *)(obj->base.dev)->dev_private; __ret_warn_on = dev_priv->fence_regs[(int )obj->fence_reg].pin_count <= 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4448, "WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); dev_priv->fence_regs[(int )obj->fence_reg].pin_count = dev_priv->fence_regs[(int )obj->fence_reg].pin_count - 1; } else { } return; } }
/* BUSY ioctl: flush the object's active work, then report active<<16 plus the ring id of the last write request. -2 == -ENOENT on bad handle. */
int i915_gem_busy_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_busy *args ; struct drm_i915_gem_object *obj ; int ret ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; { args = (struct drm_i915_gem_busy *)data; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } tmp = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } ret = i915_gem_object_flush_active(obj); if (ret != 0) { goto unref; } else { } args->busy = (__u32 )((int )obj->active << 16); if ((unsigned long )obj->last_write_req != (unsigned long )((struct drm_i915_gem_request *)0)) { args->busy = args->busy | (__u32 )((obj->last_write_req)->ring)->id; } else { } unref: drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); return (ret); } }
/* THROTTLE ioctl: straight delegation to i915_gem_ring_throttle. */
int i915_gem_throttle_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) { int tmp ; { tmp = i915_gem_ring_throttle(dev, file_priv); return (tmp); } }
/* MADVISE ioctl (continues on the next chunk): validates args->madv (0 or 1, else -22/-EINVAL), refuses pinned objects, and handles the quirk-bit-5 pin/unpin dance for objects with pages. The byte reads at obj+409 are presumably CIL-expanded madv bitfield tests -- TODO confirm. */
int i915_gem_madvise_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_madvise *args ; struct drm_i915_gem_object *obj ; int ret ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; args = (struct drm_i915_gem_madvise *)data; switch (args->madv) { case 1U: ; case 0U: ; goto ldv_50987; default: ; return (-22); } ldv_50987: ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { return (ret); } else { } tmp = drm_gem_object_lookup(dev, file_priv, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto unlock; } else { } tmp___0 = i915_gem_obj_is_pinned(obj); if ((int )tmp___0) { ret = -22; goto out; } else { } if (((unsigned long )obj->pages != (unsigned long )((struct sg_table *)0) && (unsigned int )*((unsigned char *)obj + 409UL) != 0U) && (dev_priv->quirks & 32UL) != 0UL) { if ((unsigned int )*((unsigned char *)obj + 409UL) == 0U) { i915_gem_object_unpin_pages___2(obj); } else { } if (args->madv == 0U) {
i915_gem_object_pin_pages___1(obj); } else { } } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) != 32U) { obj->madv = (unsigned char )args->madv; } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) == 16U && (unsigned long )obj->pages == (unsigned long )((struct sg_table *)0)) { i915_gem_object_truncate(obj); } else { } args->retained = (unsigned int )*((unsigned char *)obj + 409UL) != 32U; out: drm_gem_object_unreference___5(& obj->base); unlock: mutex_unlock(& dev->struct_mutex); return (ret); } } void i915_gem_object_init(struct drm_i915_gem_object *obj , struct drm_i915_gem_object_ops const *ops ) { int i ; { INIT_LIST_HEAD(& obj->global_list); i = 0; goto ldv_50999; ldv_50998: INIT_LIST_HEAD((struct list_head *)(& obj->ring_list) + (unsigned long )i); i = i + 1; ldv_50999: ; if (i <= 4) { goto ldv_50998; } else { } INIT_LIST_HEAD(& obj->obj_exec_link); INIT_LIST_HEAD(& obj->vma_list); INIT_LIST_HEAD(& obj->batch_pool_link); obj->ops = ops; obj->fence_reg = -1; obj->madv = 0U; i915_gem_info_add_obj((struct drm_i915_private *)(obj->base.dev)->dev_private, obj->base.size); return; } } static struct drm_i915_gem_object_ops const i915_gem_object_ops = {& i915_gem_object_get_pages_gtt, & i915_gem_object_put_pages_gtt, 0, 0}; struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev , size_t size ) { struct drm_i915_gem_object *obj ; struct address_space *mapping ; gfp_t mask ; void *tmp ; int tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct inode *tmp___1 ; struct drm_i915_private *__p___1 ; { tmp = i915_gem_object_alloc(dev); obj = (struct drm_i915_gem_object *)tmp; if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return ((struct drm_i915_gem_object *)0); } else { } tmp___0 = drm_gem_object_init(dev, & obj->base, size); if (tmp___0 != 0) { i915_gem_object_free(obj); return ((struct drm_i915_gem_object *)0); } else { } mask = 655570U; __p = to_i915((struct 
/* Auto-generated CIL/LDV lowering of drivers/gpu/drm/i915/i915_gem.c (linux 4.2-rc1).
 * Do not hand-edit logic: labels, statement order and the whole-byte bitfield
 * compares are significant to the verification tool. Comments below are review
 * annotations only. */
/* Tail of an object-creation helper whose definition starts before this chunk:
 * finishes configuring the shmem mapping gfp mask, initializes the GEM object,
 * picks the default cache level from a CIL-packed platform flag byte, traces
 * creation and returns the object. */
drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { mask = mask & 4294967293U; mask = mask | 4U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { mask = mask & 4294967293U; mask = mask | 4U; } else { } } tmp___1 = file_inode((struct file const *)obj->base.filp); mapping = tmp___1->i_mapping; mapping_set_gfp_mask(mapping, mask); i915_gem_object_init(obj, & i915_gem_object_ops); obj->base.write_domain = 1U; obj->base.read_domains = 1U; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) != 0U) { obj->cache_level = 1U; } else { obj->cache_level = 0U; } trace_i915_gem_object_create(obj); return (obj); } }
/* True when the object's backing shmem file can be discarded on free: requires
 * the CIL-packed flag byte at obj+409 (presumably obj->madv == I915_MADV_WILLNEED,
 * i.e. 0 — TODO confirm which bitfields share this byte) to be clear, and either
 * no backing filp at all, or a filp whose f_count is exactly 1 (we hold the last
 * reference, so nobody else can still map it). */
static bool discard_backing_storage(struct drm_i915_gem_object *obj ) { long tmp ; { if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { return (0); } else { } if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { return (1); } else { } tmp = atomic_long_read(& (obj->base.filp)->f_count); return (tmp == 1L); } }
/* Final destructor for a GEM object: under a runtime-PM reference, force-unbinds
 * every VMA on obj->vma_list (pin_count is cleared first; a -ERESTARTSYS (-512)
 * from i915_vma_unbind() triggers a WARN and a second, non-interruptible retry),
 * then drops page pins, marks purgeable backing storage DONTNEED, releases the
 * pages, the fake mmap offset, any dma-buf attachment and finally the object.
 * The 0xffffffffffffff58UL (= -0xa8) additions are container_of(, struct
 * i915_vma, vma_link) as lowered by CIL. */
void i915_gem_free_object(struct drm_gem_object *gem_obj ) { struct drm_i915_gem_object *obj ; struct drm_gem_object const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct i915_vma *vma ; struct i915_vma *next ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int ret ; bool was_interruptible ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; struct list_head const *__mptr___2 ; int __ret_warn_on___1 ; long tmp___3 ; int __ret_warn_on___2 ; long tmp___4 ; long tmp___5 ; bool tmp___6 ; long tmp___7 ; { __mptr = (struct drm_gem_object const *)gem_obj; obj = (struct drm_i915_gem_object *)__mptr; dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_runtime_pm_get(dev_priv); trace_i915_gem_object_destroy(obj); __mptr___0 = (struct list_head const
*)obj->vma_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; __mptr___1 = (struct list_head const *)vma->vma_link.next; next = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; goto ldv_51053; ldv_51052: vma->pin_count = 0U; ret = i915_vma_unbind(vma); __ret_warn_on___0 = ret == -512; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4674, "WARN_ON(ret == -ERESTARTSYS)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = 0; tmp = i915_vma_unbind(vma); __ret_warn_on = tmp != 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4680, "WARN_ON(i915_vma_unbind(vma))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); dev_priv->mm.interruptible = was_interruptible; } else { } vma = next; __mptr___2 = (struct list_head const *)next->vma_link.next; next = (struct i915_vma *)__mptr___2 + 0xffffffffffffff58UL; ldv_51053: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_51052; } else { } if ((unsigned long )obj->stolen != (unsigned long )((struct drm_mm_node *)0)) { i915_gem_object_unpin_pages___2(obj); } else { } __ret_warn_on___1 = (unsigned int )*((unsigned short *)obj + 206UL) != 0U; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4691, "WARN_ON(obj->frontbuffer_bits)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L);
/* NOTE(review): the condition below requires the same byte (obj+409) to be both
 * == 0 and != 0, so this branch is dead as generated; upstream tests two
 * DIFFERENT bitfields sharing that byte (madv vs pin_display) — the generator
 * appears to have dropped the masks. Verify against upstream
 * i915_gem_free_object() before relying on this path. */
if ((((unsigned long )obj->pages != (unsigned long )((struct sg_table *)0) && (unsigned int )*((unsigned char *)obj + 409UL) == 0U) && (dev_priv->quirks & 32UL) != 0UL) && (unsigned int )*((unsigned char *)obj + 409UL) != 0U) { i915_gem_object_unpin_pages___2(obj); } else { } __ret_warn_on___2 = obj->pages_pin_count != 0; tmp___4 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4698, "WARN_ON(obj->pages_pin_count)"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___5 != 0L) { obj->pages_pin_count = 0; } else { } tmp___6 = discard_backing_storage(obj); if ((int )tmp___6) { obj->madv = 1U; } else { } i915_gem_object_put_pages(obj); i915_gem_object_free_mmap_offset(obj); tmp___7 = ldv__builtin_expect((unsigned long )obj->pages != (unsigned long )((struct sg_table *)0), 0L); if (tmp___7 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (4705), "i" (12UL)); ldv_51059: ; goto ldv_51059; } else { } if ((unsigned long )obj->base.import_attach != (unsigned long )((struct dma_buf_attachment *)0)) { drm_prime_gem_destroy(& obj->base, (struct
sg_table *)0); } else { } if ((unsigned long )(obj->ops)->release != (unsigned long )((void (*/* const */)(struct drm_i915_gem_object * ))0)) { (*((obj->ops)->release))(obj); } else { } drm_gem_object_release(& obj->base); i915_gem_info_remove_obj(dev_priv, obj->base.size); kfree((void const *)obj->bit_17); i915_gem_object_free(obj); intel_runtime_pm_put(dev_priv); return; } }
/* Linear search of obj->vma_list for the VMA bound into 'vm'. GGTT VMAs with a
 * non-normal view type (ggtt_view.type != 0) are skipped. The
 * 0xffffffffffffff58UL (= -0xa8) addition is container_of(, struct i915_vma,
 * vma_link). Returns NULL when no matching VMA exists. */
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj , struct i915_address_space *vm ) { struct i915_vma *vma ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51071; ldv_51070: tmp = i915_is_ggtt(vma->vm); if ((int )tmp && (unsigned int )vma->ggtt_view.type != 0U) { goto ldv_51069; } else { } if ((unsigned long )vma->vm == (unsigned long )vm) { return (vma); } else { } ldv_51069: __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51071: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_51070; } else { } return ((struct i915_vma *)0); } }
/* GGTT-view variant of i915_gem_obj_to_vma(): WARN_ONCEs ("no view specified")
 * and returns ERR_PTR(-EINVAL) for a NULL view, otherwise returns the GGTT VMA
 * whose view compares equal via i915_ggtt_view_equal(), or NULL. */
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj , struct i915_ggtt_view const *view ) { struct i915_address_space *ggtt ; struct i915_vma *vma ; void *tmp ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; struct list_head const *__mptr ; bool tmp___4 ; struct list_head const *__mptr___0 ; { ggtt = & ((struct drm_i915_private *)(obj->base.dev)->dev_private)->gtt.base; __ret_warn_once = (unsigned long )view == (unsigned long )((struct i915_ggtt_view const *)0); tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = !
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4742, "no view specified"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } tmp___3 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___3 != 0L) { tmp = ERR_PTR(-22L); return ((struct i915_vma *)tmp); } else { } __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51089; ldv_51088: ; if ((unsigned long )vma->vm == (unsigned long )ggtt) { tmp___4 = i915_ggtt_view_equal((struct i915_ggtt_view const *)(& vma->ggtt_view), view); if ((int )tmp___4) { return (vma); } else { } } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51089: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_51088; } else { } return ((struct i915_vma *)0); } }
/* Frees a VMA: WARNs if its drm_mm node is still allocated (flag byte at
 * vma+32), silently bails if the VMA is still on an exec list, drops the ppgtt
 * reference for non-GGTT address spaces, unlinks vma_link and returns the VMA
 * to the per-device vmas slab cache. */
void i915_gem_vma_destroy(struct i915_vma *vma ) { struct i915_address_space *vm ; int __ret_warn_on ; long tmp ; int tmp___0 ; struct i915_hw_ppgtt *tmp___1 ; bool tmp___2 ; int tmp___3 ; struct drm_i915_private *tmp___4 ; { vm = (struct i915_address_space *)0; __ret_warn_on = (unsigned int )*((unsigned char *)vma + 32UL) != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4755, "WARN_ON(vma->node.allocated)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = list_empty((struct list_head const
*)(& vma->exec_list)); if (tmp___0 == 0) { return; } else { } vm = vma->vm; tmp___2 = i915_is_ggtt(vm); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { tmp___1 = i915_vm_to_ppgtt(vm); i915_ppgtt_put___0(tmp___1); } else { } list_del(& vma->vma_link); tmp___4 = to_i915((struct drm_device const *)(vma->obj)->base.dev); kmem_cache_free(tmp___4->vmas, (void *)vma); return; } }
/* Invokes the gt.stop_ring hook on each of the 5 engines that reports itself
 * initialized (loop bound i <= 4). */
static void i915_gem_stop_ringbuffers(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int i ; bool tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_51104; ldv_51103: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { (*(dev_priv->gt.stop_ring))(ring); } else { } i = i + 1; ldv_51104: ; if (i <= 4) { goto ldv_51103; } else { } return; } }
/* GEM suspend path: under struct_mutex, idles the GPU, retires completed
 * requests and stops the rings; then (mutex dropped) cancels the hangcheck and
 * retire workers and flushes the idle worker (ldv_* wrappers are LDV stubs for
 * cancel/flush_delayed_work_sync). WARNs if the device is still marked busy.
 * Returns 0 on success or the i915_gpu_idle() error. */
int i915_gem_suspend(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; int __ret_warn_on ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; mutex_lock_nested(& dev->struct_mutex, 0U); ret = i915_gpu_idle(dev); if (ret != 0) { goto err; } else { } i915_gem_retire_requests(dev); i915_gem_stop_ringbuffers(dev); mutex_unlock(& dev->struct_mutex); ldv_cancel_delayed_work_sync_298(& dev_priv->gpu_error.hangcheck_work); ldv_cancel_delayed_work_sync_299(& dev_priv->mm.retire_work); ldv_flush_delayed_work_300(& dev_priv->mm.idle_work); __ret_warn_on = (int )dev_priv->mm.busy; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 4805, "WARN_ON(dev_priv->mm.busy)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (0); err: mutex_unlock(& dev->struct_mutex); return (ret); } } int
/* i915_gem_l3_remap (its 'int' return type ends the previous line): when the
 * platform flag byte at __p+45 is set and per-slice remap data exists, emits
 * 32 LRI-style commands (opcode 0x11000001) rewriting the L3 remap registers
 * at 0xB070 + slice*0x200 (reg_base 45168) from l3_parity.remap_info; no-op
 * (returns 0) otherwise. */
i915_gem_l3_remap(struct intel_engine_cs *ring , int slice ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 reg_base ; u32 *remap_info ; int i ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; reg_base = (u32 )(slice * 512 + 45168); remap_info = dev_priv->l3_parity.remap_info[slice]; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return (0); } else { goto _L; } } else _L: /* CIL Label */ if ((unsigned long )remap_info == (unsigned long )((u32 *)0U)) { return (0); } else { } ret = intel_ring_begin(ring, 96); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_51137; ldv_51136: intel_ring_emit(ring, 285212673U); intel_ring_emit(ring, reg_base + (u32 )i); intel_ring_emit(ring, *(remap_info + (unsigned long )(i / 4))); i = i + 4; ldv_51137: ; if (i <= 127) { goto ldv_51136; } else { } intel_ring_advance(ring); return (ret); } }
/* Bit-6 swizzling setup: no-op on gen <= 4 or when bit_6_swizzle_x says none is
 * needed; sets bit 13 in the register at 0x45000, returns on gen5, then sets
 * bit 0 of TILECTL (0x101000) and programs the masked-write ARB mode register
 * per generation (gen6: 0x4030 bit 4, gen7: bit 5, gen8: 0x4A08 bit 1) or BUGs
 * (ud2) on anything else. */
void i915_gem_init_swizzling(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; uint32_t tmp ; struct drm_i915_private *__p___0 ; uint32_t tmp___0 ; int _a ; int _a___0 ; int _a___1 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U || dev_priv->mm.bit_6_swizzle_x == 0U) { return; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282624L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282624L, tmp | 8192U, 1); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 5U) { return; } else { } tmp___0 =
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1052672L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1052672L, tmp___0 | 1U, 1); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 6U) { _a = 16; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16432L, (uint32_t )((_a << 16) | _a), 1); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 7U) { _a___0 = 32; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 16432L, (uint32_t )((_a___0 << 16) | _a___0), 1); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { _a___1 = 2; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 18952L, (uint32_t )((_a___1 << 16) | _a___1), 1); } else { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (4867), "i" (12UL)); ldv_51218: ; goto ldv_51218; } } } return; } }
/* BLT engine is usable when ring_mask bit 2 is set; on gen6 parts with PCI
 * revision <= 7 it is refused with an info-level log (pre-production HW). */
static bool intel_enable_blt(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); if (((int )__p->info.ring_mask & 4) == 0) { return (0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U && (unsigned int )(dev->pdev)->revision <= 7U) { printk("\016[drm] BLT not supported on this pre-production hardware; graphics performance will be degraded.\n"); return (0); } else { } return (1); } }
/* Zeroes the control registers of one unused ring: base+0x3c, +0x34, +0x30,
 * +0x38 (CTL/HEAD/START/TAIL — TODO confirm ordering against i915 reg defs). */
static void init_unused_ring(struct drm_device *dev , u32 base ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(base + 60U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(base + 52U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(base + 48U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(base + 56U), 0U, 1); return; } }
/* Clears leftover ring registers the platform does not use: PCI device 0x3577
 * (13687U) gets five rings cleared, gen2 two, gen3 two others. */
static void init_unused_rings(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 13687U) { init_unused_ring(dev, 8208U); init_unused_ring(dev, 8400U); init_unused_ring(dev, 8416U); init_unused_ring(dev, 8432U); init_unused_ring(dev, 8448U); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 2U) { init_unused_ring(dev, 8400U); init_unused_ring(dev, 8416U); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 3U) { init_unused_ring(dev, 8208U); init_unused_ring(dev, 8224U); } else { } } } return; } }
/* Creates the render ring unconditionally, then BSD (ring_mask bit 1), BLT
 * (via intel_enable_blt), VEBOX (bit 3) and BSD2 (bit 4) as advertised, sets
 * the initial seqno to 0xFFFFF000-ish (4294963199U) and unwinds the already
 * created rings in reverse order on any failure. */
int i915_gem_init_rings(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; struct drm_i915_private *__p ; bool tmp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = intel_init_render_ring_buffer(dev); if (ret != 0) { return (ret); } else { } __p = to_i915((struct drm_device const *)dev); if (((int )__p->info.ring_mask & 2) != 0) { ret = intel_init_bsd_ring_buffer(dev); if (ret != 0) { goto cleanup_render_ring; } else { } } else { } tmp = intel_enable_blt(dev); if ((int )tmp) { ret = intel_init_blt_ring_buffer(dev); if (ret != 0) { goto cleanup_bsd_ring; } else { } } else { } __p___0 = to_i915((struct drm_device const *)dev); if (((int )__p___0->info.ring_mask & 8) != 0) { ret = intel_init_vebox_ring_buffer(dev); if (ret != 0)
{ goto cleanup_blt_ring; } else { } } else { } __p___1 = to_i915((struct drm_device const *)dev); if (((int )__p___1->info.ring_mask & 16) != 0) { ret = intel_init_bsd2_ring_buffer(dev); if (ret != 0) { goto cleanup_vebox_ring; } else { } } else { } ret = i915_gem_set_seqno(dev, 4294963199U); if (ret != 0) { goto cleanup_bsd2_ring; } else { } return (0); cleanup_bsd2_ring: intel_cleanup_ring_buffer((struct intel_engine_cs *)(& dev_priv->ring) + 4UL); cleanup_vebox_ring: intel_cleanup_ring_buffer((struct intel_engine_cs *)(& dev_priv->ring) + 3UL); cleanup_blt_ring: intel_cleanup_ring_buffer((struct intel_engine_cs *)(& dev_priv->ring) + 2UL); cleanup_bsd_ring: intel_cleanup_ring_buffer((struct intel_engine_cs *)(& dev_priv->ring) + 1UL); cleanup_render_ring: intel_cleanup_ring_buffer((struct intel_engine_cs *)(& dev_priv->ring)); return (ret); } }
/* Hardware (re)initialization: verifies the GTT can be enabled on gen <= 5
 * (-EIO otherwise), holds full forcewake for the duration, programs eLLC
 * (reg 0x9008), a platform-conditional register at 0x2214, and PCH-dependent
 * clock-gating bits (0x45010 / 0x46408 — TODO confirm names), then swizzling
 * and unused rings, runs each initialized engine's init_hw hook, replays the
 * L3 remap on the render ring for each slice (slice count derived from the
 * CIL-packed platform flag bytes), and finally brings up PPGTT and contexts,
 * tearing down the rings on a hard (non -EIO/-5) failure. */
int i915_gem_init_hw(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int i ; struct drm_i915_private *__p ; bool tmp ; int tmp___0 ; uint32_t tmp___1 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int tmp___2 ; struct drm_i915_private *__p___2 ; u32 temp ; uint32_t tmp___3 ; u32 temp___0 ; uint32_t tmp___4 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; bool tmp___5 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; int tmp___6 ; int tmp___7 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { tmp = intel_enable_gtt(); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-5); } else { } } else { } intel_uncore_forcewake_get(dev_priv, 7); if (dev_priv->ellc_size != 0UL) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 36872L, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 36872L, tmp___1 | 983040U, 1); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if (((int )__p___1->info.device_id & 240) == 32) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8724L, (uint32_t )tmp___2, 1); } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___5->pch_type == 5U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282640L, 1); temp = tmp___3; temp = temp & 4294967292U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282640L, temp, 1); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 6U) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 287752L, 1); temp___0 = tmp___4; temp___0 = temp___0 & 4294967279U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 287752L, temp___0, 1); } else { } } } else { } i915_gem_init_swizzling(dev); init_unused_rings(dev); i = 0; goto ldv_51341; ldv_51340: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___5 = intel_ring_initialized(ring); if ((int )tmp___5) { ret = (*(ring->init_hw))(ring); if (ret != 0) { goto out; } else { } } else { } i = i + 1; ldv_51341: ; if (i <= 4) { goto ldv_51340; } else { } i = 0; goto ldv_51368; ldv_51367: i915_gem_l3_remap((struct intel_engine_cs *)(& dev_priv->ring), i); i = i + 1; ldv_51368: __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) == 0U) { goto _L; } else { __p___11 = to_i915((struct drm_device const *)dev); if
(((int )__p___11->info.device_id & 240) != 32) { _L: /* CIL Label */ __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { tmp___6 = 1; } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) != 0U) { tmp___6 = 1; } else { tmp___6 = 0; } } tmp___7 = tmp___6; } else { tmp___7 = 2; } } if (tmp___7 > i) { goto ldv_51367; } else { } ret = i915_ppgtt_init_hw(dev); if (ret != 0 && ret != -5) { drm_err("PPGTT enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); } else { } ret = i915_gem_context_enable(dev_priv); if (ret != 0 && ret != -5) { drm_err("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); goto out; } else { } out: intel_uncore_forcewake_put(dev_priv, 7); return (ret); } }
/* Top-level GEM init: sanitizes the execlist module option, and on platforms
 * with the flag byte at __p+45 set writes the wake-request register (0x130090)
 * and polls its ack (0x130094 bit 0) for up to 10 ms (logs a debug message on
 * timeout — presumably the VLV gunit wake handshake, TODO confirm); installs
 * either the legacy ringbuffer or the execlist submission vtable, then runs
 * userptr init, global GTT setup, context init, ring creation and HW init
 * under struct_mutex and forcewake. An -EIO (-5) from HW init marks the GPU
 * wedged (sets bit 31 of reset_counter via the locked-or asm) but returns 0. */
int i915_gem_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; long tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i915.enable_execlists = intel_sanitize_enable_execlists(dev, i915.enable_execlists); mutex_lock_nested(& dev->struct_mutex, 0U); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245328L, 1U, 1); tmp___0 = msecs_to_jiffies(10U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_51391; ldv_51390: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if ((tmp___1 & 1U) == 0U) { ret__ = -110; } else { } goto ldv_51389; } else { } tmp___2 = drm_can_sleep___2(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_51391: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245332L, 1); if ((tmp___3 & 1U) == 0U) { goto
ldv_51390; } else { } ldv_51389: ; if (ret__ != 0) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_init", "allow wake ack timed out\n"); } else { } } else { } } else { } if (i915.enable_execlists == 0) { dev_priv->gt.execbuf_submit = & i915_gem_ringbuffer_submission; dev_priv->gt.init_rings = & i915_gem_init_rings; dev_priv->gt.cleanup_ring = & intel_cleanup_ring_buffer; dev_priv->gt.stop_ring = & intel_stop_ring_buffer; } else { dev_priv->gt.execbuf_submit = & intel_execlists_submission; dev_priv->gt.init_rings = & intel_logical_rings_init; dev_priv->gt.cleanup_ring = & intel_logical_ring_cleanup; dev_priv->gt.stop_ring = & intel_logical_ring_stop; } intel_uncore_forcewake_get(dev_priv, 7); ret = i915_gem_init_userptr(dev); if (ret != 0) { goto out_unlock; } else { } i915_gem_init_global_gtt(dev); ret = i915_gem_context_init(dev); if (ret != 0) { goto out_unlock; } else { } ret = (*(dev_priv->gt.init_rings))(dev); if (ret != 0) { goto out_unlock; } else { } ret = i915_gem_init_hw(dev); if (ret == -5) { drm_err("Failed to initialize GPU, declaring it wedged\n"); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; orl %0,%1": : "r" (2147483648U), "m" (dev_priv->gpu_error.reset_counter): "memory"); ret = 0; } else { } out_unlock: intel_uncore_forcewake_put(dev_priv, 7); mutex_unlock(& dev->struct_mutex); return (ret); } }
/* Calls the gt.cleanup_ring hook on each of the 5 engines that reports itself
 * initialized. */
void i915_gem_cleanup_ringbuffer(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int i ; bool tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_51402; ldv_51401: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { (*(dev_priv->gt.cleanup_ring))(ring); } else { } i = i + 1; ldv_51402: ; if (i <= 4) { goto ldv_51401; } else { } return; } }
/* Initializes an engine's active and request lists (used for engines that are
 * never fully brought up). */
static void init_ring_lists(struct intel_engine_cs *ring )
{ { INIT_LIST_HEAD(& ring->active_list); INIT_LIST_HEAD(& ring->request_list); return; } }
/* Links an address space into dev_priv->vm_list and initializes its active,
 * inactive and global-link lists; the drm_mm allocator is only initialized for
 * non-GGTT spaces (the GGTT manages its own range). */
void i915_init_vm(struct drm_i915_private *dev_priv , struct i915_address_space *vm ) { bool tmp ; int tmp___0 ; { tmp = i915_is_ggtt(vm); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { drm_mm_init(& vm->mm, (u64 )vm->start, (u64 )vm->total); } else { } vm->dev = dev_priv->dev; INIT_LIST_HEAD(& vm->active_list); INIT_LIST_HEAD(& vm->inactive_list); INIT_LIST_HEAD(& vm->global_link); list_add_tail(& vm->global_link, & dev_priv->vm_list); return; } }
/* One-time GEM state construction at driver load: slab caches for objects,
 * VMAs and requests; VM/context/LRU lists; the ring and fence-register lists;
 * the retire and idle delayed-work items (the expanded INIT_DELAYED_WORK
 * boilerplate with lockdep keys); the fence-register count per platform (or,
 * further below, read from the vGPU register when virtualized); then fence
 * restore, swizzle detection, the pending-flip waitqueue, shrinker init and
 * the frontbuffer-tracking mutex. */
void i915_gem_load(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; atomic_long_t __constr_expr_1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; uint32_t tmp ; bool tmp___0 ; struct lock_class_key __key___4 ; struct lock_class_key __key___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->objects = kmem_cache_create("i915_gem_object", 592UL, 0UL, 8192UL, (void (*)(void * ))0); dev_priv->vmas = kmem_cache_create("i915_gem_vma", 240UL, 0UL, 8192UL, (void (*)(void * ))0); dev_priv->requests = kmem_cache_create("i915_gem_request", 144UL, 0UL, 8192UL, (void (*)(void * ))0); INIT_LIST_HEAD(& dev_priv->vm_list); i915_init_vm(dev_priv, & dev_priv->gtt.base); INIT_LIST_HEAD(& dev_priv->context_list); INIT_LIST_HEAD(& dev_priv->mm.unbound_list); INIT_LIST_HEAD(& dev_priv->mm.bound_list); INIT_LIST_HEAD(& dev_priv->mm.fence_list); i = 0; goto ldv_51417; ldv_51416: init_ring_lists((struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i); i = i + 1; ldv_51417: ; if (i <= 4) { goto ldv_51416; } else { } i = 0; goto
ldv_51420; ldv_51419: INIT_LIST_HEAD(& dev_priv->fence_regs[i].lru_list); i = i + 1; ldv_51420: ; if (i <= 31) { goto ldv_51419; } else { } __init_work(& dev_priv->mm.retire_work.work, 0); __constr_expr_0___0.counter = 137438953408L; dev_priv->mm.retire_work.work.data = __constr_expr_0___0; lockdep_init_map(& dev_priv->mm.retire_work.work.lockdep_map, "(&(&dev_priv->mm.retire_work)->work)", & __key, 0); INIT_LIST_HEAD(& dev_priv->mm.retire_work.work.entry); dev_priv->mm.retire_work.work.func = & i915_gem_retire_work_handler; init_timer_key(& dev_priv->mm.retire_work.timer, 2097152U, "(&(&dev_priv->mm.retire_work)->timer)", & __key___0); dev_priv->mm.retire_work.timer.function = & delayed_work_timer_fn; dev_priv->mm.retire_work.timer.data = (unsigned long )(& dev_priv->mm.retire_work); __init_work(& dev_priv->mm.idle_work.work, 0); __constr_expr_1.counter = 137438953408L; dev_priv->mm.idle_work.work.data = __constr_expr_1; lockdep_init_map(& dev_priv->mm.idle_work.work.lockdep_map, "(&(&dev_priv->mm.idle_work)->work)", & __key___1, 0); INIT_LIST_HEAD(& dev_priv->mm.idle_work.work.entry); dev_priv->mm.idle_work.work.func = & i915_gem_idle_work_handler; init_timer_key(& dev_priv->mm.idle_work.timer, 2097152U, "(&(&dev_priv->mm.idle_work)->timer)", & __key___2); dev_priv->mm.idle_work.timer.function = & delayed_work_timer_fn; dev_priv->mm.idle_work.timer.data = (unsigned long )(& dev_priv->mm.idle_work); __init_waitqueue_head(& dev_priv->gpu_error.reset_queue, "&dev_priv->gpu_error.reset_queue", & __key___3); dev_priv->relative_constants_mode = 0; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 6U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { dev_priv->num_fence_regs = 32; } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) {
dev_priv->num_fence_regs = 16; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 10098U) { dev_priv->num_fence_regs = 16; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { dev_priv->num_fence_regs = 16; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) != 0U) { dev_priv->num_fence_regs = 16; } else { dev_priv->num_fence_regs = 8; } } } } tmp___0 = intel_vgpu_active(dev); if ((int )tmp___0) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 491600L, 1); dev_priv->num_fence_regs = (int )tmp; } else { } INIT_LIST_HEAD(& dev_priv->mm.fence_list); i915_gem_restore_fences(dev); i915_gem_detect_bit_6_swizzle(dev); __init_waitqueue_head(& dev_priv->pending_flip_queue, "&dev_priv->pending_flip_queue", & __key___4); dev_priv->mm.interruptible = 1; i915_gem_shrinker_init(dev_priv); __mutex_init(& dev_priv->fb_tracking.lock, "&dev_priv->fb_tracking.lock", & __key___5); return; } }
/* drm file-close hook: under the per-file mm.lock, detaches every in-flight
 * request from the closing file (clears request->file_priv and unlinks
 * client_list); then, if the file is on the RPS client list, removes it under
 * rps.client_lock. */
void i915_gem_release(struct drm_device *dev , struct drm_file *file ) { struct drm_i915_file_private *file_priv ; struct drm_i915_gem_request *request ; struct list_head const *__mptr ; int tmp ; struct drm_i915_private *tmp___0 ; struct drm_i915_private *tmp___1 ; int tmp___2 ; { file_priv = (struct drm_i915_file_private *)file->driver_priv; spin_lock(& file_priv->mm.lock); goto ldv_51476; ldv_51475: __mptr = (struct list_head const *)file_priv->mm.request_list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffa0UL; list_del(& request->client_list); request->file_priv = (struct drm_i915_file_private *)0; ldv_51476: tmp = list_empty((struct list_head const *)(& file_priv->mm.request_list)); if (tmp == 0) { goto ldv_51475; } else { } spin_unlock(& file_priv->mm.lock); tmp___2 = list_empty((struct list_head const *)(& file_priv->rps.link)); if (tmp___2 == 0) {
tmp___0 = to_i915((struct drm_device const *)dev); spin_lock(& tmp___0->rps.client_lock); list_del(& file_priv->rps.link); tmp___1 = to_i915((struct drm_device const *)dev); spin_unlock(& tmp___1->rps.client_lock); } else { } return; } }
/* drm file-open hook: kzallocs the per-file GEM state (248 bytes, GFP flags
 * 208U), wires up dev_priv/file back-pointers, initializes the RPS link,
 * request list and mm.lock spinlock (with a lockdep key), then opens a GEM
 * context; the state is freed again if context creation fails. Returns 0 or
 * -ENOMEM / the context error. */
int i915_gem_open(struct drm_device *dev , struct drm_file *file ) { struct drm_i915_file_private *file_priv ; int ret ; long tmp ; void *tmp___0 ; struct lock_class_key __key ; { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_gem_open", "\n"); } else { } tmp___0 = kzalloc(248UL, 208U); file_priv = (struct drm_i915_file_private *)tmp___0; if ((unsigned long )file_priv == (unsigned long )((struct drm_i915_file_private *)0)) { return (-12); } else { } file->driver_priv = (void *)file_priv; file_priv->dev_priv = (struct drm_i915_private *)dev->dev_private; file_priv->file = file; INIT_LIST_HEAD(& file_priv->rps.link); spinlock_check(& file_priv->mm.lock); __raw_spin_lock_init(& file_priv->mm.lock.__annonCompField18.rlock, "&(&file_priv->mm.lock)->rlock", & __key); INIT_LIST_HEAD(& file_priv->mm.request_list); ret = i915_gem_context_open(dev, file); if (ret != 0) { kfree((void const *)file_priv); } else { } return (ret); } }
/* Moves frontbuffer tracking bits from 'old' to 'new' (either may be NULL):
 * for each side it WARNs unless struct_mutex is held, WARNs if the bits are
 * absent on old / already present on new, then clears them on old and sets
 * them on new. */
void i915_gem_track_fb(struct drm_i915_gem_object *old , struct drm_i915_gem_object *new , unsigned int frontbuffer_bits ) { int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; int __ret_warn_on___1 ; int tmp___2 ; long tmp___3 ; int __ret_warn_on___2 ; long tmp___4 ; { if ((unsigned long )old != (unsigned long )((struct drm_i915_gem_object *)0)) { tmp = mutex_is_locked(& (old->base.dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c",
5269, "WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = ((unsigned int )old->frontbuffer_bits & frontbuffer_bits) == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 5270, "WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); old->frontbuffer_bits = (unsigned short )((int )old->frontbuffer_bits & ~ ((int )((unsigned short )frontbuffer_bits))); } else { } if ((unsigned long )new != (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___2 = mutex_is_locked(& (new->base.dev)->struct_mutex); __ret_warn_on___1 = tmp___2 == 0; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 5275, "WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); __ret_warn_on___2 = ((unsigned int )new->frontbuffer_bits & frontbuffer_bits) != 0U; tmp___4 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 5276, "WARN_ON(new->frontbuffer_bits & frontbuffer_bits)"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); new->frontbuffer_bits = (unsigned short )((int )new->frontbuffer_bits | (int )((unsigned short
)frontbuffer_bits)); } else { } return; } } unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o , struct i915_address_space *vm ) { struct drm_i915_private *dev_priv ; struct i915_vma *vma ; int __ret_warn_on ; long tmp ; struct list_head const *__mptr ; bool tmp___0 ; struct list_head const *__mptr___0 ; int __ret_warn_on___0 ; bool tmp___1 ; long tmp___2 ; { dev_priv = (struct drm_i915_private *)(o->base.dev)->dev_private; __ret_warn_on = (unsigned long )(& (dev_priv->mm.aliasing_ppgtt)->base) == (unsigned long )vm; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 5289, "WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __mptr = (struct list_head const *)o->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51513; ldv_51512: tmp___0 = i915_is_ggtt(vma->vm); if ((int )tmp___0 && (unsigned int )vma->ggtt_view.type != 0U) { goto ldv_51511; } else { } if ((unsigned long )vma->vm == (unsigned long )vm) { return ((unsigned long )vma->node.start); } else { } ldv_51511: __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51513: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& o->vma_list)) { goto ldv_51512; } else { } __ret_warn_on___0 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { tmp___1 = i915_is_ggtt(vm); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 5300, "%s vma for this object not found.\n", (int )tmp___1 ? 
(char *)"global" : (char *)"ppgtt"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return (0xffffffffffffffffUL); } } unsigned long i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o , struct i915_ggtt_view const *view ) { struct i915_address_space *ggtt ; struct i915_vma *vma ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; int __ret_warn_on ; long tmp___0 ; { ggtt = & ((struct drm_i915_private *)(o->base.dev)->dev_private)->gtt.base; __mptr = (struct list_head const *)o->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51528; ldv_51527: ; if ((unsigned long )vma->vm == (unsigned long )ggtt) { tmp = i915_ggtt_view_equal((struct i915_ggtt_view const *)(& vma->ggtt_view), view); if ((int )tmp) { return ((unsigned long )vma->node.start); } else { } } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51528: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& o->vma_list)) { goto ldv_51527; } else { } __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 5316, "global vma for this object not found. 
(view=%u)\n", (unsigned int )view->type); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (0xffffffffffffffffUL); } } bool i915_gem_obj_bound(struct drm_i915_gem_object *o , struct i915_address_space *vm ) { struct i915_vma *vma ; struct list_head const *__mptr ; bool tmp ; bool tmp___0 ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)o->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51543; ldv_51542: tmp = i915_is_ggtt(vma->vm); if ((int )tmp && (unsigned int )vma->ggtt_view.type != 0U) { goto ldv_51541; } else { } if ((unsigned long )vma->vm == (unsigned long )vm) { tmp___0 = drm_mm_node_allocated(& vma->node); if ((int )tmp___0) { return (1); } else { } } else { } ldv_51541: __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51543: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& o->vma_list)) { goto ldv_51542; } else { } return (0); } } bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o , struct i915_ggtt_view const *view ) { struct i915_address_space *ggtt ; struct i915_vma *vma ; struct list_head const *__mptr ; bool tmp ; bool tmp___0 ; struct list_head const *__mptr___0 ; { ggtt = & ((struct drm_i915_private *)(o->base.dev)->dev_private)->gtt.base; __mptr = (struct list_head const *)o->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51556; ldv_51555: ; if ((unsigned long )vma->vm == (unsigned long )ggtt) { tmp = i915_ggtt_view_equal((struct i915_ggtt_view const *)(& vma->ggtt_view), view); if ((int )tmp) { tmp___0 = drm_mm_node_allocated(& vma->node); if ((int )tmp___0) { return (1); } else { } } else { } } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51556: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& o->vma_list)) { goto ldv_51555; } else { } return 
(0); } } bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o ) { struct i915_vma *vma ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)o->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51567; ldv_51566: tmp = drm_mm_node_allocated(& vma->node); if ((int )tmp) { return (1); } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51567: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& o->vma_list)) { goto ldv_51566; } else { } return (0); } } unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o , struct i915_address_space *vm ) { struct drm_i915_private *dev_priv ; struct i915_vma *vma ; int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; struct list_head const *__mptr ; bool tmp___2 ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)(o->base.dev)->dev_private; __ret_warn_on = (unsigned long )(& (dev_priv->mm.aliasing_ppgtt)->base) == (unsigned long )vm; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c", 5368, "WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = list_empty((struct list_head const *)(& o->vma_list)); tmp___1 = ldv__builtin_expect(tmp___0 != 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem.c"), "i" (5370), "i" (12UL)); ldv_51577: ; goto ldv_51577; } else { } __mptr = (struct list_head const *)o->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51584; ldv_51583: tmp___2 = i915_is_ggtt(vma->vm); if ((int )tmp___2 && (unsigned int )vma->ggtt_view.type != 0U) { goto ldv_51582; } else { } if ((unsigned long )vma->vm == (unsigned long )vm) { return ((unsigned long )vma->node.size); } else { } ldv_51582: __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51584: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& o->vma_list)) { goto ldv_51583; } else { } return (0UL); } } bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj ) { struct i915_vma *vma ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_51595; ldv_51594: ; if ((int )vma->pin_count > 0) { return (1); } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_51595: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_51594; } else { } return (0); } } extern int ldv_probe_139(void) ; void call_and_disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 2 || ldv_work_3_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_0) { i915_gem_retire_work_handler(work); ldv_work_3_0 = 1; return; } else { } if ((ldv_work_3_1 == 2 || ldv_work_3_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_1) { i915_gem_retire_work_handler(work); ldv_work_3_1 = 1; return; } else { } if ((ldv_work_3_2 
== 2 || ldv_work_3_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_2) { i915_gem_retire_work_handler(work); ldv_work_3_2 = 1; return; } else { } if ((ldv_work_3_3 == 2 || ldv_work_3_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_3) { i915_gem_retire_work_handler(work); ldv_work_3_3 = 1; return; } else { } return; } } void disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 3 || ldv_work_3_0 == 2) && (unsigned long )ldv_work_struct_3_0 == (unsigned long )work) { ldv_work_3_0 = 1; } else { } if ((ldv_work_3_1 == 3 || ldv_work_3_1 == 2) && (unsigned long )ldv_work_struct_3_1 == (unsigned long )work) { ldv_work_3_1 = 1; } else { } if ((ldv_work_3_2 == 3 || ldv_work_3_2 == 2) && (unsigned long )ldv_work_struct_3_2 == (unsigned long )work) { ldv_work_3_2 = 1; } else { } if ((ldv_work_3_3 == 3 || ldv_work_3_3 == 2) && (unsigned long )ldv_work_struct_3_3 == (unsigned long )work) { ldv_work_3_3 = 1; } else { } return; } } void invoke_work_4(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_4_0 == 2 || ldv_work_4_0 == 3) { ldv_work_4_0 = 4; i915_gem_idle_work_handler(ldv_work_struct_4_0); ldv_work_4_0 = 1; } else { } goto ldv_51613; case 1: ; if (ldv_work_4_1 == 2 || ldv_work_4_1 == 3) { ldv_work_4_1 = 4; i915_gem_idle_work_handler(ldv_work_struct_4_0); ldv_work_4_1 = 1; } else { } goto ldv_51613; case 2: ; if (ldv_work_4_2 == 2 || ldv_work_4_2 == 3) { ldv_work_4_2 = 4; i915_gem_idle_work_handler(ldv_work_struct_4_0); ldv_work_4_2 = 1; } else { } goto ldv_51613; case 3: ; if (ldv_work_4_3 == 2 || ldv_work_4_3 == 3) { ldv_work_4_3 = 4; i915_gem_idle_work_handler(ldv_work_struct_4_0); ldv_work_4_3 = 1; } else { } goto ldv_51613; default: ldv_stop(); } ldv_51613: ; return; } } void call_and_disable_work_4(struct work_struct *work ) { { if ((ldv_work_4_0 == 2 || ldv_work_4_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_0) { i915_gem_idle_work_handler(work); 
ldv_work_4_0 = 1; return; } else { } if ((ldv_work_4_1 == 2 || ldv_work_4_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_1) { i915_gem_idle_work_handler(work); ldv_work_4_1 = 1; return; } else { } if ((ldv_work_4_2 == 2 || ldv_work_4_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_2) { i915_gem_idle_work_handler(work); ldv_work_4_2 = 1; return; } else { } if ((ldv_work_4_3 == 2 || ldv_work_4_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_3) { i915_gem_idle_work_handler(work); ldv_work_4_3 = 1; return; } else { } return; } } void activate_work_3(struct work_struct *work , int state ) { { if (ldv_work_3_0 == 0) { ldv_work_struct_3_0 = work; ldv_work_3_0 = state; return; } else { } if (ldv_work_3_1 == 0) { ldv_work_struct_3_1 = work; ldv_work_3_1 = state; return; } else { } if (ldv_work_3_2 == 0) { ldv_work_struct_3_2 = work; ldv_work_3_2 = state; return; } else { } if (ldv_work_3_3 == 0) { ldv_work_struct_3_3 = work; ldv_work_3_3 = state; return; } else { } return; } } void activate_pending_timer_22(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_22_0 == (unsigned long )timer) { if (ldv_timer_22_0 == 2 || pending_flag != 0) { ldv_timer_list_22_0 = timer; ldv_timer_list_22_0->data = data; ldv_timer_22_0 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_22_1 == (unsigned long )timer) { if (ldv_timer_22_1 == 2 || pending_flag != 0) { ldv_timer_list_22_1 = timer; ldv_timer_list_22_1->data = data; ldv_timer_22_1 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_22_2 == (unsigned long )timer) { if (ldv_timer_22_2 == 2 || pending_flag != 0) { ldv_timer_list_22_2 = timer; ldv_timer_list_22_2->data = data; ldv_timer_22_2 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_22_3 == (unsigned long )timer) { if (ldv_timer_22_3 == 2 || pending_flag != 0) { ldv_timer_list_22_3 = timer; 
ldv_timer_list_22_3->data = data; ldv_timer_22_3 = 1; } else { } return; } else { } activate_suitable_timer_22(timer, data); return; } } void activate_work_4(struct work_struct *work , int state ) { { if (ldv_work_4_0 == 0) { ldv_work_struct_4_0 = work; ldv_work_4_0 = state; return; } else { } if (ldv_work_4_1 == 0) { ldv_work_struct_4_1 = work; ldv_work_4_1 = state; return; } else { } if (ldv_work_4_2 == 0) { ldv_work_struct_4_2 = work; ldv_work_4_2 = state; return; } else { } if (ldv_work_4_3 == 0) { ldv_work_struct_4_3 = work; ldv_work_4_3 = state; return; } else { } return; } } void activate_suitable_timer_22(struct timer_list *timer , unsigned long data ) { { if (ldv_timer_22_0 == 0 || ldv_timer_22_0 == 2) { ldv_timer_list_22_0 = timer; ldv_timer_list_22_0->data = data; ldv_timer_22_0 = 1; return; } else { } if (ldv_timer_22_1 == 0 || ldv_timer_22_1 == 2) { ldv_timer_list_22_1 = timer; ldv_timer_list_22_1->data = data; ldv_timer_22_1 = 1; return; } else { } if (ldv_timer_22_2 == 0 || ldv_timer_22_2 == 2) { ldv_timer_list_22_2 = timer; ldv_timer_list_22_2->data = data; ldv_timer_22_2 = 1; return; } else { } if (ldv_timer_22_3 == 0 || ldv_timer_22_3 == 2) { ldv_timer_list_22_3 = timer; ldv_timer_list_22_3->data = data; ldv_timer_22_3 = 1; return; } else { } return; } } void ldv_initialize_drm_i915_gem_object_ops_139(void) { void *tmp ; { tmp = ldv_init_zalloc(592UL); i915_gem_phys_ops_group0 = (struct drm_i915_gem_object *)tmp; return; } } void invoke_work_3(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_3_0 == 2 || ldv_work_3_0 == 3) { ldv_work_3_0 = 4; i915_gem_retire_work_handler(ldv_work_struct_3_0); ldv_work_3_0 = 1; } else { } goto ldv_51649; case 1: ; if (ldv_work_3_1 == 2 || ldv_work_3_1 == 3) { ldv_work_3_1 = 4; i915_gem_retire_work_handler(ldv_work_struct_3_0); ldv_work_3_1 = 1; } else { } goto ldv_51649; case 2: ; if (ldv_work_3_2 == 2 || ldv_work_3_2 == 3) { ldv_work_3_2 = 4; 
i915_gem_retire_work_handler(ldv_work_struct_3_0); ldv_work_3_2 = 1; } else { } goto ldv_51649; case 3: ; if (ldv_work_3_3 == 2 || ldv_work_3_3 == 3) { ldv_work_3_3 = 4; i915_gem_retire_work_handler(ldv_work_struct_3_0); ldv_work_3_3 = 1; } else { } goto ldv_51649; default: ldv_stop(); } ldv_51649: ; return; } } void ldv_initialize_drm_i915_gem_object_ops_138(void) { void *tmp ; { tmp = ldv_init_zalloc(592UL); i915_gem_object_ops_group0 = (struct drm_i915_gem_object *)tmp; return; } } void call_and_disable_all_4(int state ) { { if (ldv_work_4_0 == state) { call_and_disable_work_4(ldv_work_struct_4_0); } else { } if (ldv_work_4_1 == state) { call_and_disable_work_4(ldv_work_struct_4_1); } else { } if (ldv_work_4_2 == state) { call_and_disable_work_4(ldv_work_struct_4_2); } else { } if (ldv_work_4_3 == state) { call_and_disable_work_4(ldv_work_struct_4_3); } else { } return; } } void choose_timer_22(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_timer_22_0 == 1) { ldv_timer_22_0 = 2; ldv_timer_22(ldv_timer_22_0, ldv_timer_list_22_0); } else { } goto ldv_51664; case 1: ; if (ldv_timer_22_1 == 1) { ldv_timer_22_1 = 2; ldv_timer_22(ldv_timer_22_1, ldv_timer_list_22_1); } else { } goto ldv_51664; case 2: ; if (ldv_timer_22_2 == 1) { ldv_timer_22_2 = 2; ldv_timer_22(ldv_timer_22_2, ldv_timer_list_22_2); } else { } goto ldv_51664; case 3: ; if (ldv_timer_22_3 == 1) { ldv_timer_22_3 = 2; ldv_timer_22(ldv_timer_22_3, ldv_timer_list_22_3); } else { } goto ldv_51664; default: ldv_stop(); } ldv_51664: ; return; } } void disable_work_4(struct work_struct *work ) { { if ((ldv_work_4_0 == 3 || ldv_work_4_0 == 2) && (unsigned long )ldv_work_struct_4_0 == (unsigned long )work) { ldv_work_4_0 = 1; } else { } if ((ldv_work_4_1 == 3 || ldv_work_4_1 == 2) && (unsigned long )ldv_work_struct_4_1 == (unsigned long )work) { ldv_work_4_1 = 1; } else { } if ((ldv_work_4_2 == 3 || ldv_work_4_2 == 2) && (unsigned long )ldv_work_struct_4_2 == (unsigned long 
/* Tail of disable_work_4() (head on previous line): finish slot 2, handle slot 3. */
)work) { ldv_work_4_2 = 1; } else { } if ((ldv_work_4_3 == 3 || ldv_work_4_3 == 2) && (unsigned long )ldv_work_struct_4_3 == (unsigned long )work) { ldv_work_4_3 = 1; } else { } return; } }
/* work_init_4(): reset all four group-4 work-slot state machines to 0 (free). */
void work_init_4(void) { { ldv_work_4_0 = 0; ldv_work_4_1 = 0; ldv_work_4_2 = 0; ldv_work_4_3 = 0; return; } }
/* disable_suitable_timer_22(): if @timer occupies a tracked group-22 slot, reset that slot to 0 (unregistered). */
void disable_suitable_timer_22(struct timer_list *timer ) { { if (ldv_timer_22_0 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_22_0) { ldv_timer_22_0 = 0; return; } else { } if (ldv_timer_22_1 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_22_1) { ldv_timer_22_1 = 0; return; } else { } if (ldv_timer_22_2 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_22_2) { ldv_timer_22_2 = 0; return; } else { } if (ldv_timer_22_3 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_22_3) { ldv_timer_22_3 = 0; return; } else { } return; } }
/* call_and_disable_all_3(): run-and-disable every group-3 slot currently in @state. */
void call_and_disable_all_3(int state ) { { if (ldv_work_3_0 == state) { call_and_disable_work_3(ldv_work_struct_3_0); } else { } if (ldv_work_3_1 == state) { call_and_disable_work_3(ldv_work_struct_3_1); } else { } if (ldv_work_3_2 == state) { call_and_disable_work_3(ldv_work_struct_3_2); } else { } if (ldv_work_3_3 == state) { call_and_disable_work_3(ldv_work_struct_3_3); } else { } return; } }
/* work_init_3(): reset all four group-3 work-slot state machines to 0 (free). */
void work_init_3(void) { { ldv_work_3_0 = 0; ldv_work_3_1 = 0; ldv_work_3_2 = 0; ldv_work_3_3 = 0; return; } }
/* ldv_timer_22(): simulate a timer expiry by calling fake_irq(timer->data) with
 * LDV_IN_INTERRUPT raised to 2 for the duration; @state is unused here. */
void ldv_timer_22(int state , struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; fake_irq(timer->data); LDV_IN_INTERRUPT = 1; return; } }
/* timer_init_22(): reset all four group-22 timer state machines to 0 (unregistered). */
void timer_init_22(void) { { ldv_timer_22_0 = 0; ldv_timer_22_1 = 0; ldv_timer_22_2 = 0; ldv_timer_22_3 = 0; return; } }
/* reg_timer_22(): model timer registration; only timers whose callback is fake_irq
 * are tracked (armed via activate_suitable_timer_22()). Always returns 0. */
int reg_timer_22(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) { { if ((unsigned long )function == (unsigned long )(& fake_irq)) { activate_suitable_timer_22(timer, data); } else { } return (0); } }
/* Head of ldv_main_exported_139() (continues on next line): nondeterministic driver
 * for the i915_gem_phys_ops callback state machine. */
void ldv_main_exported_139(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) {
case 0: ; if (ldv_state_variable_139 == 2) { i915_gem_object_release_phys(i915_gem_phys_ops_group0); ldv_state_variable_139 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51701; case 1: ; if (ldv_state_variable_139 == 1) { i915_gem_object_put_pages_phys(i915_gem_phys_ops_group0); ldv_state_variable_139 = 1; } else { } if (ldv_state_variable_139 == 2) { i915_gem_object_put_pages_phys(i915_gem_phys_ops_group0); ldv_state_variable_139 = 2; } else { } goto ldv_51701; case 2: ; if (ldv_state_variable_139 == 1) { i915_gem_object_get_pages_phys(i915_gem_phys_ops_group0); ldv_state_variable_139 = 1; } else { } if (ldv_state_variable_139 == 2) { i915_gem_object_get_pages_phys(i915_gem_phys_ops_group0); ldv_state_variable_139 = 2; } else { } goto ldv_51701; case 3: ; if (ldv_state_variable_139 == 1) { ldv_probe_139(); ldv_state_variable_139 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_51701; default: ldv_stop(); } ldv_51701: ; return; } } void ldv_main_exported_138(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_138 == 1) { i915_gem_object_put_pages_gtt(i915_gem_object_ops_group0); ldv_state_variable_138 = 1; } else { } goto ldv_51710; case 1: ; if (ldv_state_variable_138 == 1) { i915_gem_object_get_pages_gtt(i915_gem_object_ops_group0); ldv_state_variable_138 = 1; } else { } goto ldv_51710; default: ldv_stop(); } ldv_51710: ; return; } } bool ldv_queue_work_on_291(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_292(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, 
ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_293(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_294(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_295(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } int ldv_mod_timer_296(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___9 ldv_func_res ; int tmp ; { tmp = mod_timer(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_21(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } int ldv_del_timer_sync_297(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___10 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_21(ldv_func_arg1); return (ldv_func_res); } } bool ldv_cancel_delayed_work_sync_298(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_delayed_work_sync_299(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& 
ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_flush_delayed_work_300(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___7 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_315(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_317(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_316(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_319(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_318(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void kref_get___6(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub___6(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___6(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___6(kref, 1U, release); return (tmp); } } __inline static int sigismember(sigset_t *set , int _sig ) { unsigned long sig ; { sig = (unsigned long )(_sig + -1); return ((int )(set->sig[0] >> (int )sig) & 1); } } extern long schedule_timeout_killable(long ) ; __inline static int signal_pending___0(struct task_struct *p ) { int tmp ; long tmp___0 ; { tmp = test_tsk_thread_flag(p, 2); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); return ((int )tmp___0); } } __inline static int __fatal_signal_pending(struct task_struct *p ) { int tmp ; long tmp___0 ; { tmp = sigismember(& p->pending.signal, 9); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); return ((int )tmp___0); } } __inline static int fatal_signal_pending(struct task_struct *p ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = signal_pending___0(p); if (tmp != 0) { tmp___0 = __fatal_signal_pending(p); if (tmp___0 != 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } return (tmp___1); } } extern int register_oom_notifier(struct 
notifier_block * ) ; extern int register_shrinker(struct shrinker * ) ; __inline static void drm_gem_object_reference___3(struct drm_gem_object *obj ) { { kref_get___6(& obj->refcount); return; } } __inline static void drm_gem_object_unreference___6(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___6(& obj->refcount, & drm_gem_object_free); } else { } return; } } static bool mutex_is_locked_by(struct mutex *mutex , struct task_struct *task ) { int tmp ; { tmp = mutex_is_locked(mutex); if (tmp == 0) { return (0); } else { } return ((unsigned long )mutex->owner == (unsigned long )task); } } unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv , long target , unsigned int flags ) { struct __anonstruct_phases_453 phases[3U] ; struct __anonstruct_phase_454 const *phase ; unsigned long count ; struct list_head still_in_list ; struct drm_i915_gem_object *obj ; struct i915_vma *vma ; struct i915_vma *v ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp ; struct list_head const *__mptr___2 ; int tmp___0 ; int tmp___1 ; { phases[0].list = & dev_priv->mm.unbound_list; phases[0].bit = 2U; phases[1].list = & dev_priv->mm.bound_list; phases[1].bit = 4U; phases[2].list = (struct list_head *)0; phases[2].bit = 0U; count = 0UL; phase = (struct __anonstruct_456 const *)(& phases); goto ldv_49524; ldv_49523: ; if (((unsigned int )phase->bit & flags) == 0U) { goto ldv_49505; } else { } INIT_LIST_HEAD(& still_in_list); goto ldv_49511; ldv_49521: __mptr = (struct list_head const *)(phase->list)->next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; list_move_tail(& obj->global_list, & still_in_list); if ((int )flags & 1 && (unsigned int )*((unsigned char *)obj + 409UL) != 16U) { goto ldv_49511; } else { } drm_gem_object_reference___3(& obj->base); __mptr___0 = (struct list_head const *)obj->vma_list.next; vma = (struct 
i915_vma *)__mptr___0 + 0xffffffffffffff58UL; __mptr___1 = (struct list_head const *)vma->vma_link.next; v = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; goto ldv_49520; ldv_49519: tmp = i915_vma_unbind(vma); if (tmp != 0) { goto ldv_49518; } else { } vma = v; __mptr___2 = (struct list_head const *)v->vma_link.next; v = (struct i915_vma *)__mptr___2 + 0xffffffffffffff58UL; ldv_49520: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_49519; } else { } ldv_49518: tmp___0 = i915_gem_object_put_pages(obj); if (tmp___0 == 0) { count = (obj->base.size >> 12) + count; } else { } drm_gem_object_unreference___6(& obj->base); ldv_49511: ; if ((unsigned long )target > count) { tmp___1 = list_empty((struct list_head const *)phase->list); if (tmp___1 == 0) { goto ldv_49521; } else { goto ldv_49522; } } else { } ldv_49522: list_splice((struct list_head const *)(& still_in_list), phase->list); ldv_49505: phase = phase + 1; ldv_49524: ; if ((unsigned long )phase->list != (unsigned long )((struct list_head */* const */)0)) { goto ldv_49523; } else { } return (count); } } unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv ) { unsigned long tmp ; { i915_gem_evict_everything(dev_priv->dev); tmp = i915_gem_shrink(dev_priv, 9223372036854775807L, 6U); return (tmp); } } static bool i915_gem_shrinker_lock(struct drm_device *dev , bool *unlock ) { struct task_struct *tmp ; bool tmp___0 ; int tmp___1 ; struct drm_i915_private *tmp___2 ; int tmp___3 ; { tmp___3 = mutex_trylock(& dev->struct_mutex); if (tmp___3 == 0) { tmp = get_current(); tmp___0 = mutex_is_locked_by(& dev->struct_mutex, tmp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = to_i915((struct drm_device const *)dev); if ((int )tmp___2->mm.shrinker_no_lock_stealing) { return (0); } else { } *unlock = 0; } else { *unlock = 1; } return (1); } } static int num_vma_bound(struct drm_i915_gem_object *obj ) { struct 
i915_vma *vma ; int count ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; { count = 0; __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; goto ldv_49543; ldv_49542: tmp = drm_mm_node_allocated(& vma->node); if ((int )tmp) { count = count + 1; } else { } if ((unsigned int )*((unsigned char *)vma + 232UL) != 0U) { count = count + 1; } else { } __mptr___0 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; ldv_49543: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_49542; } else { } return (count); } } static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker , struct shrink_control *sc ) { struct drm_i915_private *dev_priv ; struct shrinker const *__mptr ; struct drm_device *dev ; struct drm_i915_gem_object *obj ; unsigned long count ; bool unlock ; bool tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; int tmp___1 ; struct list_head const *__mptr___3 ; { __mptr = (struct shrinker const *)shrinker; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff5478UL; dev = dev_priv->dev; tmp = i915_gem_shrinker_lock(dev, & unlock); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0UL); } else { } count = 0UL; __mptr___0 = (struct list_head const *)dev_priv->mm.unbound_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; goto ldv_49561; ldv_49560: ; if (obj->pages_pin_count == 0) { count = (obj->base.size >> 12) + count; } else { } __mptr___1 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffee8UL; ldv_49561: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.unbound_list)) { goto ldv_49560; } else { } __mptr___2 = (struct list_head const 
*)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr___2 + 0xfffffffffffffee8UL; goto ldv_49568; ldv_49567: tmp___1 = num_vma_bound(obj); if (obj->pages_pin_count == tmp___1) { count = (obj->base.size >> 12) + count; } else { } __mptr___3 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___3 + 0xfffffffffffffee8UL; ldv_49568: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_49567; } else { } if ((int )unlock) { mutex_unlock(& dev->struct_mutex); } else { } return (count); } } static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker , struct shrink_control *sc ) { struct drm_i915_private *dev_priv ; struct shrinker const *__mptr ; struct drm_device *dev ; unsigned long freed ; bool unlock ; bool tmp ; int tmp___0 ; unsigned long tmp___1 ; { __mptr = (struct shrinker const *)shrinker; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff5478UL; dev = dev_priv->dev; tmp = i915_gem_shrinker_lock(dev, & unlock); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0xffffffffffffffffUL); } else { } freed = i915_gem_shrink(dev_priv, (long )sc->nr_to_scan, 7U); if (sc->nr_to_scan > freed) { tmp___1 = i915_gem_shrink(dev_priv, (long )(sc->nr_to_scan - freed), 6U); freed = tmp___1 + freed; } else { } if ((int )unlock) { mutex_unlock(& dev->struct_mutex); } else { } return (freed); } } static int i915_gem_shrinker_oom(struct notifier_block *nb , unsigned long event , void *ptr ) { struct drm_i915_private *dev_priv ; struct notifier_block const *__mptr ; struct drm_device *dev ; struct drm_i915_gem_object *obj ; unsigned long timeout ; unsigned long tmp ; unsigned long pinned ; unsigned long bound ; unsigned long unbound ; unsigned long freed_pages ; bool was_interruptible ; bool unlock ; struct task_struct *tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; struct list_head const *__mptr___0 ; struct list_head const 
/* continuation of i915_gem_shrinker_oom: try the lock for up to
 * msecs_to_jiffies(5000)+1 iterations (bailing out if a fatal signal is
 * pending), then shrink everything with interruptible waits disabled, and
 * tally pinned vs unbound bytes for shmem-backed (base.filp != NULL) objects
 * on mm.unbound_list; the bound-list tally continues on the next line. */
*__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { __mptr = (struct notifier_block const *)nb; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff5490UL; dev = dev_priv->dev; tmp = msecs_to_jiffies(5000U); timeout = tmp + 1UL; goto ldv_49598; ldv_49597: schedule_timeout_killable(1L); tmp___0 = get_current(); tmp___1 = fatal_signal_pending(tmp___0); if (tmp___1 != 0) { return (0); } else { } ldv_49598: tmp___2 = i915_gem_shrinker_lock(dev, & unlock); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { timeout = timeout - 1UL; if (timeout != 0UL) { goto ldv_49597; } else { goto ldv_49599; } } else { } ldv_49599: ; if (timeout == 0UL) { printk("\vUnable to purge GPU memory due lock contention.\n"); return (0); } else { } was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = 0; freed_pages = i915_gem_shrink_all(dev_priv); dev_priv->mm.interruptible = was_interruptible; pinned = 0UL; bound = pinned; unbound = bound; __mptr___0 = (struct list_head const *)dev_priv->mm.unbound_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; goto ldv_49606; ldv_49605: ; if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { goto ldv_49604; } else { } if (obj->pages_pin_count != 0) { pinned = obj->base.size + pinned; } else { unbound = obj->base.size + unbound; } ldv_49604: __mptr___1 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffee8UL; ldv_49606: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.unbound_list)) { goto ldv_49605; } else { } __mptr___2 = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr___2 + 0xfffffffffffffee8UL; goto ldv_49614; ldv_49613: ; if ((unsigned long )obj->base.filp == (unsigned long )((struct file *)0)) { goto ldv_49612; } else { } if (obj->pages_pin_count != 0) { pinned = 
obj->base.size + pinned; } else { bound = obj->base.size + bound; } ldv_49612: __mptr___3 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___3 + 0xfffffffffffffee8UL; ldv_49614: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_49613; } else { } if ((int )unlock) { mutex_unlock(& dev->struct_mutex); } else { } if ((freed_pages != 0UL || unbound != 0UL) || bound != 0UL) { printk("\016Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", freed_pages << 12, pinned); } else { } if (unbound != 0UL || bound != 0UL) { printk("\v%lu and %lu bytes still available in the bound and unbound GPU page lists.\n", bound, unbound); } else { } *((unsigned long *)ptr) = *((unsigned long *)ptr) + freed_pages; return (0); } }
/* i915_gem_shrinker_init: registers the memory shrinker (seeks = 2) and the
 * OOM notifier with the callbacks defined above. */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv ) { { dev_priv->mm.shrinker.scan_objects = & i915_gem_shrinker_scan; dev_priv->mm.shrinker.count_objects = & i915_gem_shrinker_count; dev_priv->mm.shrinker.seeks = 2; register_shrinker(& dev_priv->mm.shrinker); dev_priv->mm.oom_notifier.notifier_call = & i915_gem_shrinker_oom; register_oom_notifier(& dev_priv->mm.oom_notifier); return; } }
/* LDV model wrappers: forward to the real workqueue API and additionally
 * notify the verifier's work-activation model (activate_work_18). */
bool ldv_queue_work_on_315(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_316(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_317(int ldv_func_arg1 , struct 
workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* More LDV wrappers: flush also disables all modelled works for this module. */
void ldv_flush_workqueue_318(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_319(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Forward declarations and externs for the next CIL translation unit
 * (i915_gem_stolen.c). */
__inline static long ldv__builtin_expect(long exp , long c ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; bool ldv_queue_work_on_329(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_331(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_330(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_333(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_332(struct workqueue_struct *ldv_func_arg1 ) ; extern struct resource iomem_resource ; extern struct resource *__devm_request_region(struct device * , struct resource * , resource_size_t , resource_size_t , char const * ) ;
/* kref_sub___7: CIL clone of kref_sub(); WARNs on a NULL release callback,
 * then drops `count` references and invokes release on the final drop. */
__inline static int kref_sub___7(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release 
== (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } }
/* kref_put___7: drop a single reference via kref_sub___7. */
__inline static int kref_put___7(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___7(kref, 1U, release); return (tmp); } } extern int pci_bus_read_config_dword(struct pci_bus * , unsigned int , int , u32 * ) ;
/* pci_read_config_dword: inline helper reading a 32-bit PCI config register. */
__inline static int pci_read_config_dword(struct pci_dev const *dev , int where , u32 *val ) { int tmp ; { tmp = pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); return (tmp); } } extern int drm_mm_insert_node_generic(struct drm_mm * , struct drm_mm_node * , u64 , unsigned int , unsigned long , enum drm_mm_search_flags , enum drm_mm_allocator_flags ) ;
/* drm_mm_insert_node: convenience wrapper over drm_mm_insert_node_generic
 * with color 0 and default allocator flags. */
__inline static int drm_mm_insert_node(struct drm_mm *mm , struct drm_mm_node *node , u64 size , unsigned int alignment , enum drm_mm_search_flags flags ) { int tmp ; { tmp = drm_mm_insert_node_generic(mm, node, size, alignment, 0UL, flags, 0); return (tmp); } }
/* drm_gem_object_unreference___7: NULL-safe GEM object unreference. */
__inline static void drm_gem_object_unreference___7(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___7(& obj->refcount, & drm_gem_object_free); } else { } return; } } int i915_gem_init_stolen(struct drm_device *dev ) ; int i915_gem_stolen_setup_compression(struct drm_device *dev , int size , int fb_cpp ) ; void i915_gem_stolen_cleanup_compression(struct drm_device *dev ) ; void i915_gem_cleanup_stolen(struct drm_device *dev ) ;
/* i915_stolen_to_physical: compute the physical base address of the BIOS
 * stolen-memory region (declarations here; body continues on the next lines).
 * Reads PCI config offset 92 on gen > 2 and masks to the top bits. */
static unsigned long i915_stolen_to_physical(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct resource *r ; u32 base ; struct drm_i915_private *__p ; struct __anonstruct_stolen_440 stolen[2U] ; u64 gtt_start ; u64 gtt_end ; uint32_t tmp ; 
/* continuation of i915_stolen_to_physical: on gens <= 4 (with two feature
 * bytes at offset 44 clear) it checks whether the GTT itself lives inside the
 * stolen range, and if so trims the stolen window to the larger half on either
 * side of the GTT, adjusting dev_priv->gtt.stolen_size to match. */
struct drm_i915_private *__p___0 ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; base = 0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 2U) { pci_read_config_dword((struct pci_dev const *)dev->pdev, 92, & base); base = base & 4293918720U; } else { } if (base == 0U) { return (0UL); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 4U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) == 0U) { stolen[0].start = base; stolen[0].end = (unsigned int )dev_priv->gtt.stolen_size + base; stolen[1].start = base; stolen[1].end = (unsigned int )dev_priv->gtt.stolen_size + base; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8224L, 1); gtt_start = (u64 )tmp; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 4U) { gtt_start = (gtt_start & 4294963200ULL) | ((gtt_start & 240ULL) << 28); } else { gtt_start = gtt_start & 4294963200ULL; } gtt_end = (unsigned long long )((dev_priv->gtt.base.total >> 12) * 4UL) + gtt_start; if ((u64 )stolen[0].start <= gtt_start && (u64 )stolen[0].end > gtt_start) { stolen[0].end = (u32 )gtt_start; } else { } if ((u64 )stolen[1].start < gtt_end && (u64 )stolen[1].end >= gtt_end) { stolen[1].start = (u32 )gtt_end; } else { } if (stolen[0].end - stolen[0].start > stolen[1].end - stolen[1].start) { base = stolen[0].start; dev_priv->gtt.stolen_size = (size_t )(stolen[0].end - stolen[0].start); } else { base = stolen[1].start; dev_priv->gtt.stolen_size = (size_t )(stolen[1].end - stolen[1].start); } if (stolen[0].start != 
/* continuation of i915_stolen_to_physical: debug-log any trimming, then try
 * to reserve the stolen range against iomem_resource; on conflict retry with
 * [base+1, size-1] (the gen-3 BIOS quirk) and only treat a second failure as
 * fatal on non-gen-3 hardware (base reset to 0 = "no stolen memory"). */
stolen[1].start || stolen[0].end != stolen[1].end) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_stolen_to_physical", "GTT within stolen memory at 0x%llx-0x%llx\n", gtt_start, gtt_end - 1ULL); } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_stolen_to_physical", "Stolen memory adjusted to 0x%x-0x%x\n", base, ((u32 )dev_priv->gtt.stolen_size + base) - 1U); } else { } } else { } } else { } } else { } } else { } r = __devm_request_region(dev->dev, & iomem_resource, (resource_size_t )base, (resource_size_t )dev_priv->gtt.stolen_size, "Graphics Stolen Memory"); if ((unsigned long )r == (unsigned long )((struct resource *)0)) { r = __devm_request_region(dev->dev, & iomem_resource, (resource_size_t )(base + 1U), (resource_size_t )(dev_priv->gtt.stolen_size - 1UL), "Graphics Stolen Memory"); if ((unsigned long )r == (unsigned long )((struct resource *)0)) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) != 3U) { drm_err("conflict detected with stolen region: [0x%08x - 0x%08x]\n", base, (u32 )dev_priv->gtt.stolen_size + base); base = 0U; } else { } } else { } } else { } return ((unsigned long )base); } }
/* find_compression_threshold: try to carve an FBC compressed-fb node out of
 * stolen memory, starting at twice the requested size and halving on failure;
 * returns the compression threshold (1, 2 or 4) or 0 when no fit is found
 * (the retry loop spills onto the next line). */
static int find_compression_threshold(struct drm_device *dev , struct drm_mm_node *node , int size , int fb_cpp ) { struct drm_i915_private *dev_priv ; int compression_threshold ; int ret ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; compression_threshold = 1; size = size << 1; ret = drm_mm_insert_node(& dev_priv->mm.stolen, node, (u64 )size, 4096U, 0); if (ret == 0) { return (compression_threshold); } else { } again: ; if (compression_threshold > 4 || (fb_cpp == 2 && compression_threshold == 2)) { return (0); } else { } size = size >> 1; ret = drm_mm_insert_node(& dev_priv->mm.stolen, node, (u64 )size, 4096U, 0); if (ret != 0) { __p = 
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (0); } else { goto _L; } } else _L: /* CIL Label */ if (ret != 0) { compression_threshold = compression_threshold << 1; goto again; } else { return (compression_threshold); } } }
/* i915_setup_compression: reserve stolen space for the FBC compressed
 * framebuffer and (on older hardware) a 4 KiB compressed line-length buffer,
 * then program the corresponding FBC base registers via mmio_writel.
 * NOTE(review): `compressed_llb = compressed_llb;` and the uninitialized
 * `bool __print_once` are CIL artifacts of the original printk_once/
 * uninitialized-variable idioms — intentional in this generated code. */
static int i915_setup_compression(struct drm_device *dev , int size , int fb_cpp ) { struct drm_i915_private *dev_priv ; struct drm_mm_node *compressed_llb ; int ret ; void *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp___0 ; bool __print_once ; { dev_priv = (struct drm_i915_private *)dev->dev_private; compressed_llb = compressed_llb; ret = find_compression_threshold(dev, & dev_priv->fbc.compressed_fb, size, fb_cpp); if (ret == 0) { goto err_llb; } else if (ret > 1) { printk("\016[drm] Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); } else { } dev_priv->fbc.threshold = (unsigned int )ret; __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) > 4U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 274944L, (uint32_t )dev_priv->fbc.compressed_fb.start, 1); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 10818U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12800L, (uint32_t )dev_priv->fbc.compressed_fb.start, 1); } else { tmp = kzalloc(72UL, 208U); compressed_llb = (struct drm_mm_node *)tmp; if ((unsigned long )compressed_llb == (unsigned long )((struct drm_mm_node *)0)) { goto err_fb; } else { } ret = drm_mm_insert_node(& dev_priv->mm.stolen, compressed_llb, 4096ULL, 4096U, 0); if (ret != 0) { goto err_fb; } else { } dev_priv->fbc.compressed_llb = compressed_llb; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12800L, (uint32_t )dev_priv->mm.stolen_base + (uint32_t )dev_priv->fbc.compressed_fb.start, 1); 
/* continuation of i915_setup_compression: program the LLB register, record
 * the reserved size, and unwind via goto-cleanup (err_fb frees the llb node
 * and removes the fb node; err_llb prints a one-shot hint and returns -ENOSPC). */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12804L, (uint32_t )dev_priv->mm.stolen_base + (uint32_t )compressed_llb->start, 1); } } dev_priv->fbc.uncompressed_size = (unsigned long )size; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_setup_compression", "reserved %d bytes of contiguous stolen space for FBC\n", size); } else { } return (0); err_fb: kfree((void const *)compressed_llb); drm_mm_remove_node(& dev_priv->fbc.compressed_fb); err_llb: ; if (! __print_once) { __print_once = 1; printk("\016drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); } else { } return (-28); } }
/* i915_gem_stolen_setup_compression: public entry point; returns -ENODEV if
 * the stolen allocator is not initialized, 0 if the current reservation is
 * already large enough, otherwise tears down and re-creates the reservation. */
int i915_gem_stolen_setup_compression(struct drm_device *dev , int size , int fb_cpp ) { struct drm_i915_private *dev_priv ; bool tmp ; int tmp___0 ; int tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = drm_mm_initialized(& dev_priv->mm.stolen); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-19); } else { } if ((unsigned long )size <= dev_priv->fbc.uncompressed_size) { return (0); } else { } i915_gem_stolen_cleanup_compression(dev); tmp___1 = i915_setup_compression(dev, size, fb_cpp); return (tmp___1); } }
/* i915_gem_stolen_cleanup_compression: release the compressed-fb node and,
 * if present, the compressed-llb node; idempotent (no-op when size is 0). */
void i915_gem_stolen_cleanup_compression(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (dev_priv->fbc.uncompressed_size == 0UL) { return; } else { } drm_mm_remove_node(& dev_priv->fbc.compressed_fb); if ((unsigned long )dev_priv->fbc.compressed_llb != (unsigned long )((struct drm_mm_node *)0)) { drm_mm_remove_node(dev_priv->fbc.compressed_llb); kfree((void const *)dev_priv->fbc.compressed_llb); } else { } dev_priv->fbc.uncompressed_size = 0UL; return; } }
/* i915_gem_cleanup_stolen: tear down compression state and the stolen
 * allocator (body completes on the next line). */
void i915_gem_cleanup_stolen(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; bool tmp ; int tmp___0 ; { dev_priv 
= (struct drm_i915_private *)dev->dev_private; tmp = drm_mm_initialized(& dev_priv->mm.stolen); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } i915_gem_stolen_cleanup_compression(dev); drm_mm_takedown(& dev_priv->mm.stolen); return; } }
/* i915_gem_init_stolen: probe the stolen region and initialize the drm_mm
 * allocator over it. Bails out (returning 0, i.e. "no stolen") when DMAR is
 * active on gen <= 7, when no stolen size was detected, or when the physical
 * base cannot be resolved. Reads register 1082048 (0x108100) to compute the
 * BIOS-reserved tail on gen 7/8+ (continues on the next line). */
int i915_gem_init_stolen(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 tmp ; int bios_reserved ; struct drm_i915_private *__p ; long tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; bios_reserved = 0; if (intel_iommu_gfx_mapped != 0) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 7U) { printk("\016[drm] DMAR active, disabling use of stolen memory\n"); return (0); } else { } } else { } if (dev_priv->gtt.stolen_size == 0UL) { return (0); } else { } dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); if (dev_priv->mm.stolen_base == 0UL) { return (0); } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_gem_init_stolen", "found %zd bytes of stolen memory at %08lx\n", dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1082048L, 1); tmp = tmp >> 7; tmp = tmp & 3U; bios_reserved = 1048576 << (int )tmp; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 7U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1082048L, 1); bios_reserved = (tmp & 32U) != 0U ? 
/* continuation of i915_gem_init_stolen: WARN (and return 0) if the BIOS
 * reservation exceeds the stolen size, then hand the usable range to drm_mm. */
262144 : 1048576; } else { } } __ret_warn_on = (size_t )bios_reserved > dev_priv->gtt.stolen_size; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_stolen.c", 327, "WARN_ON(bios_reserved > dev_priv->gtt.stolen_size)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (0); } else { } drm_mm_init(& dev_priv->mm.stolen, 0ULL, (u64 )(dev_priv->gtt.stolen_size - (size_t )bios_reserved)); return (0); } }
/* i915_pages_create_for_stolen: build a single-entry sg_table whose DMA
 * address points directly into stolen memory (stolen_base + offset); BUG()s
 * (the inline ud2 asm) if the requested range overruns the stolen region.
 * Returns NULL on allocation failure (continues on the next line). */
static struct sg_table *i915_pages_create_for_stolen(struct drm_device *dev , u32 offset , u32 size ) { struct drm_i915_private *dev_priv ; struct sg_table *st ; struct scatterlist *sg ; long tmp ; long tmp___0 ; void *tmp___1 ; int tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_pages_create_for_stolen", "offset=0x%x, size=%d\n", offset, size); } else { } tmp___0 = ldv__builtin_expect((size_t )offset > dev_priv->gtt.stolen_size - (size_t )size, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_stolen.c"), "i" (346), "i" (12UL)); ldv_48116: ; goto ldv_48116; } else { } tmp___1 = kmalloc(16UL, 208U); st = (struct sg_table *)tmp___1; if ((unsigned long )st == (unsigned long )((struct sg_table *)0)) { return ((struct sg_table *)0); } else { } tmp___2 = sg_alloc_table(st, 1U, 208U); if (tmp___2 != 0) { kfree((void const *)st); return 
((struct sg_table *)0); } else { } sg = st->sgl; sg->offset = 0U; sg->length = size; sg->dma_address = (unsigned long long )dev_priv->mm.stolen_base + (unsigned long long )offset; sg->dma_length = size; return (st); } }
/* i915_gem_object_get_pages_stolen: must never be called for stolen objects
 * (pages are created eagerly at object creation) — unconditionally BUG()s. */
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj ) { { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_stolen.c"), "i" (374), "i" (12UL)); ldv_48120: ; goto ldv_48120; return (-22); } }
/* i915_gem_object_put_pages_stolen: free the synthetic sg_table. */
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj ) { { sg_free_table(obj->pages); kfree((void const *)obj->pages); return; } }
/* i915_gem_object_release_stolen: return the object's stolen drm_mm node to
 * the allocator and clear the pointer. */
static void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj ) { { if ((unsigned long )obj->stolen != (unsigned long )((struct drm_mm_node *)0)) { drm_mm_remove_node(obj->stolen); kfree((void const *)obj->stolen); obj->stolen = (struct drm_mm_node *)0; } else { } return; } }
/* Ops vtable wiring the three callbacks above into the GEM object. */
static struct drm_i915_gem_object_ops const i915_gem_object_stolen_ops = {& i915_gem_object_get_pages_stolen, & i915_gem_object_put_pages_stolen, 0, & i915_gem_object_release_stolen};
/* _i915_gem_object_create_stolen: wrap an already-reserved stolen node in a
 * GEM object: private init, stolen ops, eager page table, pinned pages
 * (completes on the next line). */
static struct drm_i915_gem_object *_i915_gem_object_create_stolen(struct drm_device *dev , struct drm_mm_node *stolen ) { struct drm_i915_gem_object *obj ; void *tmp ; struct drm_i915_private *__p ; { tmp = i915_gem_object_alloc(dev); obj = (struct drm_i915_gem_object *)tmp; if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return ((struct drm_i915_gem_object *)0); } else { } drm_gem_private_object_init(dev, & obj->base, (size_t )stolen->size); i915_gem_object_init(obj, & i915_gem_object_stolen_ops); obj->pages = i915_pages_create_for_stolen(dev, (u32 )stolen->start, (u32 )stolen->size); 
/* continuation of _i915_gem_object_create_stolen: the cache_level byte at
 * offset 46 of dev_priv selects LLC vs uncached — presumably HAS_LLC; TODO
 * confirm against struct intel_device_info layout. */
if ((unsigned long )obj->pages == (unsigned long )((struct sg_table *)0)) { goto cleanup; } else { } obj->has_dma_mapping = 1U; i915_gem_object_pin_pages(obj); obj->stolen = stolen; obj->base.read_domains = 65U; __p = to_i915((struct drm_device const *)dev); obj->cache_level = (unsigned int )*((unsigned char *)__p + 46UL) != 0U; return (obj); cleanup: i915_gem_object_free(obj); return ((struct drm_i915_gem_object *)0); } }
/* i915_gem_object_create_stolen: allocate `size` bytes (page-aligned search,
 * 4 KiB alignment) out of the stolen allocator and wrap it in a GEM object;
 * every failure path frees the drm_mm node and returns NULL. */
struct drm_i915_gem_object *i915_gem_object_create_stolen(struct drm_device *dev , u32 size ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct drm_mm_node *stolen ; int ret ; bool tmp ; int tmp___0 ; long tmp___1 ; void *tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = drm_mm_initialized(& dev_priv->mm.stolen); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return ((struct drm_i915_gem_object *)0); } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_gem_object_create_stolen", "creating stolen object: size=%x\n", size); } else { } if (size == 0U) { return ((struct drm_i915_gem_object *)0); } else { } tmp___2 = kzalloc(72UL, 208U); stolen = (struct drm_mm_node *)tmp___2; if ((unsigned long )stolen == (unsigned long )((struct drm_mm_node *)0)) { return ((struct drm_i915_gem_object *)0); } else { } ret = drm_mm_insert_node(& dev_priv->mm.stolen, stolen, (u64 )size, 4096U, 0); if (ret != 0) { kfree((void const *)stolen); return ((struct drm_i915_gem_object *)0); } else { } obj = _i915_gem_object_create_stolen(dev, stolen); if ((unsigned long )obj != (unsigned long )((struct drm_i915_gem_object *)0)) { return (obj); } else { } drm_mm_remove_node(stolen); kfree((void const *)stolen); return ((struct drm_i915_gem_object *)0); } }
/* i915_gem_object_create_stolen_for_preallocated: create a stolen GEM object
 * at a fixed stolen offset and (optionally) a fixed GTT offset; declarations
 * here, body spans the next three lines. */
struct drm_i915_gem_object *i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev , u32 stolen_offset , u32 gtt_offset , u32 size ) { struct drm_i915_private *dev_priv ; struct 
/* continuation: WARN-and-bail on size == 0, non-page-aligned size, or
 * non-page-aligned stolen_offset (the nested WARN_ON expansions below). */
i915_address_space *ggtt ; struct drm_i915_gem_object *obj ; struct drm_mm_node *stolen ; struct i915_vma *vma ; int ret ; bool tmp ; int tmp___0 ; long tmp___1 ; int __ret_warn_on ; long tmp___2 ; long tmp___3 ; int __ret_warn_on___0 ; long tmp___4 ; long tmp___5 ; int __ret_warn_on___1 ; long tmp___6 ; long tmp___7 ; void *tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; bool tmp___12 ; long tmp___13 ; bool tmp___14 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ggtt = & dev_priv->gtt.base; tmp = drm_mm_initialized(& dev_priv->mm.stolen); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return ((struct drm_i915_gem_object *)0); } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_gem_object_create_stolen_for_preallocated", "creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", stolen_offset, gtt_offset, size); } else { } __ret_warn_on = size == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_stolen.c", 488, "WARN_ON(size == 0)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { return ((struct drm_i915_gem_object *)0); } else { __ret_warn_on___0 = (size & 4095U) != 0U; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_stolen.c", 488, "WARN_ON(size & 4095)"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { return ((struct drm_i915_gem_object *)0); } 
/* continuation: reserve the exact stolen range with drm_mm_reserve_node,
 * wrap it in a GEM object, and — unless gtt_offset is ~0U ("no GTT binding
 * requested") — look up or create a GGTT vma and pin it at the fixed offset. */
else { __ret_warn_on___1 = (stolen_offset & 4095U) != 0U; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_stolen.c", 489, "WARN_ON(stolen_offset & 4095)"); } else { } tmp___7 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___7 != 0L) { return ((struct drm_i915_gem_object *)0); } else { } } } tmp___8 = kzalloc(72UL, 208U); stolen = (struct drm_mm_node *)tmp___8; if ((unsigned long )stolen == (unsigned long )((struct drm_mm_node *)0)) { return ((struct drm_i915_gem_object *)0); } else { } stolen->start = (u64 )stolen_offset; stolen->size = (u64 )size; ret = drm_mm_reserve_node(& dev_priv->mm.stolen, stolen); if (ret != 0) { tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("i915_gem_object_create_stolen_for_preallocated", "failed to allocate stolen space\n"); } else { } kfree((void const *)stolen); return ((struct drm_i915_gem_object *)0); } else { } obj = _i915_gem_object_create_stolen(dev, stolen); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("i915_gem_object_create_stolen_for_preallocated", "failed to allocate stolen object\n"); } else { } drm_mm_remove_node(stolen); kfree((void const *)stolen); return ((struct drm_i915_gem_object *)0); } else { } if (gtt_offset == 4294967295U) { return (obj); } else { } vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); tmp___12 = IS_ERR((void const *)vma); if ((int )tmp___12) { tmp___11 = PTR_ERR((void const *)vma); ret = (int )tmp___11; goto err_out; } else { } vma->node.start = (u64 )gtt_offset; vma->node.size = (u64 )size; tmp___14 = drm_mm_initialized(& ggtt->mm); if ((int 
/* continuation: reserve the fixed GGTT node, mark the vma bound, and link the
 * object onto the bound/inactive lists; error paths unwind the vma, the
 * stolen node, and the object reference in order. */
)tmp___14) { ret = drm_mm_reserve_node(& ggtt->mm, & vma->node); if (ret != 0) { tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("i915_gem_object_create_stolen_for_preallocated", "failed to allocate stolen GTT space\n"); } else { } goto err_vma; } else { } } else { } vma->bound = (unsigned char )((unsigned int )vma->bound | 1U); list_add_tail(& obj->global_list, & dev_priv->mm.bound_list); list_add_tail(& vma->mm_list, & ggtt->inactive_list); i915_gem_object_pin_pages(obj); return (obj); err_vma: i915_gem_vma_destroy(vma); err_out: drm_mm_remove_node(stolen); kfree((void const *)stolen); drm_gem_object_unreference___7(& obj->base); return ((struct drm_i915_gem_object *)0); } } extern int ldv_probe_137(void) ;
/* LDV harness: allocate a dummy 592-byte GEM object for driving the stolen
 * ops vtable from the verifier's environment model. */
void ldv_initialize_drm_i915_gem_object_ops_137(void) { void *tmp ; { tmp = ldv_init_zalloc(592UL); i915_gem_object_stolen_ops_group0 = (struct drm_i915_gem_object *)tmp; return; } }
/* ldv_main_exported_137: LDV-generated state machine that nondeterministically
 * invokes the stolen ops callbacks in every state the verifier tracks. */
void ldv_main_exported_137(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_137 == 2) { i915_gem_object_release_stolen(i915_gem_object_stolen_ops_group0); ldv_state_variable_137 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48179; case 1: ; if (ldv_state_variable_137 == 1) { i915_gem_object_put_pages_stolen(i915_gem_object_stolen_ops_group0); ldv_state_variable_137 = 1; } else { } if (ldv_state_variable_137 == 2) { i915_gem_object_put_pages_stolen(i915_gem_object_stolen_ops_group0); ldv_state_variable_137 = 2; } else { } goto ldv_48179; case 2: ; if (ldv_state_variable_137 == 1) { i915_gem_object_get_pages_stolen(i915_gem_object_stolen_ops_group0); ldv_state_variable_137 = 1; } else { } if (ldv_state_variable_137 == 2) { i915_gem_object_get_pages_stolen(i915_gem_object_stolen_ops_group0); ldv_state_variable_137 = 2; } else { } goto ldv_48179; case 3: ; if (ldv_state_variable_137 == 1) { ldv_probe_137(); ldv_state_variable_137 = 2; ref_cnt = ref_cnt + 1; } else { } goto 
ldv_48179; default: ldv_stop(); } ldv_48179: ; return; } }
/* LDV model wrappers 329-333: same pattern as 315-319 — forward to the real
 * workqueue API while updating the verifier's work model. */
bool ldv_queue_work_on_329(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_330(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_331(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_332(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_333(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/* Non-atomic x86 bit helpers (bts/btr) used by the tiling code below. */
__inline static void __set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile ("bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static void __clear_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile ("btr %1,%0": "+m" 
(*((long volatile *)addr)): "Ir" (nr)); return; } }
/* __atomic_add_unless___1: cmpxchg loop adding `a` to *v unless it equals
 * `u`; returns the old value. atomic_add_unless___1 wraps it into a bool. */
__inline static int __atomic_add_unless___1(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5708: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5707; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5707; } else { } c = old; goto ldv_5708; ldv_5707: ; return (c); } } __inline static int atomic_add_unless___1(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless___1(v, a, u); return (tmp != u); } } bool ldv_queue_work_on_343(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_345(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_344(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_347(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_346(struct workqueue_struct *ldv_func_arg1 ) ;
/* kref_sub___8 / kref_put___8: CIL clones of the kref helpers for this
 * translation unit (identical logic to kref_sub___7 / kref_put___7). */
__inline static int kref_sub___8(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___8(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___8(kref, 1U, release); return (tmp); } 
} __inline static int kref_put_mutex___1(struct kref *kref , void (*release)(struct kref * ) , struct mutex *lock ) { int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 138); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = atomic_add_unless___1(& kref->refcount, -1, 1); tmp___3 = ldv__builtin_expect(tmp___2 == 0, 0L); if (tmp___3 != 0L) { mutex_lock_nested(lock, 0U); tmp___0 = atomic_dec_and_test(& kref->refcount); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { mutex_unlock(lock); return (0); } else { } (*release)(kref); return (1); } else { } return (0); } } __inline static void drm_gem_object_unreference___8(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___8(& obj->refcount, & drm_gem_object_free); } else { } return; } } __inline static void drm_gem_object_unreference_unlocked___0(struct drm_gem_object *obj ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) { return; } else { } dev = obj->dev; tmp = kref_put_mutex___1(& obj->refcount, & drm_gem_object_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { lock_acquire(& dev->struct_mutex.dep_map, 0U, 0, 0, 1, (struct lockdep_map *)0, 0UL); lock_release(& dev->struct_mutex.dep_map, 0, 0UL); } return; } } int i915_gem_set_tiling(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_gem_get_tiling(struct drm_device *dev , void *data , struct drm_file *file ) ; __inline static int i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj ) { struct i915_vma *tmp ; int tmp___0 ; { tmp = i915_gem_obj_to_ggtt(obj); tmp___0 = i915_vma_unbind(tmp); return 
(tmp___0); } }
/* Probe hardware state to choose the bit-6 swizzle mode for X and Y tiling and store the result in dev_priv->mm.bit_6_swizzle_{x,y}. 5U is the "unknown" sentinel (cleared to a real mode, or left when detection fails). The nested __p___N blocks are CIL-expanded INTEL_INFO()/gen checks; the mmio_readl offsets (282624L, 1331204L, 1331208L, 66048L, 66052L, ...) are fixed register addresses — presumably DCC/MCH/DIMM config registers, confirm against the i915 register map. Also sets the swizzle quirk bit (| 32UL) on gen4 when a DDC2 bit is clear, and falls back to "no swizzle" when the DCC read returns all-ones (MCHBAR unreadable). */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t swizzle_x ; uint32_t swizzle_y ; uint32_t tmp ; uint32_t dimm_c0 ; uint32_t dimm_c1 ; uint32_t dcc ; uint32_t ddc2 ; uint32_t tmp___0 ; struct drm_i915_private *__p ; uint16_t tmp___1 ; uint16_t tmp___2 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; swizzle_x = 5U; swizzle_y = 5U; __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 7U) { swizzle_x = 0U; swizzle_y = 0U; } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { swizzle_x = 0U; swizzle_y = 0U; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 5U) { if ((int )dev_priv->preserve_bios_swizzle) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282624L, 1); if ((tmp & 8192U) != 0U) { swizzle_x = 2U; swizzle_y = 1U; } else { swizzle_x = 0U; swizzle_y = 0U; } } else { dimm_c0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1331204L, 1); dimm_c1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1331208L, 1); dimm_c0 = dimm_c0 & 65535U; dimm_c1 = dimm_c1 & 65535U; if (dimm_c0 == dimm_c1) { swizzle_x = 2U; swizzle_y = 1U; } else { swizzle_x = 0U; swizzle_y = 0U; } } } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 5U) { swizzle_x = 2U; swizzle_y = 1U; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 2U) { swizzle_x = 0U; swizzle_y = 0U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 3U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) == 0U) { _L: /* CIL Label */ dcc = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 66048L, 1); switch (dcc & 3U) { case 0U: ; case 1U: swizzle_x = 0U; swizzle_y = 0U; goto ldv_48036; case 2U: ; if ((dcc & 1024U) != 0U) { swizzle_x = 2U; swizzle_y = 1U; } else if ((dcc & 512U) == 0U) { swizzle_x = 4U; swizzle_y = 3U; } else { swizzle_x = 7U; swizzle_y = 6U; } goto ldv_48036; } ldv_48036: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 4U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 66052L, 1); ddc2 = tmp___0; if ((ddc2 & 1048576U) == 0U) { dev_priv->quirks = dev_priv->quirks | 32UL; } else { } } else { } if (dcc == 4294967295U) { drm_err("Couldn\'t read from MCHBAR. Disabling tiling.\n"); swizzle_x = 5U; swizzle_y = 5U; } else { } } else { goto _L___0; } } else { _L___0: /* CIL Label */ tmp___1 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 66054L, 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 67078L, 1); if ((int )tmp___1 != (int )tmp___2) { swizzle_x = 0U; swizzle_y = 0U; } else { swizzle_x = 2U; swizzle_y = 1U; } } } } } } } } dev_priv->mm.bit_6_swizzle_x = swizzle_x; dev_priv->mm.bit_6_swizzle_y = swizzle_y; return; } }
/* Validate a (stride, size, tiling_mode) triple for this GPU generation: picks the tile width (128 or 512 bytes depending on gen/mode/device id 9618U), enforces per-generation maximum strides (262271 / 131199 / 8192) and sizes, requires stride >= tile_width, and on gen>3 requires stride to be a multiple of tile_width (power-of-two stride on older parts). Returns 1 when acceptable. */
static bool i915_tiling_ok(struct drm_device *dev , int stride , int size , int tiling_mode ) { int tile_width ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { if (tiling_mode == 0) { return (1); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { tile_width = 128; } else if (tiling_mode == 2) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 2U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) != 9618U) { tile_width = 128; } else { tile_width = 512; } } else { tile_width = 512; } } else { tile_width = 512; } } else { tile_width = 512; } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 6U) { if (stride > 262271) { return (0); } else { } } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 3U) { if (stride > 131199) { return (0); } else { } } else { if (stride > 8192) { return (0); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 3U) { if (size > 268435456) { return (0); } else { } } else if (size > 134217728) { return (0); } else { } } } if (stride < tile_width) { return (0); } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 3U) { if (((tile_width + -1) & stride) != 0) { return (0); } else { } return (1); } else { } if (((stride + -1) & stride) != 0) { return (0); } else { } return (1); } }
/* Check whether the object's current GGTT placement is compatible with a fence register for 'tiling_mode': always OK on gen>3 or when untiled; on gen<=3 the GGTT offset must sit on the mask-defined boundary, and the GGTT allocation must be exactly the fence size and size-aligned. Returns 1 when a fence can cover the object as-is. */
static bool i915_gem_object_fence_ok(struct drm_i915_gem_object *obj , int tiling_mode ) { u32 size ; struct drm_i915_private *__p ; unsigned long tmp ; unsigned long tmp___0 ; struct drm_i915_private *__p___0 ; unsigned long tmp___1 ; unsigned long tmp___2 ; { if (tiling_mode == 0) { return (1); } else { } __p = to_i915((struct drm_device const *)obj->base.dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { return (1); } else { } __p___0 = to_i915((struct drm_device const *)obj->base.dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 3U) { tmp = i915_gem_obj_ggtt_offset(obj); if ((tmp & 0xfffffffff00fffffUL) != 0UL) { return (0); } else { } } else { tmp___0 = i915_gem_obj_ggtt_offset(obj); if ((tmp___0 & 0xfffffffff807ffffUL) != 0UL) { return (0); } else { } } size = i915_gem_get_gtt_size(obj->base.dev, (uint32_t )obj->base.size, tiling_mode); tmp___1 = i915_gem_obj_ggtt_size(obj); if (tmp___1 != (unsigned long )size) { return (0); } else { } tmp___2 = i915_gem_obj_ggtt_offset(obj); if ((tmp___2 & (unsigned long )(size - 1U)) != 0UL) { return (0); } else { } return (1); } }
/* SET_TILING ioctl: look up the object (-2/-ENOENT when missing), validate the requested stride/mode (-22/-EINVAL), refuse while pinned for display or used by a framebuffer (-16/-EBUSY), resolve the effective swizzle mode (degrading bit-17 swizzles 6U/7U and disabling tiling entirely for the unknown mode 5U), unbind from the GGTT when the existing fence placement no longer fits, manage the bit-17 quirk page pinning, then commit the new tiling_mode/stride and drop the mmap so stale CPU maps fault. The byte-offset accesses (*(obj+410UL), *(obj+409UL)) are CIL-flattened bitfields — presumably map_and_fenceable / pin flags, confirm against struct drm_i915_gem_object. */
int i915_gem_set_tiling(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_set_tiling *args ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; int ret ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; void *tmp___4 ; bool tmp___5 ; { args = (struct drm_i915_gem_set_tiling *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; tmp = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { return (-2); } else { } tmp___0 = i915_tiling_ok(dev, (int )args->stride, (int )obj->base.size, (int )args->tiling_mode); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { drm_gem_object_unreference_unlocked___0(& obj->base); return (-22); } else { } mutex_lock_nested(& dev->struct_mutex, 0U); if (obj->pin_display != 0U || obj->framebuffer_references != 0UL) { ret = -16; goto err; } else { } if (args->tiling_mode == 0U) { args->swizzle_mode = 0U; args->stride = 0U; } else { if (args->tiling_mode == 1U) { args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; } else { args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; } if (args->swizzle_mode == 6U) { args->swizzle_mode = 1U; } else { } if (args->swizzle_mode == 7U) { args->swizzle_mode = 2U; } else { } if (args->swizzle_mode == 5U) { args->tiling_mode = 0U; args->swizzle_mode = 0U; args->stride = 0U; } else { } } if (args->tiling_mode != (__u32 )obj->tiling_mode || args->stride != obj->stride) { if ((unsigned int )*((unsigned char *)obj + 410UL) != 0U) { tmp___2 = i915_gem_object_fence_ok(obj, (int )args->tiling_mode); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { ret = i915_gem_object_ggtt_unbind(obj); } else { } } else { } if (ret == 0) { if (((unsigned long )obj->pages != (unsigned long )((struct sg_table *)0) && (unsigned int )*((unsigned char *)obj + 409UL) == 0U) && (dev_priv->quirks & 32UL) != 0UL) { if (args->tiling_mode == 0U) { i915_gem_object_unpin_pages(obj); } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) == 0U) { i915_gem_object_pin_pages(obj); } else { } } else { } obj->fence_dirty = (unsigned char )((unsigned long )obj->last_fenced_req != (unsigned long )((struct drm_i915_gem_request *)0) || (unsigned int )*((unsigned short *)obj + 204UL) != 4032U); obj->tiling_mode = (unsigned char )args->tiling_mode; obj->stride = args->stride; i915_gem_release_mmap(obj); } else { } } else { } args->stride = obj->stride; args->tiling_mode = (__u32 )obj->tiling_mode; tmp___5 = i915_gem_object_needs_bit17_swizzle(obj); if ((int )tmp___5) { if ((unsigned long )obj->bit_17 == (unsigned long )((unsigned long *)0UL)) { tmp___4 = kcalloc(((obj->base.size >> 12) + 63UL) / 64UL, 8UL, 208U); obj->bit_17 = (unsigned long *)tmp___4; } else { } } else { kfree((void const *)obj->bit_17); obj->bit_17 = (unsigned long *)0UL; } err: drm_gem_object_unreference___8(& obj->base); mutex_unlock(& dev->struct_mutex); return (ret); } }
/* GET_TILING ioctl: report the object's current tiling mode and the matching swizzle mode (bit-17 variants 6U/7U are again reported as their simpler equivalents, while phys_swizzle_mode keeps the raw value). Returns -2 (-ENOENT) for a bad handle. Body continues on the next line. */
int i915_gem_get_tiling(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_gem_get_tiling *args ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; { args = (struct drm_i915_gem_get_tiling *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = drm_gem_object_lookup(dev, file, args->handle); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { return (-2); } else { } mutex_lock_nested(& dev->struct_mutex, 0U); args->tiling_mode = (__u32 )obj->tiling_mode; switch ((int )obj->tiling_mode) { case 1: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; goto ldv_48140; case 2: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; goto ldv_48140; case 0: args->swizzle_mode = 0U; goto ldv_48140; default: drm_err("unknown tiling mode\n"); } ldv_48140: args->phys_swizzle_mode = args->swizzle_mode; if (args->swizzle_mode == 6U) { args->swizzle_mode = 1U; } else { } if (args->swizzle_mode == 7U) { args->swizzle_mode = 2U; } else { } 
/* Tail of i915_gem_get_tiling(): drop the reference and release struct_mutex. */
drm_gem_object_unreference___8(& obj->base); mutex_unlock(& dev->struct_mutex); return (0); } }
/* Swap each pair of adjacent 64-byte cachelines within one 4 KiB page (via a 64-byte bounce buffer), undoing/applying bit-6 swizzling in software. Page is mapped with kmap/kunmap around the loop. */
static void i915_gem_swizzle_page(struct page *page ) { char temp[64U] ; char *vaddr ; int i ; void *tmp ; { tmp = kmap(page); vaddr = (char *)tmp; i = 0; goto ldv_48151; ldv_48150: memcpy((void *)(& temp), (void const *)vaddr + (unsigned long )i, 64UL); memcpy((void *)vaddr + (unsigned long )i, (void const *)(vaddr + ((unsigned long )i + 64UL)), 64UL); memcpy((void *)(vaddr + ((unsigned long )i + 64UL)), (void const *)(& temp), 64UL); i = i + 128; ldv_48151: ; if ((unsigned int )i <= 4095U) { goto ldv_48150; } else { } kunmap(page); return; } }
/* For each backing page, compare bit 17 of its current physical address (the shifted page-struct arithmetic is CIL-expanded page_to_phys) with the value recorded in obj->bit_17; when they differ, re-swizzle the page and mark it dirty. No-op when no bit_17 record exists. */
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj ) { struct sg_page_iter sg_iter ; int i ; struct page *page ; struct page *tmp ; char new_bit_17 ; int tmp___0 ; bool tmp___1 ; { if ((unsigned long )obj->bit_17 == (unsigned long )((unsigned long *)0UL)) { return; } else { } i = 0; __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, 0UL); goto ldv_48161; ldv_48160: tmp = sg_page_iter_page___0(& sg_iter); page = tmp; new_bit_17 = (char )(((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) >> 17); tmp___0 = variable_test_bit((long )i, (unsigned long const volatile *)obj->bit_17); if ((int )((_Bool )((int )new_bit_17 & 1)) ^ (tmp___0 != 0)) { i915_gem_swizzle_page(page); set_page_dirty(page); } else { } i = i + 1; ldv_48161: tmp___1 = __sg_page_iter_next(& sg_iter); if ((int )tmp___1) { goto ldv_48160; } else { } return; } }
/* Record bit 17 of every backing page's physical address into the obj->bit_17 bitmap (one bit per 4 KiB page; bitmap lazily kcalloc'ed, one unsigned long per 64 pages). Logs and bails out when the allocation fails. */
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj ) { struct sg_page_iter sg_iter ; int page_count___0 ; int i ; void *tmp ; struct page *tmp___0 ; bool tmp___1 ; { page_count___0 = (int )(obj->base.size >> 12); if ((unsigned long )obj->bit_17 == (unsigned long )((unsigned long *)0UL)) { tmp = kcalloc(((unsigned long )page_count___0 + 63UL) / 64UL, 8UL, 208U); obj->bit_17 = (unsigned long *)tmp; if ((unsigned long )obj->bit_17 == (unsigned long )((unsigned long *)0UL)) { drm_err("Failed to allocate memory for bit 17 record\n"); return; } else { } } else { } i = 0; __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, 0UL); goto ldv_48170; ldv_48169: tmp___0 = sg_page_iter_page___0(& sg_iter); if ((((unsigned long long )(((long )tmp___0 + 24189255811072L) / 64L) << 12) & 131072ULL) != 0ULL) { __set_bit((long )i, (unsigned long volatile *)obj->bit_17); } else { __clear_bit((long )i, (unsigned long volatile *)obj->bit_17); } i = i + 1; ldv_48170: tmp___1 = __sg_page_iter_next(& sg_iter); if ((int )tmp___1) { goto ldv_48169; } else { } return; } }
/* LDV model wrappers: forward to the real workqueue API, then notify the verifier's work-state machine (activate_work_18 / call_and_disable_all_18). */
bool ldv_queue_work_on_343(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_344(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_345(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_346(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_347(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Forward declarations for the next translation unit (i915_gem_userptr.c as merged by CIL). */
__inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ;
/* Second CIL copy of __atomic_add_unless() (identical CAS loop, re-expanded for this unit). */
__inline static int __atomic_add_unless___2(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5708: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5707; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5707; } else { } c = old; goto ldv_5708; ldv_5707: ; return (c); } }
/* Returns non-zero iff the add was performed (observed value != u). */
__inline static int atomic_add_unless___2(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless___2(v, a, u); return (tmp != u); } } extern void down_read(struct rw_semaphore * ) ; extern void up_read(struct rw_semaphore * ) ;
/* LDV workqueue model wrappers for this unit (definitions elsewhere in the merged file). */
bool ldv_queue_work_on_357(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_359(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_358(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_361(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_360(struct workqueue_struct *ldv_func_arg1 ) ;
/* queue_work(): queue on any CPU (8192 == WORK_CPU_UNBOUND as expanded by CIL). */
__inline static bool queue_work___0(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_357(8192, wq, work); return (tmp); } }
/* schedule_work(): queue on the system workqueue (body continues on the next line). */
__inline static bool schedule_work(struct work_struct *work ) { 
bool tmp ; { tmp = queue_work___0(system_wq, work); return (tmp); } }
/* CIL copy of kref_get(): increment the refcount; WARN once (kref.h:47) if the count was not already positive (get on a dead object). */
__inline static void kref_get___7(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } }
/* kref_sub() copy for this unit: drop 'count' refs; call 'release' and return 1 on reaching zero. */
__inline static int kref_sub___9(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } }
/* kref_put() copy: drop exactly one reference. */
__inline static int kref_put___9(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___9(kref, 1U, release); return (tmp); } }
/* kref_put_mutex() copy: take 'lock' only for the final 1->0 decrement; returns 1 with the mutex held when released. Body continues on the next line. */
__inline static int kref_put_mutex___2(struct kref *kref , void (*release)(struct kref * ) , struct mutex *lock ) { int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 138); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = atomic_add_unless___2(& kref->refcount, -1, 1); tmp___3 = ldv__builtin_expect(tmp___2 == 0, 0L); if (tmp___3 != 0L) { mutex_lock_nested(lock, 0U); tmp___0 = atomic_dec_and_test(& kref->refcount); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { mutex_unlock(lock); return (0); } else { } (*release)(kref); return (1); } else { } return (0); } }
/* kref_get_unless_zero(): take a reference only if the object is still alive; returns non-zero on success. */
__inline static int kref_get_unless_zero(struct kref *kref ) { int tmp ; { tmp = atomic_add_unless___2(& kref->refcount, 1, 0); return (tmp); } } extern void __put_task_struct(struct task_struct * ) ;
/* Drop a task_struct usage reference, freeing on the last put. */
__inline static void put_task_struct(struct task_struct *t ) { int tmp ; { tmp = atomic_dec_and_test(& t->usage); if (tmp != 0) { __put_task_struct(t); } else { } return; } } extern void __mmdrop(struct mm_struct * ) ;
/* Drop an mm_struct mm_count reference, freeing on the last put. */
__inline static void mmdrop(struct mm_struct *mm ) { int tmp ; long tmp___0 ; { tmp = atomic_dec_and_test(& mm->mm_count); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { __mmdrop(mm); } else { } return; } }
/* LDV work-state machine hooks and kernel externs used by the userptr code below. */
void activate_work_5(struct work_struct *work , int state ) ; void activate_work_6(struct work_struct *work , int state ) ; void invoke_work_5(void) ; void invoke_work_6(void) ; void disable_work_5(struct work_struct *work ) ; void call_and_disable_all_6(int state ) ; void call_and_disable_all_5(int state ) ; void call_and_disable_work_5(struct work_struct *work ) ; void disable_work_6(struct work_struct *work ) ; void call_and_disable_work_6(struct work_struct *work ) ; extern long get_user_pages(struct task_struct * , struct mm_struct * , unsigned long , unsigned long , int , int , struct page ** , struct vm_area_struct ** ) ; extern int __get_user_pages_fast(unsigned long , int , int , struct page ** ) ; extern int sg_alloc_table_from_pages(struct sg_table * , struct page ** , unsigned int , unsigned long , unsigned long , gfp_t ) ; extern void release_pages(struct page ** , int , bool ) ;
/* hash_64(): multiplicative hash (golden-ratio-style constant), keeping the top 'bits' bits. */
__inline static u64 hash_64(u64 val , unsigned int bits ) { u64 hash ; { hash = val; hash = hash * 0x9e37fffffffc0001ULL; return (hash >> (int )(64U - bits)); } }
/* Initialize 'sz' hash-list bucket heads to empty. */
__inline static void __hash_init___0(struct hlist_head *ht , unsigned int sz ) { unsigned int i ; { i = 0U; goto ldv_40459; ldv_40458: (ht + (unsigned long )i)->first = (struct hlist_node *)0; i = i + 1U; ldv_40459: ; if (i < sz) { goto ldv_40458; } else { } return; } }
/* GEM reference helpers for this unit (same semantics as the ___8/___0 copies earlier in the file). */
__inline static void drm_gem_object_reference___4(struct drm_gem_object *obj ) { { kref_get___7(& obj->refcount); return; } } __inline static void drm_gem_object_unreference___9(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___9(& obj->refcount, & drm_gem_object_free); } else { } return; } } __inline static void drm_gem_object_unreference_unlocked___1(struct drm_gem_object *obj ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) { return; } else { } dev = obj->dev; tmp = kref_put_mutex___2(& obj->refcount, & drm_gem_object_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { lock_acquire(& dev->struct_mutex.dep_map, 0U, 0, 0, 1, (struct lockdep_map *)0, 0UL); lock_release(& dev->struct_mutex.dep_map, 0, 0UL); } return; } }
/* userptr ioctl entry point plus mmu-notifier / interval-tree externs it relies on. */
int i915_gem_userptr_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; extern int __mmu_notifier_register(struct mmu_notifier * , struct mm_struct * ) ; extern void mmu_notifier_unregister(struct mmu_notifier * , struct mm_struct * ) ; extern void interval_tree_insert(struct interval_tree_node * , struct rb_root * ) ; extern void interval_tree_remove(struct interval_tree_node * , struct rb_root * ) ; extern struct interval_tree_node *interval_tree_iter_first(struct rb_root * , unsigned long , unsigned long ) ; extern struct interval_tree_node *interval_tree_iter_next(struct interval_tree_node * , unsigned long , unsigned long ) ;
/* cancel_userptr(): tear down a userptr object after its backing range was invalidated; declarations continue on the next line. */
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj ) { struct drm_device *dev ; unsigned long end ; struct drm_i915_private 
/* Body of cancel_userptr(): under struct_mutex, clear the pending userptr work, and if pages are attached unbind every VMA (WARNing on any error other than -5/-EIO, with interruptible waits disabled around the loop) and drop the backing pages. Consumes one object reference and returns the end address (ptr + size) of the user range, used by the invalidate loop to advance. The 0xffffffffffffff58UL additions are CIL-expanded container_of() offsets for list iteration. */
*dev_priv ; struct drm_i915_private *tmp ; struct i915_vma *vma ; struct i915_vma *tmp___0 ; bool was_interruptible ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int ret ; int tmp___1 ; int __ret_warn_on ; long tmp___2 ; struct list_head const *__mptr___1 ; int __ret_warn_on___0 ; int tmp___3 ; long tmp___4 ; { dev = obj->base.dev; mutex_lock_nested(& dev->struct_mutex, 0U); obj->__annonCompField84.userptr.work = (struct work_struct *)0; if ((unsigned long )obj->pages != (unsigned long )((struct sg_table *)0)) { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = 0; __mptr = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff58UL; __mptr___0 = (struct list_head const *)vma->vma_link.next; tmp___0 = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; goto ldv_49150; ldv_49149: tmp___1 = i915_vma_unbind(vma); ret = tmp___1; __ret_warn_on = ret != 0 && ret != -5; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_userptr.c", 84, "WARN_ON(ret && ret != -EIO)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); vma = tmp___0; __mptr___1 = (struct list_head const *)tmp___0->vma_link.next; tmp___0 = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; ldv_49150: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_49149; } else { } tmp___3 = i915_gem_object_put_pages(obj); __ret_warn_on___0 = tmp___3 != 0; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_userptr.c", 86, "WARN_ON(i915_gem_object_put_pages(obj))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); dev_priv->mm.interruptible = was_interruptible; } else { } end = obj->__annonCompField84.userptr.ptr + obj->base.size; drm_gem_object_unreference___9(& obj->base); mutex_unlock(& dev->struct_mutex); return (end); } }
/* Linear-list fallback for overlapping userptr objects: walk mn->linear under mn->lock, and for each tracked object intersecting [start, end] take a reference (skipping dying objects) then drop the lock to cancel it; restart the walk whenever mn->serial shows the list changed while unlocked. Always returns NULL. */
static void *invalidate_range__linear(struct i915_mmu_notifier *mn , struct mm_struct *mm , unsigned long start , unsigned long end ) { struct i915_mmu_object *mo ; unsigned long serial ; struct list_head const *__mptr ; struct drm_i915_gem_object *obj ; int tmp ; struct list_head const *__mptr___0 ; { restart: serial = mn->serial; __mptr = (struct list_head const *)mn->linear.next; mo = (struct i915_mmu_object *)__mptr + 0xffffffffffffffc8UL; goto ldv_49170; ldv_49169: ; if (mo->it.last < start || mo->it.start > end) { goto ldv_49168; } else { } obj = mo->obj; tmp = kref_get_unless_zero(& obj->base.refcount); if (tmp == 0) { goto ldv_49168; } else { } spin_unlock(& mn->lock); cancel_userptr(obj); spin_lock(& mn->lock); if (mn->serial != serial) { goto restart; } else { } ldv_49168: __mptr___0 = (struct list_head const *)mo->link.next; mo = (struct i915_mmu_object *)__mptr___0 + 0xffffffffffffffc8UL; ldv_49170: ; if ((unsigned long )(& mo->link) != (unsigned long )(& mn->linear)) { goto ldv_49169; } else { } return ((void *)0); } }
/* mmu_notifier invalidate_range_start callback: convert 'end' to inclusive, then repeatedly look up (interval tree, or the linear path when overlaps exist) the next tracked object in [start, end], pin it with kref_get_unless_zero, and cancel it; the serial value detects concurrent tree changes and forces a fresh lookup. */
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn , struct mm_struct *mm , unsigned long start , unsigned long end ) { struct i915_mmu_notifier *mn ; struct mmu_notifier const *__mptr ; struct interval_tree_node *it ; unsigned long next ; unsigned long serial ; struct drm_i915_gem_object *obj ; void *tmp ; struct interval_tree_node const *__mptr___0 ; int tmp___0 ; { __mptr = (struct mmu_notifier const *)_mn; mn = (struct i915_mmu_notifier *)__mptr + 0xffffffffffffffa8UL; it = (struct interval_tree_node *)0; next = start; serial = 0UL; end = end - 1UL; goto ldv_49187; ldv_49188: obj = (struct drm_i915_gem_object *)0; spin_lock(& mn->lock); if ((int )mn->has_linear) { tmp = invalidate_range__linear(mn, mm, start, end); it = (struct interval_tree_node *)tmp; } else if (mn->serial == serial) { it = interval_tree_iter_next(it, next, end); } else { it = interval_tree_iter_first(& mn->objects, start, end); } if ((unsigned long )it != (unsigned long )((struct interval_tree_node *)0)) { __mptr___0 = (struct interval_tree_node const *)it; obj = ((struct i915_mmu_object *)__mptr___0 + 0xfffffffffffffff8UL)->obj; tmp___0 = kref_get_unless_zero(& obj->base.refcount); if (tmp___0 == 0) { spin_unlock(& mn->lock); serial = 0UL; goto ldv_49187; } else { } serial = mn->serial; } else { } spin_unlock(& mn->lock); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return; } else { } next = cancel_userptr(obj); ldv_49187: ; if (next < end) { goto ldv_49188; } else { } return; } }
/* Ops table: only invalidate_range_start is implemented (the sixth slot); all other callbacks are NULL. */
static struct mmu_notifier_ops const i915_gem_userptr_notifier = {0, 0, 0, 0, 0, & i915_gem_userptr_mn_invalidate_range_start, 0, 0};
/* Allocate and register an i915_mmu_notifier for 'mm': kmalloc (152UL is the CIL-computed struct size), init the spinlock with a lockdep key, empty interval tree and linear list, serial starting at 1; returns ERR_PTR(-ENOMEM) or the registration error on failure. */
static struct i915_mmu_notifier *i915_mmu_notifier_create(struct mm_struct *mm ) { struct i915_mmu_notifier *mn ; int ret ; void *tmp ; void *tmp___0 ; struct lock_class_key __key ; struct rb_root __constr_expr_0___0 ; void *tmp___1 ; { tmp = kmalloc(152UL, 208U); mn = (struct i915_mmu_notifier *)tmp; if ((unsigned long )mn == (unsigned long )((struct i915_mmu_notifier *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct i915_mmu_notifier *)tmp___0); } else { } spinlock_check(& mn->lock); __raw_spin_lock_init(& mn->lock.__annonCompField18.rlock, "&(&mn->lock)->rlock", & __key); mn->mn.ops = & i915_gem_userptr_notifier; __constr_expr_0___0.rb_node = (struct rb_node *)0; mn->objects = __constr_expr_0___0; mn->serial = 1UL; INIT_LIST_HEAD(& mn->linear); mn->has_linear = 0; ret = __mmu_notifier_register(& mn->mn, mm); if (ret != 0) { kfree((void const *)mn); tmp___1 = ERR_PTR((long )ret); return ((struct i915_mmu_notifier *)tmp___1); } else { } return (mn); } }
/* Bump the change counter, skipping 0 (0 is reserved as the "force re-lookup" sentinel used by the invalidate loops). */
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn ) { { mn->serial = mn->serial + 1UL; if (mn->serial == 0UL) { mn->serial = 1UL; } else { } return; } }
/* Track a new userptr range: retire outstanding requests, then under mn->lock either insert into the interval tree (no overlap) or — when an overlapping object allows it (the flattened bitfield at byte offset 560UL; presumably the is_tiled/active flag, confirm against struct layout) — fall back to linear-list mode, else fail with -11 (-EAGAIN). On success the object joins mn->linear and the serial is bumped. */
static int i915_mmu_notifier_add(struct drm_device *dev , struct i915_mmu_notifier *mn , struct i915_mmu_object *mo ) { struct interval_tree_node *it ; int ret ; struct drm_i915_gem_object *obj ; struct interval_tree_node const *__mptr ; bool tmp ; { ret = 0; mutex_lock_nested(& dev->struct_mutex, 0U); i915_gem_retire_requests(dev); spin_lock(& mn->lock); it = interval_tree_iter_first(& mn->objects, mo->it.start, mo->it.last); if ((unsigned long )it != (unsigned long )((struct interval_tree_node *)0)) { __mptr = (struct interval_tree_node const *)it; obj = ((struct i915_mmu_object *)__mptr + 0xfffffffffffffff8UL)->obj; if ((unsigned int )*((unsigned char *)obj + 560UL) == 0U) { tmp = 1; mo->is_linear = tmp; mn->has_linear = tmp; } else { ret = -11; } } else { interval_tree_insert(& mo->it, & mn->objects); } if (ret == 0) { list_add(& mo->link, & mn->linear); __i915_mmu_notifier_update_serial(mn); } else { } spin_unlock(& mn->lock); mutex_unlock(& dev->struct_mutex); return (ret); } }
/* Return true when any tracked object still requires the linear path (loop condition wraps onto a later line outside this view). */
static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn ) { struct i915_mmu_object *mo ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)mn->linear.next; mo = (struct i915_mmu_object *)__mptr + 0xffffffffffffffc8UL; goto ldv_49220; ldv_49219: ; if ((int )mo->is_linear) { return (1); } else { } __mptr___0 = (struct list_head const *)mo->link.next; mo = (struct i915_mmu_object *)__mptr___0 + 0xffffffffffffffc8UL; ldv_49220: ; if ((unsigned long )(& mo->link) != 
(unsigned long )(& mn->linear)) { goto ldv_49219; } else { } return (0); } } static void i915_mmu_notifier_del(struct i915_mmu_notifier *mn , struct i915_mmu_object *mo ) { { spin_lock(& mn->lock); list_del(& mo->link); if ((int )mo->is_linear) { mn->has_linear = i915_mmu_notifier_has_linear(mn); } else { interval_tree_remove(& mo->it, & mn->objects); } __i915_mmu_notifier_update_serial(mn); spin_unlock(& mn->lock); return; } } static void i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj ) { struct i915_mmu_object *mo ; { mo = obj->__annonCompField84.userptr.mmu_object; if ((unsigned long )mo == (unsigned long )((struct i915_mmu_object *)0)) { return; } else { } i915_mmu_notifier_del(mo->mn, mo); kfree((void const *)mo); obj->__annonCompField84.userptr.mmu_object = (struct i915_mmu_object *)0; return; } } static struct i915_mmu_notifier *i915_mmu_notifier_find(struct i915_mm_struct *mm ) { struct i915_mmu_notifier *mn ; struct drm_i915_private *tmp ; bool tmp___0 ; int tmp___1 ; struct drm_i915_private *tmp___2 ; { mn = mm->mn; mn = mm->mn; if ((unsigned long )mn != (unsigned long )((struct i915_mmu_notifier *)0)) { return (mn); } else { } down_write(& (mm->mm)->mmap_sem); tmp = to_i915((struct drm_device const *)mm->dev); mutex_lock_nested(& tmp->mm_lock, 0U); mn = mm->mn; if ((unsigned long )mn == (unsigned long )((struct i915_mmu_notifier *)0)) { mn = i915_mmu_notifier_create(mm->mm); tmp___0 = IS_ERR((void const *)mn); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { mm->mn = mn; } else { } } else { } tmp___2 = to_i915((struct drm_device const *)mm->dev); mutex_unlock(& tmp___2->mm_lock); up_write(& (mm->mm)->mmap_sem); return (mn); } } static int i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj , unsigned int flags ) { struct i915_mmu_notifier *mn ; struct i915_mmu_object *mo ; int ret ; bool tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; bool tmp___3 ; void *tmp___4 ; { if 
((int )flags < 0) { tmp = capable(21); return ((int )tmp ? 0 : -1); } else { } __ret_warn_on = (unsigned long )obj->__annonCompField84.userptr.mm == (unsigned long )((struct i915_mm_struct *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_userptr.c", 345, "WARN_ON(obj->userptr.mm == NULL)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return (-22); } else { } mn = i915_mmu_notifier_find(obj->__annonCompField84.userptr.mm); tmp___3 = IS_ERR((void const *)mn); if ((int )tmp___3) { tmp___2 = PTR_ERR((void const *)mn); return ((int )tmp___2); } else { } tmp___4 = kzalloc(88UL, 208U); mo = (struct i915_mmu_object *)tmp___4; if ((unsigned long )mo == (unsigned long )((struct i915_mmu_object *)0)) { return (-12); } else { } mo->mn = mn; mo->it.start = obj->__annonCompField84.userptr.ptr; mo->it.last = (mo->it.start + obj->base.size) - 1UL; mo->obj = obj; ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); if (ret != 0) { kfree((void const *)mo); return (ret); } else { } obj->__annonCompField84.userptr.mmu_object = mo; return (0); } } static void i915_mmu_notifier_free(struct i915_mmu_notifier *mn , struct mm_struct *mm ) { { if ((unsigned long )mn == (unsigned long )((struct i915_mmu_notifier *)0)) { return; } else { } mmu_notifier_unregister(& mn->mn, mm); kfree((void const *)mn); return; } } static struct i915_mm_struct *__i915_mm_struct_find(struct drm_i915_private *dev_priv , struct mm_struct *real ) { struct i915_mm_struct *mm ; struct hlist_node *____ptr ; u64 tmp ; struct hlist_node const *__mptr ; struct i915_mm_struct *tmp___0 ; struct hlist_node *____ptr___0 ; struct hlist_node const *__mptr___0 ; struct i915_mm_struct *tmp___1 ; { tmp = hash_64((u64 )real, 7U); 
____ptr = ((struct hlist_head *)(& dev_priv->mm_structs) + tmp)->first; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___0 = (struct i915_mm_struct *)__mptr + 0xffffffffffffffe8UL; } else { tmp___0 = (struct i915_mm_struct *)0; } mm = tmp___0; goto ldv_49813; ldv_49812: ; if ((unsigned long )mm->mm == (unsigned long )real) { return (mm); } else { } ____ptr___0 = mm->node.next; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___1 = (struct i915_mm_struct *)__mptr___0 + 0xffffffffffffffe8UL; } else { tmp___1 = (struct i915_mm_struct *)0; } mm = tmp___1; ldv_49813: ; if ((unsigned long )mm != (unsigned long )((struct i915_mm_struct *)0)) { goto ldv_49812; } else { } return ((struct i915_mm_struct *)0); } } static int i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct i915_mm_struct *mm ; int ret ; struct task_struct *tmp___0 ; void *tmp___1 ; struct task_struct *tmp___2 ; struct task_struct *tmp___3 ; u64 tmp___4 ; { tmp = to_i915((struct drm_device const *)obj->base.dev); dev_priv = tmp; ret = 0; mutex_lock_nested(& dev_priv->mm_lock, 0U); tmp___0 = get_current(); mm = __i915_mm_struct_find(dev_priv, tmp___0->mm); if ((unsigned long )mm == (unsigned long )((struct i915_mm_struct *)0)) { tmp___1 = kmalloc(128UL, 208U); mm = (struct i915_mm_struct *)tmp___1; if ((unsigned long )mm == (unsigned long )((struct i915_mm_struct *)0)) { ret = -12; goto out; } else { } kref_init(& mm->kref); mm->dev = obj->base.dev; tmp___2 = get_current(); mm->mm = tmp___2->mm; tmp___3 = get_current(); atomic_inc(& (tmp___3->mm)->mm_count); mm->mn = (struct i915_mmu_notifier *)0; tmp___4 = hash_64((u64 )mm->mm, 7U); hlist_add_head(& mm->node, (struct hlist_head *)(& dev_priv->mm_structs) + tmp___4); } else { kref_get___7(& mm->kref); } 
obj->__annonCompField84.userptr.mm = mm; out: mutex_unlock(& dev_priv->mm_lock); return (ret); } } static void __i915_mm_struct_free__worker(struct work_struct *work ) { struct i915_mm_struct *mm ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; mm = (struct i915_mm_struct *)__mptr + 0xffffffffffffffd0UL; i915_mmu_notifier_free(mm->mn, mm->mm); mmdrop(mm->mm); kfree((void const *)mm); return; } } static void __i915_mm_struct_free(struct kref *kref ) { struct i915_mm_struct *mm ; struct kref const *__mptr ; struct drm_i915_private *tmp ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; { __mptr = (struct kref const *)kref; mm = (struct i915_mm_struct *)__mptr + 0xffffffffffffffd8UL; hash_del(& mm->node); tmp = to_i915((struct drm_device const *)mm->dev); mutex_unlock(& tmp->mm_lock); __init_work(& mm->work, 0); __constr_expr_0___0.counter = 137438953408L; mm->work.data = __constr_expr_0___0; lockdep_init_map(& mm->work.lockdep_map, "(&mm->work)", & __key, 0); INIT_LIST_HEAD(& mm->work.entry); mm->work.func = & __i915_mm_struct_free__worker; schedule_work(& mm->work); return; } } static void i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj ) { struct drm_i915_private *tmp ; { if ((unsigned long )obj->__annonCompField84.userptr.mm == (unsigned long )((struct i915_mm_struct *)0)) { return; } else { } tmp = to_i915((struct drm_device const *)obj->base.dev); kref_put_mutex___2(& (obj->__annonCompField84.userptr.mm)->kref, & __i915_mm_struct_free, & tmp->mm_lock); obj->__annonCompField84.userptr.mm = (struct i915_mm_struct *)0; return; } } static int st_set_pages(struct sg_table **st , struct page **pvec , int num_pages ) { struct scatterlist *sg ; int ret ; int n ; void *tmp ; unsigned long tmp___0 ; { tmp = kmalloc(16UL, 208U); *st = (struct sg_table *)tmp; if ((unsigned long )*st == (unsigned long )((struct sg_table *)0)) { return (-12); } else { } tmp___0 = swiotlb_nr_tbl(); if (tmp___0 != 0UL) { ret 
= sg_alloc_table(*st, (unsigned int )num_pages, 208U); if (ret != 0) { goto err; } else { } n = 0; sg = (*st)->sgl; goto ldv_50129; ldv_50128: sg_set_page(sg, *(pvec + (unsigned long )n), 4096U, 0U); n = n + 1; sg = sg_next(sg); ldv_50129: ; if (n < num_pages) { goto ldv_50128; } else { } } else { ret = sg_alloc_table_from_pages(*st, pvec, (unsigned int )num_pages, 0UL, (unsigned long )(num_pages << 12), 208U); if (ret != 0) { goto err; } else { } } return (0); err: kfree((void const *)*st); *st = (struct sg_table *)0; return (ret); } } static void __i915_gem_userptr_get_pages_worker(struct work_struct *_work ) { struct get_pages_work *work ; struct work_struct const *__mptr ; struct drm_i915_gem_object *obj ; struct drm_device *dev ; int num_pages ; struct page **pvec ; int pinned ; int ret ; void *tmp ; void *tmp___0 ; struct mm_struct *mm ; long tmp___1 ; struct drm_i915_private *tmp___2 ; void *tmp___3 ; { __mptr = (struct work_struct const *)_work; work = (struct get_pages_work *)__mptr; obj = work->obj; dev = obj->base.dev; num_pages = (int const )(obj->base.size >> 12); ret = -12; pinned = 0; tmp = kmalloc((unsigned long )num_pages * 8UL, 529104U); pvec = (struct page **)tmp; if ((unsigned long )pvec == (unsigned long )((struct page **)0)) { tmp___0 = drm_malloc_ab((size_t )num_pages, 8UL); pvec = (struct page **)tmp___0; } else { } if ((unsigned long )pvec != (unsigned long )((struct page **)0)) { mm = (obj->__annonCompField84.userptr.mm)->mm; down_read(& mm->mmap_sem); goto ldv_50146; ldv_50145: tmp___1 = get_user_pages(work->task, mm, obj->__annonCompField84.userptr.ptr + (unsigned long )pinned * 4096UL, (unsigned long )(num_pages - pinned), (unsigned int )*((unsigned char *)obj + 560UL) == 0U, 0, pvec + (unsigned long )pinned, (struct vm_area_struct **)0); ret = (int )tmp___1; if (ret < 0) { goto ldv_50144; } else { } pinned = pinned + ret; ldv_50146: ; if (pinned < num_pages) { goto ldv_50145; } else { } ldv_50144: up_read(& mm->mmap_sem); } else { } 
mutex_lock_nested(& dev->struct_mutex, 0U); if ((unsigned long )obj->__annonCompField84.userptr.work != (unsigned long )(& work->work)) { ret = 0; } else if (pinned == num_pages) { ret = st_set_pages(& obj->pages, pvec, num_pages); if (ret == 0) { tmp___2 = to_i915((struct drm_device const *)dev); list_add_tail(& obj->global_list, & tmp___2->mm.unbound_list); pinned = 0; } else { } } else { } tmp___3 = ERR_PTR((long )ret); obj->__annonCompField84.userptr.work = (struct work_struct *)tmp___3; obj->__annonCompField84.userptr.workers = (unsigned char )((int )obj->__annonCompField84.userptr.workers - 1); drm_gem_object_unreference___9(& obj->base); mutex_unlock(& dev->struct_mutex); release_pages(pvec, pinned, 0); drm_free_large((void *)pvec); put_task_struct(work->task); kfree((void const *)work); return; } } static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj ) { int num_pages ; struct page **pvec ; int pinned ; int ret ; void *tmp ; void *tmp___0 ; struct task_struct *tmp___1 ; struct get_pages_work *work ; void *tmp___2 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; long tmp___3 ; bool tmp___4 ; { num_pages = (int const )(obj->base.size >> 12); pvec = (struct page **)0; pinned = 0; tmp___1 = get_current(); if ((unsigned long )(obj->__annonCompField84.userptr.mm)->mm == (unsigned long )tmp___1->mm) { tmp = kmalloc((unsigned long )num_pages * 8UL, 529104U); pvec = (struct page **)tmp; if ((unsigned long )pvec == (unsigned long )((struct page **)0)) { tmp___0 = drm_malloc_ab((size_t )num_pages, 8UL); pvec = (struct page **)tmp___0; if ((unsigned long )pvec == (unsigned long )((struct page **)0)) { return (-12); } else { } } else { } pinned = __get_user_pages_fast(obj->__annonCompField84.userptr.ptr, num_pages, (unsigned int )*((unsigned char *)obj + 560UL) == 0U, pvec); } else { } if (pinned < num_pages) { if (pinned < 0) { ret = pinned; pinned = 0; } else { ret = -11; if ((unsigned long )obj->__annonCompField84.userptr.work == 
(unsigned long )((struct work_struct *)0) && (int )obj->__annonCompField84.userptr.workers <= 14) { tmp___2 = kmalloc(96UL, 208U); work = (struct get_pages_work *)tmp___2; if ((unsigned long )work != (unsigned long )((struct get_pages_work *)0)) { obj->__annonCompField84.userptr.work = & work->work; obj->__annonCompField84.userptr.workers = (unsigned char )((int )obj->__annonCompField84.userptr.workers + 1); work->obj = obj; drm_gem_object_reference___4(& obj->base); work->task = get_current(); atomic_inc(& (work->task)->usage); __init_work(& work->work, 0); __constr_expr_0___0.counter = 137438953408L; work->work.data = __constr_expr_0___0; lockdep_init_map(& work->work.lockdep_map, "(&work->work)", & __key, 0); INIT_LIST_HEAD(& work->work.entry); work->work.func = & __i915_gem_userptr_get_pages_worker; schedule_work(& work->work); } else { ret = -12; } } else { tmp___4 = IS_ERR((void const *)obj->__annonCompField84.userptr.work); if ((int )tmp___4) { tmp___3 = PTR_ERR((void const *)obj->__annonCompField84.userptr.work); ret = (int )tmp___3; obj->__annonCompField84.userptr.work = (struct work_struct *)0; } else { } } } } else { ret = st_set_pages(& obj->pages, pvec, num_pages); if (ret == 0) { obj->__annonCompField84.userptr.work = (struct work_struct *)0; pinned = 0; } else { } } release_pages(pvec, pinned, 0); drm_free_large((void *)pvec); return (ret); } } static void i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj ) { struct sg_page_iter sg_iter ; long tmp ; struct page *page ; struct page *tmp___0 ; bool tmp___1 ; { tmp = ldv__builtin_expect((unsigned long )obj->__annonCompField84.userptr.work != (unsigned long )((struct work_struct *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gem_userptr.c"), "i" (713), "i" (12UL)); ldv_50161: ; goto ldv_50161; } else { } if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { obj->dirty = 0U; } else { } __sg_page_iter_start(& sg_iter, (obj->pages)->sgl, (obj->pages)->nents, 0UL); goto ldv_50164; ldv_50163: tmp___0 = sg_page_iter_page___0(& sg_iter); page = tmp___0; if ((unsigned int )*((unsigned char *)obj + 408UL) != 0U) { set_page_dirty(page); } else { } mark_page_accessed(page); put_page(page); ldv_50164: tmp___1 = __sg_page_iter_next(& sg_iter); if ((int )tmp___1) { goto ldv_50163; } else { } obj->dirty = 0U; sg_free_table(obj->pages); kfree((void const *)obj->pages); return; } } static void i915_gem_userptr_release(struct drm_i915_gem_object *obj ) { { i915_gem_userptr_release__mmu_notifier(obj); i915_gem_userptr_release__mm_struct(obj); return; } } static int i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj ) { int tmp ; { if ((unsigned long )obj->__annonCompField84.userptr.mmu_object != (unsigned long )((struct i915_mmu_object *)0)) { return (0); } else { } tmp = i915_gem_userptr_init__mmu_notifier(obj, 0U); return (tmp); } } static struct drm_i915_gem_object_ops const i915_gem_userptr_ops = {& i915_gem_userptr_get_pages, & i915_gem_userptr_put_pages, & i915_gem_userptr_dmabuf_export, & i915_gem_userptr_release}; int i915_gem_userptr_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_userptr *args ; struct drm_i915_gem_object *obj ; int ret ; u32 handle ; struct thread_info *tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; void *tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; args = (struct drm_i915_gem_userptr *)data; if ((args->flags & 2147483646U) != 0U) { return (-22); } else 
{ } if (((unsigned long )(args->user_ptr | args->user_size) & 4095UL) != 0UL) { return (-22); } else { } if (args->user_size > (unsigned long long )dev_priv->gtt.base.total) { return (-7); } else { } tmp = current_thread_info(); tmp___0 = __chk_range_not_ok((unsigned long )args->user_ptr, (unsigned long )args->user_size, tmp->addr_limit.seg); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } tmp___2 = ldv__builtin_expect((long )tmp___1, 1L); if (tmp___2 == 0L) { return (-14); } else { } if ((int )args->flags & 1) { return (-19); } else { } tmp___3 = i915_gem_object_alloc(dev); obj = (struct drm_i915_gem_object *)tmp___3; if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return (-12); } else { } drm_gem_private_object_init(dev, & obj->base, (size_t )args->user_size); i915_gem_object_init(obj, & i915_gem_userptr_ops); obj->cache_level = 1U; obj->base.write_domain = 1U; obj->base.read_domains = 1U; obj->__annonCompField84.userptr.ptr = (uintptr_t )args->user_ptr; obj->__annonCompField84.userptr.read_only = (unsigned int )((unsigned char )args->flags) & 1U; ret = i915_gem_userptr_init__mm_struct(obj); if (ret == 0) { ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); } else { } if (ret == 0) { ret = drm_gem_handle_create(file, & obj->base, & handle); } else { } drm_gem_object_unreference_unlocked___1(& obj->base); if (ret != 0) { return (ret); } else { } args->handle = handle; return (0); } } int i915_gem_init_userptr(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct lock_class_key __key ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; __mutex_init(& dev_priv->mm_lock, "&dev_priv->mm_lock", & __key); __hash_init___0((struct hlist_head *)(& dev_priv->mm_structs), 128U); return (0); } } extern int ldv_probe_135(void) ; void activate_work_5(struct work_struct *work , int state ) { { if (ldv_work_5_0 == 0) { ldv_work_struct_5_0 = work; ldv_work_5_0 = state; 
return; } else { } if (ldv_work_5_1 == 0) { ldv_work_struct_5_1 = work; ldv_work_5_1 = state; return; } else { } if (ldv_work_5_2 == 0) { ldv_work_struct_5_2 = work; ldv_work_5_2 = state; return; } else { } if (ldv_work_5_3 == 0) { ldv_work_struct_5_3 = work; ldv_work_5_3 = state; return; } else { } return; } } void activate_work_6(struct work_struct *work , int state ) { { if (ldv_work_6_0 == 0) { ldv_work_struct_6_0 = work; ldv_work_6_0 = state; return; } else { } if (ldv_work_6_1 == 0) { ldv_work_struct_6_1 = work; ldv_work_6_1 = state; return; } else { } if (ldv_work_6_2 == 0) { ldv_work_struct_6_2 = work; ldv_work_6_2 = state; return; } else { } if (ldv_work_6_3 == 0) { ldv_work_struct_6_3 = work; ldv_work_6_3 = state; return; } else { } return; } } void invoke_work_5(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_5_0 == 2 || ldv_work_5_0 == 3) { ldv_work_5_0 = 4; __i915_mm_struct_free__worker(ldv_work_struct_5_0); ldv_work_5_0 = 1; } else { } goto ldv_50207; case 1: ; if (ldv_work_5_1 == 2 || ldv_work_5_1 == 3) { ldv_work_5_1 = 4; __i915_mm_struct_free__worker(ldv_work_struct_5_0); ldv_work_5_1 = 1; } else { } goto ldv_50207; case 2: ; if (ldv_work_5_2 == 2 || ldv_work_5_2 == 3) { ldv_work_5_2 = 4; __i915_mm_struct_free__worker(ldv_work_struct_5_0); ldv_work_5_2 = 1; } else { } goto ldv_50207; case 3: ; if (ldv_work_5_3 == 2 || ldv_work_5_3 == 3) { ldv_work_5_3 = 4; __i915_mm_struct_free__worker(ldv_work_struct_5_0); ldv_work_5_3 = 1; } else { } goto ldv_50207; default: ldv_stop(); } ldv_50207: ; return; } } void invoke_work_6(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_6_0 == 2 || ldv_work_6_0 == 3) { ldv_work_6_0 = 4; __i915_gem_userptr_get_pages_worker(ldv_work_struct_6_0); ldv_work_6_0 = 1; } else { } goto ldv_50218; case 1: ; if (ldv_work_6_1 == 2 || ldv_work_6_1 == 3) { ldv_work_6_1 = 4; __i915_gem_userptr_get_pages_worker(ldv_work_struct_6_0); ldv_work_6_1 = 1; } 
else { } goto ldv_50218; case 2: ; if (ldv_work_6_2 == 2 || ldv_work_6_2 == 3) { ldv_work_6_2 = 4; __i915_gem_userptr_get_pages_worker(ldv_work_struct_6_0); ldv_work_6_2 = 1; } else { } goto ldv_50218; case 3: ; if (ldv_work_6_3 == 2 || ldv_work_6_3 == 3) { ldv_work_6_3 = 4; __i915_gem_userptr_get_pages_worker(ldv_work_struct_6_0); ldv_work_6_3 = 1; } else { } goto ldv_50218; default: ldv_stop(); } ldv_50218: ; return; } } void disable_work_5(struct work_struct *work ) { { if ((ldv_work_5_0 == 3 || ldv_work_5_0 == 2) && (unsigned long )ldv_work_struct_5_0 == (unsigned long )work) { ldv_work_5_0 = 1; } else { } if ((ldv_work_5_1 == 3 || ldv_work_5_1 == 2) && (unsigned long )ldv_work_struct_5_1 == (unsigned long )work) { ldv_work_5_1 = 1; } else { } if ((ldv_work_5_2 == 3 || ldv_work_5_2 == 2) && (unsigned long )ldv_work_struct_5_2 == (unsigned long )work) { ldv_work_5_2 = 1; } else { } if ((ldv_work_5_3 == 3 || ldv_work_5_3 == 2) && (unsigned long )ldv_work_struct_5_3 == (unsigned long )work) { ldv_work_5_3 = 1; } else { } return; } } void call_and_disable_all_6(int state ) { { if (ldv_work_6_0 == state) { call_and_disable_work_6(ldv_work_struct_6_0); } else { } if (ldv_work_6_1 == state) { call_and_disable_work_6(ldv_work_struct_6_1); } else { } if (ldv_work_6_2 == state) { call_and_disable_work_6(ldv_work_struct_6_2); } else { } if (ldv_work_6_3 == state) { call_and_disable_work_6(ldv_work_struct_6_3); } else { } return; } } void call_and_disable_all_5(int state ) { { if (ldv_work_5_0 == state) { call_and_disable_work_5(ldv_work_struct_5_0); } else { } if (ldv_work_5_1 == state) { call_and_disable_work_5(ldv_work_struct_5_1); } else { } if (ldv_work_5_2 == state) { call_and_disable_work_5(ldv_work_struct_5_2); } else { } if (ldv_work_5_3 == state) { call_and_disable_work_5(ldv_work_struct_5_3); } else { } return; } } void work_init_5(void) { { ldv_work_5_0 = 0; ldv_work_5_1 = 0; ldv_work_5_2 = 0; ldv_work_5_3 = 0; return; } } void call_and_disable_work_5(struct 
work_struct *work ) { { if ((ldv_work_5_0 == 2 || ldv_work_5_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_0) { __i915_mm_struct_free__worker(work); ldv_work_5_0 = 1; return; } else { } if ((ldv_work_5_1 == 2 || ldv_work_5_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_1) { __i915_mm_struct_free__worker(work); ldv_work_5_1 = 1; return; } else { } if ((ldv_work_5_2 == 2 || ldv_work_5_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_2) { __i915_mm_struct_free__worker(work); ldv_work_5_2 = 1; return; } else { } if ((ldv_work_5_3 == 2 || ldv_work_5_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_3) { __i915_mm_struct_free__worker(work); ldv_work_5_3 = 1; return; } else { } return; } } void work_init_6(void) { { ldv_work_6_0 = 0; ldv_work_6_1 = 0; ldv_work_6_2 = 0; ldv_work_6_3 = 0; return; } } void disable_work_6(struct work_struct *work ) { { if ((ldv_work_6_0 == 3 || ldv_work_6_0 == 2) && (unsigned long )ldv_work_struct_6_0 == (unsigned long )work) { ldv_work_6_0 = 1; } else { } if ((ldv_work_6_1 == 3 || ldv_work_6_1 == 2) && (unsigned long )ldv_work_struct_6_1 == (unsigned long )work) { ldv_work_6_1 = 1; } else { } if ((ldv_work_6_2 == 3 || ldv_work_6_2 == 2) && (unsigned long )ldv_work_struct_6_2 == (unsigned long )work) { ldv_work_6_2 = 1; } else { } if ((ldv_work_6_3 == 3 || ldv_work_6_3 == 2) && (unsigned long )ldv_work_struct_6_3 == (unsigned long )work) { ldv_work_6_3 = 1; } else { } return; } } void ldv_initialize_drm_i915_gem_object_ops_135(void) { void *tmp ; { tmp = ldv_init_zalloc(592UL); i915_gem_userptr_ops_group0 = (struct drm_i915_gem_object *)tmp; return; } } void call_and_disable_work_6(struct work_struct *work ) { { if ((ldv_work_6_0 == 2 || ldv_work_6_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_0) { __i915_gem_userptr_get_pages_worker(work); ldv_work_6_0 = 1; return; } else { } if ((ldv_work_6_1 == 2 || ldv_work_6_1 == 3) && (unsigned 
long )work == (unsigned long )ldv_work_struct_6_1) { __i915_gem_userptr_get_pages_worker(work); ldv_work_6_1 = 1; return; } else { } if ((ldv_work_6_2 == 2 || ldv_work_6_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_2) { __i915_gem_userptr_get_pages_worker(work); ldv_work_6_2 = 1; return; } else { } if ((ldv_work_6_3 == 2 || ldv_work_6_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_3) { __i915_gem_userptr_get_pages_worker(work); ldv_work_6_3 = 1; return; } else { } return; } } void ldv_main_exported_136(void) { struct mmu_notifier *ldvarg430 ; void *tmp ; unsigned long ldvarg429 ; unsigned long ldvarg427 ; struct mm_struct *ldvarg428 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(24UL); ldvarg430 = (struct mmu_notifier *)tmp; tmp___0 = ldv_init_zalloc(2296UL); ldvarg428 = (struct mm_struct *)tmp___0; ldv_memset((void *)(& ldvarg429), 0, 8UL); ldv_memset((void *)(& ldvarg427), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_136 == 1) { i915_gem_userptr_mn_invalidate_range_start(ldvarg430, ldvarg428, ldvarg427, ldvarg429); ldv_state_variable_136 = 1; } else { } goto ldv_50262; default: ldv_stop(); } ldv_50262: ; return; } } void ldv_main_exported_135(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_135 == 1) { i915_gem_userptr_dmabuf_export(i915_gem_userptr_ops_group0); ldv_state_variable_135 = 1; } else { } if (ldv_state_variable_135 == 2) { i915_gem_userptr_dmabuf_export(i915_gem_userptr_ops_group0); ldv_state_variable_135 = 2; } else { } goto ldv_50268; case 1: ; if (ldv_state_variable_135 == 2) { i915_gem_userptr_release(i915_gem_userptr_ops_group0); ldv_state_variable_135 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_50268; case 2: ; if (ldv_state_variable_135 == 1) { i915_gem_userptr_put_pages(i915_gem_userptr_ops_group0); ldv_state_variable_135 = 1; } else { } if (ldv_state_variable_135 == 2) { 
i915_gem_userptr_put_pages(i915_gem_userptr_ops_group0); ldv_state_variable_135 = 2; } else { } goto ldv_50268; case 3: ; if (ldv_state_variable_135 == 1) { i915_gem_userptr_get_pages(i915_gem_userptr_ops_group0); ldv_state_variable_135 = 1; } else { } if (ldv_state_variable_135 == 2) { i915_gem_userptr_get_pages(i915_gem_userptr_ops_group0); ldv_state_variable_135 = 2; } else { } goto ldv_50268; case 4: ; if (ldv_state_variable_135 == 1) { ldv_probe_135(); ldv_state_variable_135 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_50268; default: ldv_stop(); } ldv_50268: ; return; } } bool ldv_queue_work_on_357(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_358(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_359(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_360(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_361(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = 
queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv__builtin_va_end(__builtin_va_list * ) ; void __builtin_va_copy(__builtin_va_list , __builtin_va_list ) ; __inline static long ldv__builtin_expect(long exp , long c ) ; void ldv__builtin_va_start(__builtin_va_list * ) ; extern int vsnprintf(char * , size_t , char const * , va_list * ) ; extern int scnprintf(char * , size_t , char const * , ...) ; extern void *__memmove(void * , void const * , size_t ) ; extern size_t strlen(char const * ) ; extern char *strcpy(char * , char const * ) ; __inline static unsigned long arch_local_save_flags___8(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void arch_local_irq_restore(unsigned long f ) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.restore_fl.func 
== (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (836), "i" (12UL)); ldv_4870: ; goto ldv_4870; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (44UL), [paravirt_opptr] "i" (& pv_irq_ops.restore_fl.func), [paravirt_clobber] "i" (1), "D" (f): "memory", "cc"); return; } } __inline static void arch_local_irq_disable(void) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.irq_disable.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (841), "i" (12UL)); ldv_4879: ; goto ldv_4879; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (45UL), [paravirt_opptr] "i" (& pv_irq_ops.irq_disable.func), [paravirt_clobber] "i" (1): "memory", "cc"); return; } } __inline static unsigned long arch_local_irq_save(void) { unsigned long f ; { f = arch_local_save_flags___8(); arch_local_irq_disable(); return (f); } } extern void trace_hardirqs_on(void) ; extern void trace_hardirqs_off(void) ; __inline static int waitqueue_active(wait_queue_head_t *q ) { int tmp ; { tmp = list_empty((struct list_head const *)(& q->task_list)); return (tmp == 0); } } extern void 
do_gettimeofday(struct timeval * ) ; bool ldv_queue_work_on_371(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_373(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_372(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_375(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_374(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void kref_get___8(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub___10(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___10(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___10(kref, 1U, release); return (tmp); } } extern void drm_clflush_pages(struct page ** , unsigned long ) ; __inline static struct page *i915_gem_object_get_page___1(struct drm_i915_gem_object *obj , int n ) { int __ret_warn_on ; long tmp ; long tmp___0 ; struct scatterlist *tmp___1 ; int tmp___2 ; long tmp___3 ; int tmp___4 ; struct page *tmp___5 ; { __ret_warn_on = (size_t )n >= obj->base.size >> 12; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2754, "WARN_ON(n >= obj->base.size >> PAGE_SHIFT)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return ((struct page *)0); } else { } if (obj->get_page.last > n) { obj->get_page.sg = (obj->pages)->sgl; obj->get_page.last = 0; } else { } goto ldv_47336; ldv_47335: tmp___1 = obj->get_page.sg; obj->get_page.sg = obj->get_page.sg + 1; tmp___2 = 
__sg_page_count(tmp___1); obj->get_page.last = obj->get_page.last + tmp___2; tmp___3 = ldv__builtin_expect((long )((int )(obj->get_page.sg)->page_link) & 1L, 0L); if (tmp___3 != 0L) { obj->get_page.sg = (struct scatterlist *)((obj->get_page.sg)->page_link & 0xfffffffffffffffcUL); } else { } ldv_47336: tmp___4 = __sg_page_count(obj->get_page.sg); if (obj->get_page.last + tmp___4 <= n) { goto ldv_47335; } else { } tmp___5 = sg_page___0(obj->get_page.sg); return ((struct page *)-24189255811072L + ((unsigned long )(((long )tmp___5 + 24189255811072L) / 64L) + (unsigned long )(n - obj->get_page.last))); } } __inline static u32 i915_reset_count(struct i915_gpu_error *error ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& error->reset_counter)); return ((u32 )(((tmp & 2147483647) + 1) / 2)); } } void i915_error_printf(struct drm_i915_error_state_buf *e , char const *f , ...) ; void i915_capture_error_state(struct drm_device *dev , bool wedged , char const *error_msg ) ; void i915_get_extra_instdone(struct drm_device *dev , uint32_t *instdone ) ; struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev ) ; void intel_overlay_print_error_state(struct drm_i915_error_state_buf *m , struct intel_overlay_error_state *error ) ; struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev ) ; void intel_display_print_error_state(struct drm_i915_error_state_buf *m , struct drm_device *dev , struct intel_display_error_state *error ) ; static char const *yesno___0(int v ) { { return (v != 0 ? 
/* NOTE(review): CIL-generated (LDV) flattening of i915 error-state helpers.
 * This source line begins mid-definition: the fragment below closes
 * yesno___0(), whose header sits on the previous line. */
"yes" : "no"); } }

/* Map an error-state ring index to its human-readable name.
 * Indices 0..4 are render/bsd/blt/vebox/bsd2; anything else prints as "". */
static char const *ring_str(int ring )
{
  {
  switch (ring) {
  case 0: ;
  return ("render");
  case 1: ;
  return ("bsd");
  case 2: ;
  return ("blt");
  case 3: ;
  return ("vebox");
  case 4: ;
  return ("bsd2");
  default: ;
  return ("");
  }
  }
}

/* Flag suffix describing an object's pin state: " P" for a positive pin
 * count, " p" for a negative one, empty string otherwise. */
static char const *pin_flag(int pinned )
{
  {
  if (pinned > 0) {
    return (" P");
  } else
  if (pinned < 0) {
    return (" p");
  } else {
    return ("");
  }
  }
}

/* Flag suffix for the object's tiling mode: 1 -> " X", 2 -> " Y",
 * anything else (including 0 / untiled) -> "". */
static char const *tiling_flag(int tiling )
{
  {
  switch (tiling) {
  default: ;
  case 0: ;
  return ("");
  case 1: ;
  return (" X");
  case 2: ;
  return (" Y");
  }
  }
}

/* Flag suffix emitted when the object was dirty at capture time. */
static char const *dirty_flag(int dirty )
{
  {
  return (dirty != 0 ? " dirty" : "");
  }
}

/* Flag suffix emitted when the object was marked purgeable. */
static char const *purgeable_flag(int purgeable )
{
  {
  return (purgeable != 0 ? " purgeable" : "");
  }
}

/* Return nonzero (true) while the error-state buffer can still accept
 * output.  If the byte count has somehow run past size - 1, WARN once
 * ("overflow") and record -28 (-ENOSPC) in e->err.  Returns 0 once the
 * buffer is full (bytes == size - 1) or an error was already recorded. */
static bool __i915_error_ok(struct drm_i915_error_state_buf *e )
{ int __ret_warn_on ;
  long tmp ;
  long tmp___0 ;
  {
  if (e->err == 0) {
    /* invariant: bytes must stay strictly below size - 1 */
    __ret_warn_on = e->bytes > e->size - 1U;
    tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L);
    if (tmp != 0L) {
      warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_gpu_error.c", 83, "overflow");
    } else {
    }
    tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
    if (tmp___0 != 0L) {
      e->err = -28;
      return (0);
    } else {
    }
  } else {
  }
  if (e->bytes == e->size - 1U || e->err != 0) {
    return (0);
  } else {
  }
  return (1);
  }
}

/* Handle output that falls before the reader's requested start offset
 * (e->start).  A span that is skipped in its entirety only advances
 * e->pos and returns 0 (nothing to emit).  A single print that would not
 * even fit the whole buffer cannot be seeked over: record -5 (-EIO) and
 * return 0.  Returns 1 when the caller should go ahead and format. */
static bool __i915_error_seek(struct drm_i915_error_state_buf *e , unsigned int len )
{
  {
  if (e->pos + (loff_t )len <= e->start) {
    e->pos = e->pos + (loff_t )len;
    return (0);
  } else {
  }
  if (e->size <= len) {
    e->err = -5;
    return (0);
  } else {
  }
  return (1);
  }
}

/* Account for len bytes just formatted at buf + bytes.  When output began
 * before e->start, the leading off bytes are dropped by sliding the tail
 * of the buffer down with __memmove(); that call is split across this and
 * the next source line. */
static void __i915_error_advance(struct drm_i915_error_state_buf *e , unsigned int len )
{ size_t off ;
  {
  if (e->pos < e->start) {
    off = (size_t const )(e->start - e->pos);
    /* a partial skip is only legal for the very first write (bytes == 0) */
    if ((unsigned long )len < off || e->bytes != 0U) {
      e->err = -5;
      return;
    } else {
    }
    /* definition continues on the next source line */
    __memmove((void *)e->buf,
/* NOTE(review): continuation from the previous source line — the remaining
 * arguments of the __memmove() call inside __i915_error_advance(), followed
 * by the rest of that function's body. */
(void const *)(e->buf + off), (unsigned long )len - off);
e->bytes = len - (unsigned int )off;
e->pos = e->start;
return;
} else {
}
e->bytes = e->bytes + len;
e->pos = e->pos + (loff_t )len;
return;
}
}

/* vsnprintf into the error-state buffer.  While output is still before the
 * reader's start offset, a probing vsnprintf(NULL, 0, ...) on a va_copy
 * measures the would-be length so __i915_error_seek() can skip it without
 * consuming the caller's args.  The formatted length is clamped to the
 * space left (size - bytes - 1) before __i915_error_advance() accounts
 * for it.  Bails out early when __i915_error_ok() says the buffer is done. */
static void i915_error_vprintf(struct drm_i915_error_state_buf *e , char const *f , va_list *args )
{ unsigned int len ;
  bool tmp ;
  int tmp___0 ;
  va_list tmp___1 ;
  int tmp___2 ;
  bool tmp___3 ;
  int tmp___4 ;
  int tmp___5 ;
  {
  tmp = __i915_error_ok(e);
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  if (tmp___0) {
    return;
  } else {
  }
  if (e->pos < e->start) {
    /* measure-only pass on a copied va_list; the real one stays intact */
    __builtin_va_copy((va_list *)(& tmp___1), args);
    tmp___2 = vsnprintf((char *)0, 0UL, f, (va_list *)(& tmp___1));
    len = (unsigned int )tmp___2;
    ldv__builtin_va_end((va_list *)(& tmp___1));
    tmp___3 = __i915_error_seek(e, len);
    if (tmp___3) {
      tmp___4 = 0;
    } else {
      tmp___4 = 1;
    }
    if (tmp___4) {
      return;
    } else {
    }
  } else {
  }
  tmp___5 = vsnprintf((char *)e->buf + (unsigned long )e->bytes, (size_t )(e->size - e->bytes), f, args);
  len = (unsigned int )tmp___5;
  /* clamp to the bytes actually stored (vsnprintf may report truncation) */
  if (e->size - e->bytes <= len) {
    len = (e->size - e->bytes) - 1U;
  } else {
  }
  __i915_error_advance(e, len);
  return;
  }
}

/* Append a plain string (no format expansion) to the error-state buffer,
 * honouring the same bail-out / seek / clamp rules as i915_error_vprintf(). */
static void i915_error_puts(struct drm_i915_error_state_buf *e , char const *str )
{ unsigned int len ;
  bool tmp ;
  int tmp___0 ;
  size_t tmp___1 ;
  bool tmp___2 ;
  int tmp___3 ;
  {
  tmp = __i915_error_ok(e);
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  if (tmp___0) {
    return;
  } else {
  }
  tmp___1 = strlen(str);
  len = (unsigned int )tmp___1;
  if (e->pos < e->start) {
    tmp___2 = __i915_error_seek(e, len);
    if (tmp___2) {
      tmp___3 = 0;
    } else {
      tmp___3 = 1;
    }
    if (tmp___3) {
      return;
    } else {
    }
  } else {
  }
  if (e->size - e->bytes <= len) {
    len = (e->size - e->bytes) - 1U;
  } else {
  }
  memcpy((void *)e->buf + (unsigned long )e->bytes, (void const *)str, (size_t )len);
  __i915_error_advance(e, len);
  return;
  }
}

/* Dump a table of captured buffer objects (e.g. "Active"/"Pinned") into the
 * error-state buffer — the definition continues on the next source line. */
static void print_error_buffers(struct drm_i915_error_state_buf *m , char const *name , struct drm_i915_error_buffer *err , int count )
{ int i ;
  char const *tmp ;
char const *tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; int tmp___5 ; { i915_error_printf(m, " %s [%d]:\n", name, count); goto ldv_48042; ldv_48041: i915_error_printf(m, " %08x %8u %02x %02x [ ", err->gtt_offset, err->size, err->read_domains, err->write_domain); i = 0; goto ldv_48039; ldv_48038: i915_error_printf(m, "%02x ", err->rseqno[i]); i = i + 1; ldv_48039: ; if (i <= 4) { goto ldv_48038; } else { } i915_error_printf(m, "] %02x", err->wseqno); tmp = pin_flag((int )err->pinned); i915_error_puts(m, tmp); tmp___0 = tiling_flag((int )err->tiling); i915_error_puts(m, tmp___0); tmp___1 = dirty_flag((int )err->dirty); i915_error_puts(m, tmp___1); tmp___2 = purgeable_flag((int )err->purgeable); i915_error_puts(m, tmp___2); i915_error_puts(m, (unsigned int )*((unsigned char *)err + 45UL) != 0U ? " userptr" : ""); i915_error_puts(m, *((unsigned int *)err + 11UL) != 122880U ? " " : ""); tmp___3 = ring_str((int )err->ring); i915_error_puts(m, tmp___3); tmp___4 = i915_cache_level_str(m->i915, (int )err->cache_level); i915_error_puts(m, tmp___4); if (err->name != 0U) { i915_error_printf(m, " (name: %d)", err->name); } else { } if ((unsigned int )*((unsigned char *)err + 44UL) != 63U) { i915_error_printf(m, " (fence: %d)", (int )err->fence_reg); } else { } i915_error_puts(m, "\n"); err = err + 1; ldv_48042: tmp___5 = count; count = count - 1; if (tmp___5 != 0) { goto ldv_48041; } else { } return; } } static char const *hangcheck_action_to_str(enum intel_ring_hangcheck_action a ) { { switch ((unsigned int )a) { case 0U: ; return ("idle"); case 1U: ; return ("wait"); case 2U: ; return ("active"); case 3U: ; return ("active (loop)"); case 4U: ; return ("kick"); case 5U: ; return ("hung"); } return ("unknown"); } } static void i915_ring_error_state(struct drm_i915_error_state_buf *m , struct drm_device *dev , struct drm_i915_error_state *error , int ring_idx ) { struct drm_i915_error_ring *ring ; char const *tmp ; struct 
drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int i ; struct drm_i915_private *__p___2 ; char const *tmp___0 ; char const *tmp___1 ; { ring = (struct drm_i915_error_ring *)(& error->ring) + (unsigned long )ring_idx; if (! ring->valid) { return; } else { } tmp = ring_str(ring_idx); i915_error_printf(m, "%s command stream:\n", tmp); i915_error_printf(m, " START: 0x%08x\n", ring->start); i915_error_printf(m, " HEAD: 0x%08x\n", ring->head); i915_error_printf(m, " TAIL: 0x%08x\n", ring->tail); i915_error_printf(m, " CTL: 0x%08x\n", ring->ctl); i915_error_printf(m, " HWS: 0x%08x\n", ring->hws); i915_error_printf(m, " ACTHD: 0x%08x %08x\n", (unsigned int )(ring->acthd >> 32), (unsigned int )ring->acthd); i915_error_printf(m, " IPEIR: 0x%08x\n", ring->ipeir); i915_error_printf(m, " IPEHR: 0x%08x\n", ring->ipehr); i915_error_printf(m, " INSTDONE: 0x%08x\n", ring->instdone); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { i915_error_printf(m, " BBADDR: 0x%08x %08x\n", (unsigned int )(ring->bbaddr >> 32), (unsigned int )ring->bbaddr); i915_error_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate); i915_error_printf(m, " INSTPS: 0x%08x\n", ring->instps); } else { } i915_error_printf(m, " INSTPM: 0x%08x\n", ring->instpm); i915_error_printf(m, " FADDR: 0x%08x %08x\n", (unsigned int )(ring->faddr >> 32ULL), (unsigned int )ring->faddr); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 5U) { i915_error_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi); i915_error_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg); i915_error_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", ring->semaphore_mboxes[0], ring->semaphore_seqno[0]); i915_error_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", ring->semaphore_mboxes[1], ring->semaphore_seqno[1]); __p___0 = to_i915((struct drm_device const *)dev); if (((int 
)__p___0->info.ring_mask & 8) != 0) { i915_error_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", ring->semaphore_mboxes[2], ring->semaphore_seqno[2]); } else { } } else { } if (i915.enable_ppgtt != 0) { i915_error_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 7U) { i = 0; goto ldv_48086; ldv_48085: i915_error_printf(m, " PDP%d: 0x%016llx\n", i, ring->vm_info.__annonCompField81.pdp[i]); i = i + 1; ldv_48086: ; if (i <= 3) { goto ldv_48085; } else { } } else { i915_error_printf(m, " PP_DIR_BASE: 0x%08x\n", ring->vm_info.__annonCompField81.pp_dir_base); } } else { } i915_error_printf(m, " seqno: 0x%08x\n", ring->seqno); tmp___0 = yesno___0((int )ring->waiting); i915_error_printf(m, " waiting: %s\n", tmp___0); i915_error_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head); i915_error_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail); tmp___1 = hangcheck_action_to_str(ring->hangcheck_action); i915_error_printf(m, " hangcheck: %s [%d]\n", tmp___1, ring->hangcheck_score); return; } } void i915_error_printf(struct drm_i915_error_state_buf *e , char const *f , ...) 
{ va_list args ; { ldv__builtin_va_start((va_list *)(& args)); i915_error_vprintf(e, f, (va_list *)(& args)); ldv__builtin_va_end((va_list *)(& args)); return; } } static void print_error_obj(struct drm_i915_error_state_buf *m , struct drm_i915_error_object *obj ) { int page ; int offset ; int elt ; { offset = 0; page = offset; goto ldv_48104; ldv_48103: elt = 0; goto ldv_48101; ldv_48100: i915_error_printf(m, "%08x : %08x\n", offset, *(obj->pages[page] + (unsigned long )elt)); offset = offset + 4; elt = elt + 1; ldv_48101: ; if ((unsigned int )elt <= 1023U) { goto ldv_48100; } else { } page = page + 1; ldv_48104: ; if (obj->page_count > page) { goto ldv_48103; } else { } return; } } int i915_error_state_to_str(struct drm_i915_error_state_buf *m , struct i915_error_state_file_priv const *error_priv ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_error_state *error ; struct drm_i915_error_object *obj ; int i ; int j ; int offset ; int elt ; int max_hangcheck_score ; char const *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev = error_priv->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; error = error_priv->error; if ((unsigned long )error == (unsigned long )((struct drm_i915_error_state *)0)) { i915_error_printf(m, "no error state collected\n"); goto out; } else { } i915_error_printf(m, "%s\n", (char *)(& error->error_msg)); i915_error_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); i915_error_printf(m, "Kernel: 4.2.0-rc1\n"); max_hangcheck_score = 0; i = 0; goto ldv_48123; ldv_48122: ; if (error->ring[i].hangcheck_score > max_hangcheck_score) { max_hangcheck_score = error->ring[i].hangcheck_score; } else { } i = i + 1; ldv_48123: ; if ((unsigned int )i <= 4U) { goto ldv_48122; } else { } i = 0; goto ldv_48128; ldv_48127: ; if 
(error->ring[i].hangcheck_score == max_hangcheck_score && error->ring[i].pid != -1) { tmp = ring_str(i); i915_error_printf(m, "Active process (on ring %s): %s [%d]\n", tmp, (char *)(& error->ring[i].comm), error->ring[i].pid); } else { } i = i + 1; ldv_48128: ; if ((unsigned int )i <= 4U) { goto ldv_48127; } else { } i915_error_printf(m, "Reset count: %u\n", error->reset_count); i915_error_printf(m, "Suspend count: %u\n", error->suspend_count); i915_error_printf(m, "PCI ID: 0x%04x\n", (int )(dev->pdev)->device); i915_error_printf(m, "EIR: 0x%08x\n", error->eir); i915_error_printf(m, "IER: 0x%08x\n", error->ier); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { i = 0; goto ldv_48137; ldv_48136: i915_error_printf(m, "GTIER gt %d: 0x%08x\n", i, error->gtier[i]); i = i + 1; ldv_48137: ; if (i <= 3) { goto ldv_48136; } else { } } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { i915_error_printf(m, "GTIER: 0x%08x\n", error->gtier[0]); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { i915_error_printf(m, "GTIER: 0x%08x\n", error->gtier[0]); } else { } } } i915_error_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); i915_error_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); i915_error_printf(m, "DERRMR: 0x%08x\n", error->derrmr); i915_error_printf(m, "CCID: 0x%08x\n", error->ccid); i915_error_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings); i = 0; goto ldv_48152; ldv_48151: i915_error_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); i = i + 1; ldv_48152: ; if (dev_priv->num_fence_regs > i) { goto ldv_48151; } else { } i = 0; goto ldv_48157; ldv_48156: i915_error_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); i = i + 1; ldv_48157: ; if ((unsigned int )i <= 3U) { goto ldv_48156; } else { } __p___3 = to_i915((struct drm_device 
const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 5U) { i915_error_printf(m, "OLD_ERROR: 0x%08x\n", error->error); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 7U) { i915_error_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", error->fault_data1, error->fault_data0); } else { } i915_error_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 7U) { i915_error_printf(m, "ERR_INT: 0x%08x\n", error->err_int); } else { } i = 0; goto ldv_48180; ldv_48179: i915_ring_error_state(m, dev, error, i); i = i + 1; ldv_48180: ; if ((unsigned int )i <= 4U) { goto ldv_48179; } else { } i = 0; goto ldv_48183; ldv_48182: i915_error_printf(m, "vm[%d]\n", i); print_error_buffers(m, "Active", *(error->active_bo + (unsigned long )i), (int )*(error->active_bo_count + (unsigned long )i)); print_error_buffers(m, "Pinned", *(error->pinned_bo + (unsigned long )i), (int )*(error->pinned_bo_count + (unsigned long )i)); i = i + 1; ldv_48183: ; if ((u32 )i < error->vm_count) { goto ldv_48182; } else { } i = 0; goto ldv_48194; ldv_48193: obj = error->ring[i].batchbuffer; if ((unsigned long )obj != (unsigned long )((struct drm_i915_error_object *)0)) { i915_error_puts(m, dev_priv->ring[i].name); if (error->ring[i].pid != -1) { i915_error_printf(m, " (submitted by %s [%d])", (char *)(& error->ring[i].comm), error->ring[i].pid); } else { } i915_error_printf(m, " --- gtt_offset = 0x%08x\n", obj->gtt_offset); print_error_obj(m, obj); } else { } obj = error->ring[i].wa_batchbuffer; if ((unsigned long )obj != (unsigned long )((struct drm_i915_error_object *)0)) { i915_error_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n", dev_priv->ring[i].name, obj->gtt_offset); print_error_obj(m, obj); } else { } if (error->ring[i].num_requests != 0) { i915_error_printf(m, "%s --- %d requests\n", dev_priv->ring[i].name, 
error->ring[i].num_requests); j = 0; goto ldv_48188; ldv_48187: i915_error_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", (error->ring[i].requests + (unsigned long )j)->seqno, (error->ring[i].requests + (unsigned long )j)->jiffies, (error->ring[i].requests + (unsigned long )j)->tail); j = j + 1; ldv_48188: ; if (error->ring[i].num_requests > j) { goto ldv_48187; } else { } } else { } obj = error->ring[i].ringbuffer; if ((unsigned long )obj != (unsigned long )((struct drm_i915_error_object *)0)) { i915_error_printf(m, "%s --- ringbuffer = 0x%08x\n", dev_priv->ring[i].name, obj->gtt_offset); print_error_obj(m, obj); } else { } obj = error->ring[i].hws_page; if ((unsigned long )obj != (unsigned long )((struct drm_i915_error_object *)0)) { i915_error_printf(m, "%s --- HW Status = 0x%08x\n", dev_priv->ring[i].name, obj->gtt_offset); offset = 0; elt = 0; goto ldv_48191; ldv_48190: i915_error_printf(m, "[%04x] %08x %08x %08x %08x\n", offset, *(obj->pages[0] + (unsigned long )elt), *(obj->pages[0] + ((unsigned long )elt + 1UL)), *(obj->pages[0] + ((unsigned long )elt + 2UL)), *(obj->pages[0] + ((unsigned long )elt + 3UL))); offset = offset + 16; elt = elt + 4; ldv_48191: ; if ((unsigned int )elt <= 255U) { goto ldv_48190; } else { } } else { } obj = error->ring[i].ctx; if ((unsigned long )obj != (unsigned long )((struct drm_i915_error_object *)0)) { i915_error_printf(m, "%s --- HW Context = 0x%08x\n", dev_priv->ring[i].name, obj->gtt_offset); print_error_obj(m, obj); } else { } i = i + 1; ldv_48194: ; if ((unsigned int )i <= 4U) { goto ldv_48193; } else { } obj = error->semaphore_obj; if ((unsigned long )obj != (unsigned long )((struct drm_i915_error_object *)0)) { i915_error_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset); elt = 0; goto ldv_48197; ldv_48196: i915_error_printf(m, "[%04x] %08x %08x %08x %08x\n", elt * 4, *(obj->pages[0] + (unsigned long )elt), *(obj->pages[0] + ((unsigned long )elt + 1UL)), *(obj->pages[0] + ((unsigned long )elt + 2UL)), 
*(obj->pages[0] + ((unsigned long )elt + 3UL))); elt = elt + 4; ldv_48197: ; if ((unsigned int )elt <= 255U) { goto ldv_48196; } else { } } else { } if ((unsigned long )error->overlay != (unsigned long )((struct intel_overlay_error_state *)0)) { intel_overlay_print_error_state(m, error->overlay); } else { } if ((unsigned long )error->display != (unsigned long )((struct intel_display_error_state *)0)) { intel_display_print_error_state(m, dev, error->display); } else { } out: ; if (m->bytes == 0U && m->err != 0) { return (m->err); } else { } return (0); } } int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf , struct drm_i915_private *i915___0 , size_t count , loff_t pos ) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { memset((void *)ebuf, 0, 48UL); ebuf->i915 = i915___0; ebuf->size = (unsigned int )(4096UL > count + 1UL ? 4096UL : count + 1UL); tmp = kmalloc((size_t )ebuf->size, 529104U); ebuf->buf = (u8 *)tmp; if ((unsigned long )ebuf->buf == (unsigned long )((u8 *)0U)) { ebuf->size = 4096U; tmp___0 = kmalloc((size_t )ebuf->size, 524496U); ebuf->buf = (u8 *)tmp___0; } else { } if ((unsigned long )ebuf->buf == (unsigned long )((u8 *)0U)) { ebuf->size = 128U; tmp___1 = kmalloc((size_t )ebuf->size, 524496U); ebuf->buf = (u8 *)tmp___1; } else { } if ((unsigned long )ebuf->buf == (unsigned long )((u8 *)0U)) { return (-12); } else { } ebuf->start = pos; return (0); } } static void i915_error_object_free(struct drm_i915_error_object *obj ) { int page ; { if ((unsigned long )obj == (unsigned long )((struct drm_i915_error_object *)0)) { return; } else { } page = 0; goto ldv_48210; ldv_48209: kfree((void const *)obj->pages[page]); page = page + 1; ldv_48210: ; if (obj->page_count > page) { goto ldv_48209; } else { } kfree((void const *)obj); return; } } static void i915_error_state_free(struct kref *error_ref ) { struct drm_i915_error_state *error ; struct kref const *__mptr ; int i ; { __mptr = (struct kref const *)error_ref; error = (struct 
drm_i915_error_state *)__mptr; i = 0; goto ldv_48222; ldv_48221: i915_error_object_free(error->ring[i].batchbuffer); i915_error_object_free(error->ring[i].wa_batchbuffer); i915_error_object_free(error->ring[i].ringbuffer); i915_error_object_free(error->ring[i].hws_page); i915_error_object_free(error->ring[i].ctx); kfree((void const *)error->ring[i].requests); i = i + 1; ldv_48222: ; if ((unsigned int )i <= 4U) { goto ldv_48221; } else { } i915_error_object_free(error->semaphore_obj); i = 0; goto ldv_48225; ldv_48224: kfree((void const *)*(error->active_bo + (unsigned long )i)); i = i + 1; ldv_48225: ; if ((u32 )i < error->vm_count) { goto ldv_48224; } else { } kfree((void const *)error->active_bo); kfree((void const *)error->active_bo_count); kfree((void const *)error->pinned_bo); kfree((void const *)error->pinned_bo_count); kfree((void const *)error->overlay); kfree((void const *)error->display); kfree((void const *)error); return; } } static struct drm_i915_error_object *i915_error_object_create(struct drm_i915_private *dev_priv , struct drm_i915_gem_object *src , struct i915_address_space *vm ) { struct drm_i915_error_object *dst ; struct i915_vma *vma ; int num_pages ; bool use_ggtt ; int i ; u32 reloc_offset ; void *tmp ; unsigned long tmp___0 ; bool tmp___1 ; bool tmp___2 ; unsigned long tmp___3 ; struct drm_i915_private *__p ; unsigned long flags ; void *d ; void *s ; struct page *page ; void *s___0 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { vma = (struct i915_vma *)0; i = 0; if ((unsigned long )src == (unsigned long )((struct drm_i915_gem_object *)0) || (unsigned long )src->pages == (unsigned long )((struct sg_table *)0)) { return ((struct drm_i915_error_object *)0); } else { } num_pages = (int )(src->base.size >> 12); tmp = kmalloc(((unsigned long )num_pages + 1UL) * 8UL, 32U); dst = (struct drm_i915_error_object *)tmp; if ((unsigned long )dst == (unsigned long )((struct drm_i915_error_object *)0)) { return ((struct drm_i915_error_object 
*)0); } else { } tmp___1 = i915_gem_obj_bound(src, vm); if ((int )tmp___1) { tmp___0 = i915_gem_obj_offset(src, vm); dst->gtt_offset = (u32 )tmp___0; } else { dst->gtt_offset = 4294967295U; } reloc_offset = dst->gtt_offset; tmp___2 = i915_is_ggtt(vm); if ((int )tmp___2) { vma = i915_gem_obj_to_ggtt(src); } else { } use_ggtt = (bool )((((unsigned int )*((unsigned char *)src + 410UL) == 0U && (unsigned long )vma != (unsigned long )((struct i915_vma *)0)) && (int )vma->bound & 1) && (unsigned long )reloc_offset + (unsigned long )num_pages * 4096UL <= dev_priv->gtt.mappable_end); if ((unsigned long )src->stolen != (unsigned long )((struct drm_mm_node *)0)) { use_ggtt = 1; if ((unsigned long )vma == (unsigned long )((struct i915_vma *)0) || ((int )vma->bound & 1) == 0) { goto unwind; } else { } tmp___3 = i915_gem_obj_ggtt_offset(src); reloc_offset = (u32 )tmp___3; if ((unsigned long )reloc_offset + (unsigned long )num_pages * 4096UL > dev_priv->gtt.mappable_end) { goto unwind; } else { } } else { } if ((int )use_ggtt && (unsigned int )*((unsigned char *)src + 410UL) != 0U) { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { goto unwind; } else { } } else { } dst->page_count = num_pages; goto ldv_48264; ldv_48263: d = kmalloc(4096UL, 32U); if ((unsigned long )d == (unsigned long )((void *)0)) { goto unwind; } else { } flags = arch_local_irq_save(); trace_hardirqs_off(); if ((int )use_ggtt) { s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, (unsigned long )reloc_offset); memcpy_fromio(d, (void const volatile *)s, 4096UL); __kunmap_atomic(s); } else { page = i915_gem_object_get_page___1(src, i); drm_clflush_pages(& page, 1UL); s___0 = kmap_atomic(page); memcpy(d, (void const *)s___0, 4096UL); __kunmap_atomic(s___0); drm_clflush_pages(& page, 1UL); } tmp___4 = arch_irqs_disabled_flags(flags); if (tmp___4 != 0) { arch_local_irq_restore(flags); trace_hardirqs_off(); } else { trace_hardirqs_on(); 
arch_local_irq_restore(flags); } tmp___5 = i; i = i + 1; dst->pages[tmp___5] = (u32 *)d; reloc_offset = reloc_offset + 4096U; ldv_48264: tmp___6 = num_pages; num_pages = num_pages - 1; if (tmp___6 != 0) { goto ldv_48263; } else { } return (dst); unwind: ; goto ldv_48267; ldv_48266: kfree((void const *)dst->pages[i]); ldv_48267: tmp___7 = i; i = i - 1; if (tmp___7 != 0) { goto ldv_48266; } else { } kfree((void const *)dst); return ((struct drm_i915_error_object *)0); } } static void capture_bo(struct drm_i915_error_buffer *err , struct i915_vma *vma ) { struct drm_i915_gem_object *obj ; int i ; bool tmp ; struct intel_engine_cs *tmp___0 ; { obj = vma->obj; err->size = (u32 )obj->base.size; err->name = (u32 )obj->base.name; i = 0; goto ldv_48276; ldv_48275: err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]); i = i + 1; ldv_48276: ; if (i <= 4) { goto ldv_48275; } else { } err->wseqno = i915_gem_request_get_seqno(obj->last_write_req); err->gtt_offset = (u32 )vma->node.start; err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; err->fence_reg = obj->fence_reg; err->pinned = 0; tmp = i915_gem_obj_is_pinned(obj); if ((int )tmp) { err->pinned = 1; } else { } err->tiling = obj->tiling_mode; err->dirty = obj->dirty; err->purgeable = (unsigned int )*((unsigned char *)obj + 409UL) != 0U; err->userptr = (unsigned long )obj->__annonCompField84.userptr.mm != (unsigned long )((struct i915_mm_struct *)0); if ((unsigned long )obj->last_write_req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp___0 = i915_gem_request_get_ring(obj->last_write_req); err->ring = (signed char )tmp___0->id; } else { err->ring = -1; } err->cache_level = obj->cache_level; return; } } static u32 capture_active_bo(struct drm_i915_error_buffer *err , int count , struct list_head *head ) { struct i915_vma *vma ; int i ; struct list_head const *__mptr ; struct drm_i915_error_buffer *tmp ; struct list_head const *__mptr___0 ; { i = 0; __mptr = 
(struct list_head const *)head->next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff68UL; goto ldv_48291; ldv_48290: tmp = err; err = err + 1; capture_bo(tmp, vma); i = i + 1; if (i == count) { goto ldv_48289; } else { } __mptr___0 = (struct list_head const *)vma->mm_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff68UL; ldv_48291: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )head) { goto ldv_48290; } else { } ldv_48289: ; return ((u32 )i); } } static u32 capture_pinned_bo(struct drm_i915_error_buffer *err , int count , struct list_head *head , struct i915_address_space *vm ) { struct drm_i915_gem_object *obj ; struct drm_i915_error_buffer *first ; struct drm_i915_error_buffer *last ; struct list_head const *__mptr ; struct i915_vma *vma ; struct list_head const *__mptr___0 ; struct drm_i915_error_buffer *tmp ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { first = err; last = err + (unsigned long )count; __mptr = (struct list_head const *)head->next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_48315; ldv_48314: ; if ((unsigned long )err == (unsigned long )last) { goto ldv_48306; } else { } __mptr___0 = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff58UL; goto ldv_48312; ldv_48311: ; if ((unsigned long )vma->vm == (unsigned long )vm && (int )vma->pin_count > 0) { tmp = err; err = err + 1; capture_bo(tmp, vma); } else { } __mptr___1 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___1 + 0xffffffffffffff58UL; ldv_48312: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_48311; } else { } __mptr___2 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___2 + 0xfffffffffffffee8UL; ldv_48315: ; if ((unsigned long )(& obj->global_list) != (unsigned long )head) { goto ldv_48314; } else { } ldv_48306: ; return ((u32 
/* Tail of capture_pinned_bo(): records-written = (err - first); the literal
   48L is sizeof(struct drm_i915_error_buffer) as lowered by CIL. */
)(((long )err - (long )first) / 48L)); } }
/* i915_error_generate_code(): scan the (up to 5) ring error records for one
   whose hangcheck_action equals 5 (a hung ring), report its index through
   *ring_id (if non-NULL), and derive an "ecode" as ipehr ^ instdone of that
   ring; returns 0 if no ring matches. */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv , struct drm_i915_error_state *error , int *ring_id ) { uint32_t error_code ; int i ; { error_code = 0U; i = 0; goto ldv_48324; ldv_48323: ; if ((unsigned int )error->ring[i].hangcheck_action == 5U) { if ((unsigned long )ring_id != (unsigned long )((int *)0)) { *ring_id = i; } else { } return (error->ring[i].ipehr ^ error->ring[i].instdone); } else { } i = i + 1; ldv_48324: ; if (i <= 4) { goto ldv_48323; } else { } return (error_code); } }
/* i915_gem_record_fences(): snapshot the hardware fence registers into
   error->fence[], choosing register layout by GPU generation (gen2/3: 8+8
   32-bit regs via mmio_readl; gen4/5 and gen6+: 64-bit regs via mmio_readq —
   the gen6+ branch is on the next physical line).  The raw `__p + 44UL` byte
   reads are CIL-lowered feature-flag bitfield tests on drm_i915_private. */
static void i915_gem_record_fences(struct drm_device *dev , struct drm_i915_error_state *error ) { struct drm_i915_private *dev_priv ; int i ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 3U) { goto _L___1; } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 2U) { _L___1: /* CIL Label */ i = 0; goto ldv_48345; ldv_48344: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((i + 2048) * 4), 1); error->fence[i] = (u64 )tmp; i = i + 1; ldv_48345: ; if (i <= 7) { goto ldv_48344; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 10098U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { _L: /* CIL Label */ i = 0; goto ldv_48366; ldv_48365: 
/* Continuation of i915_gem_record_fences(): second bank of 8 fence regs for
   the gen2/3 variants, then the gen4/5 (16 x 64-bit) and gen6+ (num_fence_regs
   x 64-bit) register banks. */
tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((i + 3072) * 4), 1); error->fence[i + 8] = (u64 )tmp___0; i = i + 1; ldv_48366: ; if (i <= 7) { goto ldv_48365; } else { } } else { } } } } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 5U) { goto _L___0; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 4U) { _L___0: /* CIL Label */ i = 0; goto ldv_48381; ldv_48380: error->fence[i] = (*(dev_priv->uncore.funcs.mmio_readq))(dev_priv, (off_t )((i + 1536) * 8), 1); i = i + 1; ldv_48381: ; if (i <= 15) { goto ldv_48380; } else { } } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 5U) { i = 0; goto ldv_48390; ldv_48389: error->fence[i] = (*(dev_priv->uncore.funcs.mmio_readq))(dev_priv, (off_t )((i + 131072) * 8), 1); i = i + 1; ldv_48390: ; if (dev_priv->num_fence_regs > i) { goto ldv_48389; } else { } } else { } } } } } return; } }
/* gen8_record_semaphore_state(): if hardware semaphores are enabled, capture
   the shared semaphore page (once, lazily, into error->semaphore_obj) and,
   for every other initialized engine, record the mailbox value and sync seqno
   this ring would signal to it.  The offset arithmetic on the next physical
   line locates this (ring, target) slot within the 4 KiB semaphore page. */
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv , struct drm_i915_error_state *error , struct intel_engine_cs *ring , struct drm_i915_error_ring *ering ) { struct intel_engine_cs *to ; int i ; bool tmp ; int tmp___0 ; int idx ; u16 signal_offset ; u32 *tmp___1 ; unsigned long tmp___2 ; u32 tmp___3 ; bool tmp___4 ; { tmp = i915_semaphore_is_enabled(dev_priv->dev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } if ((unsigned long )error->semaphore_obj == (unsigned long )((struct drm_i915_error_object *)0)) { error->semaphore_obj = i915_error_object_create(dev_priv, dev_priv->semaphore_obj, & dev_priv->gtt.base); } else { } i = 0; goto ldv_48405; ldv_48404: to = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___4 = intel_ring_initialized(to); if ((int )tmp___4) { if ((unsigned long )ring == (unsigned long )to) { goto ldv_48403; } else { } 
/* Continuation of gen8_record_semaphore_state(): compute the dword offset of
   the signal slot inside the captured semaphore page and copy mailbox value +
   sync seqno for the target engine. */
tmp___2 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); signal_offset = (u16 )(((tmp___2 + ((unsigned long )i + (unsigned long )((unsigned int )ring->id * 5U)) * 8UL) & 4095UL) / 4UL); tmp___1 = (error->semaphore_obj)->pages[0]; tmp___3 = intel_ring_sync_index(ring, to); idx = (int )tmp___3; ering->semaphore_mboxes[idx] = *(tmp___1 + (unsigned long )signal_offset); ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx]; } else { } ldv_48403: i = i + 1; ldv_48405: ; if (i <= 4) { goto ldv_48404; } else { } return; } }
/* gen6_record_semaphore_state(): pre-gen8 variant — read the two per-ring
   semaphore mailbox MMIO registers (mmio_base + 0x40 / + 0x44) plus, when the
   device has a VEBOX engine (ring_mask bit 3), a third mailbox at + 0x48; and
   mirror the corresponding software sync_seqno values. */
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv , struct intel_engine_cs *ring , struct drm_i915_error_ring *ering ) { struct drm_i915_private *__p ; { ering->semaphore_mboxes[0] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 64U), 1); ering->semaphore_mboxes[1] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 68U), 1); ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0]; ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1]; __p = to_i915((struct drm_device const *)dev_priv->dev); if (((int )__p->info.ring_mask & 8) != 0) { ering->semaphore_mboxes[2] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 72U), 1); ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2]; } else { } return; } }
/* i915_record_ring_state(): dump one engine's MMIO state (RC_PSMI, fault reg,
   semaphores, IPEIR/IPEHR/INSTDONE/INSTPS, batch/fault addresses, ring
   head/tail/start/ctl, HWS page address, hangcheck score/action, and — with
   PPGTT enabled — per-ring page-directory state) into `ering`, with register
   offsets selected per GPU generation.  Body spans the next three physical
   lines. */
static void i915_record_ring_state(struct drm_device *dev , struct drm_i915_error_state *error , struct intel_engine_cs *ring , struct drm_i915_error_ring *ering ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p___1 ; uint32_t tmp___3 ; struct drm_i915_private *__p___2 ; int tmp___4 ; int mmio ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; int i ; uint32_t tmp___5 ; uint32_t tmp___6 ; struct drm_i915_private *__p___6 ; 
/* i915_record_ring_state() continued: gen6+ reads RC_PSMI and the per-engine
   fault register, then dispatches to the gen8 or gen6 semaphore recorder;
   gen4+ captures the instruction-pointer/error registers (IPEIR, IPEHR,
   INSTDONE, INSTPS), batch buffer address, and on gen8+ the upper 32 bits of
   the fault and batch addresses. */
struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 5U) { ering->rc_psmi = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 80U), 1); ering->fault_reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )ring->id * 256U + 16532U), 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { gen8_record_semaphore_state(dev_priv, error, ring, ering); } else { gen6_record_semaphore_state(dev_priv, ring, ering); } } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 3U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 120U), 1); ering->faddr = (u64 )tmp; ering->ipeir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 100U), 1); ering->ipehr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 104U), 1); ering->instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 108U), 1); ering->instps = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 112U), 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 320U), 1); ering->bbaddr = (u64 )tmp___0; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 96U), 1); ering->faddr = ering->faddr | ((unsigned long long )tmp___1 << 32); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 360U), 1); ering->bbaddr = ering->bbaddr | ((unsigned long long )tmp___2 << 32); } else { } ering->bbstate = 
/* i915_record_ring_state() continued: pre-gen4 fallback uses fixed legacy
   register addresses (0x20D0 etc.); then the generation-independent state —
   waiter flag, INSTPM, current seqno, active head, ring START/HEAD/TAIL/CTL —
   and the hardware-status-page address register, whose offset on gen7 is a
   per-engine lookup table (the switch below) and on gen6 is mmio_base+0x2080. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 272U), 1); } else { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8400L, 1); ering->faddr = (u64 )tmp___3; ering->ipeir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8328L, 1); ering->ipehr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8332L, 1); ering->instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8336L, 1); } tmp___4 = waitqueue_active(& ring->irq_queue); ering->waiting = tmp___4 != 0; ering->instpm = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 192U), 1); ering->seqno = (*(ring->get_seqno))(ring, 0); ering->acthd = intel_ring_get_active_head(ring); ering->start = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 56U), 1); ering->head = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); ering->tail = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 48U), 1); ering->ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 44UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 7U) { switch ((unsigned int )ring->id) { default: ; case 0U: mmio = 16512; goto ldv_48464; case 2U: mmio = 17024; goto ldv_48464; case 1U: mmio = 16768; goto ldv_48464; case 3U: mmio = 17280; goto ldv_48464; } ldv_48464: ; } else { __p___3 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 6U) { mmio = (int )(ring->mmio_base + 8320U); } else { mmio = (int )(ring->mmio_base + 128U); } } ering->hws = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )mmio, 1); } else { } ering->hangcheck_score = ring->hangcheck.score; ering->hangcheck_action = ring->hangcheck.action; if (i915.enable_ppgtt != 0) { 
/* i915_record_ring_state() tail: with PPGTT enabled, capture GFX_MODE and the
   per-ring page-directory base (gen6 at +0x518, gen7 at +0x228) or, on gen8+,
   the four 64-bit PDP registers assembled from their hi/lo 32-bit halves. */
ering->vm_info.gfx_mode = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 668U), 1); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) == 6U) { ering->vm_info.__annonCompField81.pp_dir_base = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 1304U), 1); } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) == 7U) { ering->vm_info.__annonCompField81.pp_dir_base = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 552U), 1); } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 7U) { i = 0; goto ldv_48494; ldv_48493: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((ring->mmio_base + (u32 )(i * 8)) + 628U), 1); ering->vm_info.__annonCompField81.pdp[i] = (u64 )tmp___5; ering->vm_info.__annonCompField81.pdp[i] = ering->vm_info.__annonCompField81.pdp[i] << 32; tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((ring->mmio_base + (u32 )(i * 8)) + 624U), 1); ering->vm_info.__annonCompField81.pdp[i] = ering->vm_info.__annonCompField81.pdp[i] | (u64 )tmp___6; i = i + 1; ldv_48494: ; if (i <= 3) { goto ldv_48493; } else { } } else { } } } } else { } return; } }
/* i915_gem_record_active_context(): only for the render ring (id 0) with a
   non-zero CCID — scan the bound-object list for the GEM object whose GGTT
   offset matches the CCID page address and capture it as the active-context
   image (ering->ctx).  Loop body continues on the next physical line. */
static void i915_gem_record_active_context(struct intel_engine_cs *ring , struct drm_i915_error_state *error , struct drm_i915_error_ring *ering ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct list_head const *__mptr ; bool tmp ; int tmp___0 ; unsigned long tmp___1 ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; if ((unsigned int )ring->id != 0U || error->ccid == 0U) { return; } else { } __mptr = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffee8UL; goto ldv_48510; 
/* Tail of i915_gem_record_active_context(): skip unbound objects; match the
   CCID page (mask 0xfffffffffffff000) and capture it, then stop. */
ldv_48509: tmp = i915_gem_obj_ggtt_bound(obj); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_48507; } else { } tmp___1 = i915_gem_obj_ggtt_offset(obj); if (((unsigned long )error->ccid & 0xfffffffffffff000UL) == tmp___1) { ering->ctx = i915_error_object_create(dev_priv, obj, & dev_priv->gtt.base); goto ldv_48508; } else { } ldv_48507: __mptr___0 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffee8UL; ldv_48510: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_48509; } else { } ldv_48508: ; return; } }
/* i915_gem_record_rings(): for each of the 5 engines, record ring MMIO state,
   find the active (hung) request and capture its batch buffer under the
   correct address space (full PPGTT if the request's context has one, else
   the global GTT), capture the ringbuffer, HWS page and active context, and
   copy the pending-request list (seqno/jiffies/tail per request).  Body spans
   the next two physical lines. */
static void i915_gem_record_rings(struct drm_device *dev , struct drm_i915_error_state *error ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_request *request ; int i ; int count ; struct intel_engine_cs *ring ; struct intel_ringbuffer *rbuf ; struct i915_address_space *vm ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct task_struct *task ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; void *tmp ; struct list_head const *__mptr___1 ; struct drm_i915_error_request *erq ; int tmp___0 ; struct list_head const *__mptr___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_48552; ldv_48551: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; error->ring[i].pid = -1; if ((unsigned long )ring->dev == (unsigned long )((struct drm_device *)0)) { goto ldv_48521; } else { } error->ring[i].valid = 1; i915_record_ring_state(dev, error, ring, (struct drm_i915_error_ring *)(& error->ring) + (unsigned long )i); request = i915_gem_find_active_request(ring); if ((unsigned long )request != (unsigned long )((struct drm_i915_gem_request *)0)) { vm = (unsigned long )request->ctx != (unsigned long )((struct intel_context *)0) && (unsigned long )(request->ctx)->ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0) ? 
/* i915_gem_record_rings() continued: capture the hung batch; for two specific
   device ids (13687U / 9570U — HSW/IVB workaround batches, judging by the
   scratch.obj source; confirm against i915_gpu_error.c) also capture the
   scratch workaround batch; record the submitting task's comm/pid under RCU;
   pick the right ringbuffer (execlists: per-context engine ringbuf, else the
   legacy ring->buffer) and capture it plus the status page. */
& ((request->ctx)->ppgtt)->base : & dev_priv->gtt.base; error->ring[i].batchbuffer = i915_error_object_create(dev_priv, request->batch_obj, vm); __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 13687U) { error->ring[i].wa_batchbuffer = i915_error_object_create(dev_priv, ring->scratch.obj, & dev_priv->gtt.base); } else { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 9570U) { error->ring[i].wa_batchbuffer = i915_error_object_create(dev_priv, ring->scratch.obj, & dev_priv->gtt.base); } else { } } if ((unsigned long )request->pid != (unsigned long )((struct pid *)0)) { rcu_read_lock(); task = pid_task(request->pid, 0); if ((unsigned long )task != (unsigned long )((struct task_struct *)0)) { strcpy((char *)(& error->ring[i].comm), (char const *)(& task->comm)); error->ring[i].pid = task->pid; } else { } rcu_read_unlock(); } else { } } else { } if (i915.enable_execlists != 0) { if ((unsigned long )request != (unsigned long )((struct drm_i915_gem_request *)0)) { rbuf = (request->ctx)->engine[(unsigned int )ring->id].ringbuf; } else { rbuf = (ring->default_context)->engine[(unsigned int )ring->id].ringbuf; } } else { rbuf = ring->buffer; } error->ring[i].cpu_ring_head = rbuf->head; error->ring[i].cpu_ring_tail = rbuf->tail; error->ring[i].ringbuffer = i915_error_object_create(dev_priv, rbuf->obj, & dev_priv->gtt.base); error->ring[i].hws_page = i915_error_object_create(dev_priv, ring->status_page.obj, & dev_priv->gtt.base); i915_gem_record_active_context(ring, error, (struct drm_i915_error_ring *)(& error->ring) + (unsigned long )i); count = 0; __mptr = (struct list_head const *)ring->request_list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; goto ldv_48541; ldv_48540: count = count + 1; __mptr___0 = (struct list_head const *)request->list.next; request = (struct drm_i915_gem_request 
/* i915_gem_record_rings() tail: count the pending requests, kcalloc() an
   array (16 bytes per drm_i915_error_request as lowered by CIL; on allocation
   failure num_requests is reset to 0 and the engine is skipped), then walk
   the request list again copying seqno, emitted jiffies and postfix tail. */
*)__mptr___0 + 0xffffffffffffffb8UL; ldv_48541: ; if ((unsigned long )(& request->list) != (unsigned long )(& ring->request_list)) { goto ldv_48540; } else { } error->ring[i].num_requests = count; tmp = kcalloc((size_t )count, 16UL, 32U); error->ring[i].requests = (struct drm_i915_error_request *)tmp; if ((unsigned long )error->ring[i].requests == (unsigned long )((struct drm_i915_error_request *)0)) { error->ring[i].num_requests = 0; goto ldv_48521; } else { } count = 0; __mptr___1 = (struct list_head const *)ring->request_list.next; request = (struct drm_i915_gem_request *)__mptr___1 + 0xffffffffffffffb8UL; goto ldv_48549; ldv_48548: tmp___0 = count; count = count + 1; erq = error->ring[i].requests + (unsigned long )tmp___0; erq->seqno = request->seqno; erq->jiffies = (long )request->emitted_jiffies; erq->tail = request->postfix; __mptr___2 = (struct list_head const *)request->list.next; request = (struct drm_i915_gem_request *)__mptr___2 + 0xffffffffffffffb8UL; ldv_48549: ; if ((unsigned long )(& request->list) != (unsigned long )(& ring->request_list)) { goto ldv_48548; } else { } ldv_48521: i = i + 1; ldv_48552: ; if (i <= 4) { goto ldv_48551; } else { } return; } }
/* i915_gem_capture_vm(): for address space slot `ndx`, count the VMAs on the
   vm's active list and the pinned VMAs on the global bound list, allocate one
   contiguous error-buffer array sized for both, and fill it via
   capture_active_bo()/capture_pinned_bo().  Body continues on the next two
   physical lines. */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv , struct drm_i915_error_state *error , struct i915_address_space *vm , int const ndx ) { struct drm_i915_error_buffer *active_bo ; struct drm_i915_error_buffer *pinned_bo ; struct drm_i915_gem_object *obj ; struct i915_vma *vma ; int i ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; void *tmp ; { active_bo = (struct drm_i915_error_buffer *)0; pinned_bo = (struct drm_i915_error_buffer *)0; i = 0; __mptr = (struct list_head const *)vm->active_list.next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff68UL; goto ldv_48570; ldv_48569: i = i + 1; __mptr___0 = (struct 
/* i915_gem_capture_vm() continued: active count -> active_bo_count[ndx];
   pinned count = (total i) - active count; one kcalloc (48 bytes per record)
   covers both, with pinned_bo aliasing the tail of the active_bo array.
   Allocation failure is tolerated — the capture_* calls are simply skipped
   and the counts stay as counted. */
list_head const *)vma->mm_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff68UL; ldv_48570: ; if ((unsigned long )(& vma->mm_list) != (unsigned long )(& vm->active_list)) { goto ldv_48569; } else { } *(error->active_bo_count + (unsigned long )ndx) = (u32 )i; __mptr___1 = (struct list_head const *)dev_priv->mm.bound_list.next; obj = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffee8UL; goto ldv_48584; ldv_48583: __mptr___2 = (struct list_head const *)obj->vma_list.next; vma = (struct i915_vma *)__mptr___2 + 0xffffffffffffff58UL; goto ldv_48581; ldv_48580: ; if ((unsigned long )vma->vm == (unsigned long )vm && (int )vma->pin_count > 0) { i = i + 1; } else { } __mptr___3 = (struct list_head const *)vma->vma_link.next; vma = (struct i915_vma *)__mptr___3 + 0xffffffffffffff58UL; ldv_48581: ; if ((unsigned long )(& vma->vma_link) != (unsigned long )(& obj->vma_list)) { goto ldv_48580; } else { } __mptr___4 = (struct list_head const *)obj->global_list.next; obj = (struct drm_i915_gem_object *)__mptr___4 + 0xfffffffffffffee8UL; ldv_48584: ; if ((unsigned long )(& obj->global_list) != (unsigned long )(& dev_priv->mm.bound_list)) { goto ldv_48583; } else { } *(error->pinned_bo_count + (unsigned long )ndx) = (u32 )i - *(error->active_bo_count + (unsigned long )ndx); if (i != 0) { tmp = kcalloc((size_t )i, 48UL, 32U); active_bo = (struct drm_i915_error_buffer *)tmp; if ((unsigned long )active_bo != (unsigned long )((struct drm_i915_error_buffer *)0)) { pinned_bo = active_bo + (unsigned long )*(error->active_bo_count + (unsigned long )ndx); } else { } } else { } if ((unsigned long )active_bo != (unsigned long )((struct drm_i915_error_buffer *)0)) { *(error->active_bo_count + (unsigned long )ndx) = capture_active_bo(active_bo, (int )*(error->active_bo_count + (unsigned long )ndx), & vm->active_list); } else { } if ((unsigned long )pinned_bo != (unsigned long )((struct drm_i915_error_buffer *)0)) { *(error->pinned_bo_count + (unsigned long )ndx) = 
/* i915_gem_capture_vm() tail: record the pinned buffers and publish both
   array pointers into the error state. */
capture_pinned_bo(pinned_bo, (int )*(error->pinned_bo_count + (unsigned long )ndx), & dev_priv->mm.bound_list, vm); } else { } *(error->active_bo + (unsigned long )ndx) = active_bo; *(error->pinned_bo + (unsigned long )ndx) = pinned_bo; return; } }
/* i915_gem_capture_buffers(): count the address spaces on dev_priv->vm_list,
   allocate the four per-vm bookkeeping arrays (pointer + count, active and
   pinned), and on any allocation failure free everything and NULL the fields
   (all-or-nothing); otherwise capture each vm in turn (loop on the next
   physical line). */
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv , struct drm_i915_error_state *error ) { struct i915_address_space *vm ; int cnt ; int i ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; struct list_head const *__mptr___1 ; int tmp___3 ; struct list_head const *__mptr___2 ; { cnt = 0; i = 0; __mptr = (struct list_head const *)dev_priv->vm_list.next; vm = (struct i915_address_space *)__mptr + 0xffffffffffffff50UL; goto ldv_48598; ldv_48597: cnt = cnt + 1; __mptr___0 = (struct list_head const *)vm->global_link.next; vm = (struct i915_address_space *)__mptr___0 + 0xffffffffffffff50UL; ldv_48598: ; if ((unsigned long )(& vm->global_link) != (unsigned long )(& dev_priv->vm_list)) { goto ldv_48597; } else { } tmp = kcalloc((size_t )cnt, 8UL, 32U); error->active_bo = (struct drm_i915_error_buffer **)tmp; tmp___0 = kcalloc((size_t )cnt, 8UL, 32U); error->pinned_bo = (struct drm_i915_error_buffer **)tmp___0; tmp___1 = kcalloc((size_t )cnt, 4UL, 32U); error->active_bo_count = (u32 *)tmp___1; tmp___2 = kcalloc((size_t )cnt, 4UL, 32U); error->pinned_bo_count = (u32 *)tmp___2; if ((((unsigned long )error->active_bo == (unsigned long )((struct drm_i915_error_buffer **)0) || (unsigned long )error->pinned_bo == (unsigned long )((struct drm_i915_error_buffer **)0)) || (unsigned long )error->active_bo_count == (unsigned long )((u32 *)0U)) || (unsigned long )error->pinned_bo_count == (unsigned long )((u32 *)0U)) { kfree((void const *)error->active_bo); kfree((void const *)error->active_bo_count); kfree((void const *)error->pinned_bo); kfree((void const *)error->pinned_bo_count); error->active_bo = (struct drm_i915_error_buffer **)0; 
/* i915_gem_capture_buffers() tail: success path walks vm_list again, calling
   i915_gem_capture_vm() with an increasing index, and records vm_count. */
error->active_bo_count = (u32 *)0U; error->pinned_bo = (struct drm_i915_error_buffer **)0; error->pinned_bo_count = (u32 *)0U; } else { __mptr___1 = (struct list_head const *)dev_priv->vm_list.next; vm = (struct i915_address_space *)__mptr___1 + 0xffffffffffffff50UL; goto ldv_48605; ldv_48604: tmp___3 = i; i = i + 1; i915_gem_capture_vm(dev_priv, error, vm, tmp___3); __mptr___2 = (struct list_head const *)vm->global_link.next; vm = (struct i915_address_space *)__mptr___2 + 0xffffffffffffff50UL; ldv_48605: ; if ((unsigned long )(& vm->global_link) != (unsigned long )(& dev_priv->vm_list)) { goto ldv_48604; } else { } error->vm_count = (u32 )cnt; } return; } }
/* i915_capture_reg_state(): snapshot the non-ring global registers (IER/GTIER,
   forcewake, ERR_INT, fault data, DERRMR, EIR, PGTBL_ER, CCID, ...) choosing
   addresses by platform/generation.  The `__p + 45UL` / `+ 44UL` byte reads
   are CIL-lowered platform-flag bitfield tests.  Body spans the next two
   physical lines. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv , struct drm_i915_error_state *error ) { struct drm_device *dev ; int i ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; uint16_t tmp ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { error->gtier[0] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278556L, 1); error->ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581216L, 1); error->forcewake = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245360L, 1); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 7U) { error->err_int = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278592L, 1); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { error->fault_data0 = 
/* i915_capture_reg_state() continued: gen8+ fault data; gen6 vs gen7+
   forcewake; gen6+ DERRMR/ERROR/DONE_REG; gen6/gen7 GAM_ECOCHK and GAC_ECO;
   gen6+ CCID. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 19216L, 1); error->fault_data1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 19220L, 1); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 6U) { error->forcewake = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41356L, 1); error->gab_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 147456L, 1); error->gfx_mode = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 9504L, 1); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 6U) { error->forcewake = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 41352L, 1); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 5U) { error->derrmr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278608L, 1); error->error = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16544L, 1); error->done_reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16560L, 1); } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 6U) { error->gam_ecochk = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); error->gac_eco = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 82064L, 1); } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 7U) { error->gam_ecochk = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 16528L, 1); error->gac_eco = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 82064L, 1); } else { } } __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) > 5U) { error->ccid = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8576L, 1); } else { } __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___11->info.gen) > 7U) { error->ier = 
/* i915_capture_reg_state() tail: interrupt-enable registers — gen8+ reads a
   master IER plus 4 GTIERs; PCH platforms read DEIER + GTIER; gen2 reads a
   16-bit IER; other non-flagged platforms a 32-bit IER — then EIR, PGTBL_ER
   and the extended INSTDONE set. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279660L, 1); i = 0; goto ldv_48674; ldv_48673: error->gtier[i] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(i * 16 + 279308), 1); i = i + 1; ldv_48674: ; if (i <= 3) { goto ldv_48673; } else { } } else { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___10->pch_type != 0U) { error->ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278540L, 1); error->gtier[0] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278556L, 1); } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___9->info.gen) == 2U) { tmp = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8352L, 1); error->ier = (u32 )tmp; } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) == 0U) { error->ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 1); } else { } } } } error->eir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8368L, 1); error->pgtbl_er = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8228L, 1); i915_get_extra_instdone(dev, (uint32_t *)(& error->extra_instdone)); return; } }
/* i915_error_capture_msg(): build the human-readable one-line hang summary in
   error->error_msg (128-byte buffer): "GPU HANG: ecode gen:ring:0xXXXXXXXX",
   optionally ", in <comm> [<pid>]" when a guilty process is known, plus the
   caller-provided reason and the reset/continue action (finished on the next
   physical line). */
static void i915_error_capture_msg(struct drm_device *dev , struct drm_i915_error_state *error , bool wedged , char const *error_msg ) { struct drm_i915_private *dev_priv ; u32 ecode ; int ring_id ; int len ; struct drm_i915_private *__p ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring_id = -1; ecode = i915_error_generate_code(dev_priv, error, & ring_id); __p = to_i915((struct drm_device const *)dev); len = scnprintf((char *)(& error->error_msg), 128UL, "GPU HANG: ecode %d:%d:0x%08x", (int )__p->info.gen, ring_id, ecode); if (ring_id != -1 && error->ring[ring_id].pid != -1) { tmp = scnprintf((char *)(& error->error_msg) + (unsigned long )len, 128UL - (unsigned long )len, ", in %s [%d]", (char *)(& error->ring[ring_id].comm), error->ring[ring_id].pid); len = tmp + len; } else { } 
/* i915_error_capture_msg() tail: append ", reason: ..., action: reset|continue". */
scnprintf((char *)(& error->error_msg) + (unsigned long )len, 128UL - (unsigned long )len, ", reason: %s, action: %s", error_msg, (int )wedged ? (char *)"reset" : (char *)"continue"); return; } }
/* i915_capture_gen_state(): copy the GPU reset count and system suspend count
   into the error record. */
static void i915_capture_gen_state(struct drm_i915_private *dev_priv , struct drm_i915_error_state *error ) { { error->reset_count = i915_reset_count(& dev_priv->gpu_error); error->suspend_count = dev_priv->suspend_count; return; } } void i915_capture_error_state(struct drm_device *dev , bool wedged , char const *error_msg ) {
/* i915_capture_error_state(): top-level hang capture — kzalloc the error
   state, run all the capture helpers above, timestamp it, format the summary
   message, then publish it as gpu_error.first_error under the irq-saving
   spinlock (only if no earlier error is already stored; otherwise the new
   capture is freed on the next physical line).
   NOTE(review): `warned` is a plain automatic local here but is read before
   any assignment by the `if (! warned)` guard at the end of this function
   (next physical line) — reading an uninitialized automatic variable is
   undefined behavior.  In the upstream kernel source this is presumably
   `static bool warned` (one-shot message latch) — TODO confirm against
   drivers/gpu/drm/i915/i915_gpu_error.c; not changed here because this file
   is generated CIL/LDV input. */
bool warned ; struct drm_i915_private *dev_priv ; struct drm_i915_error_state *error ; unsigned long flags ; void *tmp ; long tmp___0 ; raw_spinlock_t *tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = kzalloc(1856UL, 32U); error = (struct drm_i915_error_state *)tmp; if ((unsigned long )error == (unsigned long )((struct drm_i915_error_state *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_capture_error_state", "out of memory, not capturing error state\n"); } else { } return; } else { } kref_init(& error->ref); i915_capture_gen_state(dev_priv, error); i915_capture_reg_state(dev_priv, error); i915_gem_capture_buffers(dev_priv, error); i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); do_gettimeofday(& error->time); error->overlay = intel_overlay_capture_error_state(dev); error->display = intel_display_capture_error_state(dev); i915_error_capture_msg(dev, error, (int )wedged, error_msg); printk("\016[drm] %s\n", (char *)(& error->error_msg)); tmp___1 = spinlock_check(& dev_priv->gpu_error.lock); flags = _raw_spin_lock_irqsave(tmp___1); if ((unsigned long )dev_priv->gpu_error.first_error == (unsigned long )((struct drm_i915_error_state *)0)) { dev_priv->gpu_error.first_error = error; error = (struct drm_i915_error_state *)0; } else { } spin_unlock_irqrestore(& dev_priv->gpu_error.lock, flags); if ((unsigned long )error != (unsigned long )((struct 
/* i915_capture_error_state() tail: free the capture if another error was
   already stored, then print the one-time bug-reporting banner.
   NOTE(review): the `if (! warned)` read below is of the uninitialized local
   declared on the previous physical line — UB as transformed; presumably
   `static` in the original kernel source (TODO confirm). */
drm_i915_error_state *)0)) { i915_error_state_free(& error->ref); return; } else { } if (! warned) { printk("\016[drm] GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); printk("\016[drm] Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); printk("\016[drm] drm/i915 developers can then reassign to the right component if it\'s not a kernel issue.\n"); printk("\016[drm] The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); printk("\016[drm] GPU crash dump saved to /sys/class/drm/card%d/error\n", (dev->primary)->index); warned = 1; } else { } return; } }
/* i915_error_state_get(): hand out a kref-counted reference to the stored
   first_error (if any) under the gpu_error spinlock. */
void i915_error_state_get(struct drm_device *dev , struct i915_error_state_file_priv *error_priv ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; spin_lock_irq(& dev_priv->gpu_error.lock); error_priv->error = dev_priv->gpu_error.first_error; if ((unsigned long )error_priv->error != (unsigned long )((struct drm_i915_error_state *)0)) { kref_get___8(& (error_priv->error)->ref); } else { } spin_unlock_irq(& dev_priv->gpu_error.lock); return; } }
/* i915_error_state_put(): drop the reference taken by _get(), freeing via
   i915_error_state_free() on last put. */
void i915_error_state_put(struct i915_error_state_file_priv *error_priv ) { { if ((unsigned long )error_priv->error != (unsigned long )((struct drm_i915_error_state *)0)) { kref_put___10(& (error_priv->error)->ref, & i915_error_state_free); } else { } return; } }
/* i915_destroy_error_state(): detach the stored error under the lock, then
   drop the creation reference outside the lock. */
void i915_destroy_error_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_error_state *error ; { dev_priv = (struct drm_i915_private *)dev->dev_private; spin_lock_irq(& dev_priv->gpu_error.lock); error = dev_priv->gpu_error.first_error; dev_priv->gpu_error.first_error = (struct drm_i915_error_state *)0; spin_unlock_irq(& dev_priv->gpu_error.lock); if ((unsigned long )error != (unsigned long )((struct drm_i915_error_state *)0)) { kref_put___10(& error->ref, & i915_error_state_free); } else { } return; } } char const 
/* i915_cache_level_str(): map an i915 cache-level enum value to a display
   string; level 1 distinguishes " LLC" vs " snooped" via a platform-flag
   bitfield (the CIL-lowered `__p + 46UL` byte read). */
*i915_cache_level_str(struct drm_i915_private *i915___0 , int type ) { struct drm_i915_private *__p ; { switch (type) { case 0: ; return (" uncached"); case 1: __p = i915___0; return ((unsigned int )*((unsigned char *)__p + 46UL) != 0U ? " LLC" : " snooped"); case 2: ; return (" L3+LLC"); case 3: ; return (" WT"); default: ; return (""); } } }
/* i915_get_extra_instdone(): zero the 4-dword instdone array, then fill the
   generation-appropriate subset — gen2/3 one legacy INSTDONE register;
   gen4/5/6 INSTDONE + INSTDONE1; gen7+ (on the next physical line) four
   registers including sampler and row instdone. */
void i915_get_extra_instdone(struct drm_device *dev , uint32_t *instdone ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; memset((void *)instdone, 0, 16UL); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 2U) { *instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8336L, 1); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 3U) { *instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8336L, 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 4U) { *instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8300L, 1); *(instdone + 1UL) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8316L, 1); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 5U) { *instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8300L, 1); *(instdone + 1UL) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8316L, 1); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 6U) { *instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8300L, 1); *(instdone + 1UL) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8316L, 1); } else { __p = to_i915((struct 
drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { *instdone = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8300L, 1); *(instdone + 1UL) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 28928L, 1); *(instdone + 2UL) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 57696L, 1); *(instdone + 3UL) = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 57700L, 1); } else { } } } } } } return; } } bool ldv_queue_work_on_371(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_372(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_373(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_374(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_375(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return 
(ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/* x86 atomic test-and-set: "lock bts" sets bit nr of *addr; setc captures
 * the previous bit value, so the return is nonzero iff the bit was
 * already set. The .smp_locks section entry is the kernel's SMP
 * alternatives bookkeeping. */
__inline static int test_and_set_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } }
extern int vscnprintf(char * , size_t , char const * , va_list * ) ; extern char *kasprintf(gfp_t , char const * , ...) ;
/* Paravirt save_fl: if pv_irq_ops.save_fl.func is NULL this hits the CIL
 * rendering of BUG() (ud2 asm followed by a self-goto infinite loop);
 * otherwise the patched call sequence reads the flags register into
 * __eax/__ret. The self-assignments of __edi..__eax are CIL artifacts of
 * the original register-clobber macro. */
__inline static unsigned long arch_local_save_flags___9(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } }
/* Static-key (jump label) test: true iff the key's enable count > 0. */
__inline static bool static_key_false___6(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } }
extern void __wake_up(wait_queue_head_t * , unsigned int , int , void * ) ; extern ktime_t ktime_get(void) ;
/* Lockdep's opinion on whether rcu_read_lock_sched() is effectively held:
 * trivially 1 when lockdep is off; 0 when RCU isn't watching or the CPU
 * is offline; otherwise 1 if the sched lock map is held, preemption is
 * disabled, or local irqs are disabled. */
__inline static int rcu_read_lock_sched_held___6(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___9(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } }
bool ldv_queue_work_on_385(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_387(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_386(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_389(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_388(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_390(struct work_struct *ldv_func_arg1 ) ;
/* queue_work()/queue_delayed_work()/schedule_work() as expanded by CIL,
 * routed through the LDV-modelled *_on variants; 8192 is the expanded
 * WORK_CPU_UNBOUND constant (presumably -- confirm against workqueue.h). */
__inline static bool queue_work___1(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_385(8192, wq, work); return (tmp); } }
__inline static bool queue_delayed_work___1(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_386(8192, wq, dwork, delay); return (tmp); } }
__inline static bool schedule_work___0(struct work_struct *work ) { bool tmp ; { tmp = queue_work___1(system_wq, work); return (tmp); } }
/* External and LDV work-model declarations used by the i915_irq code below. */
 extern unsigned int ioread32(void * ) ; extern int kobject_uevent_env(struct kobject * , enum kobject_action , char ** ) ; void invoke_work_8(void) ; void call_and_disable_all_11(int state ) ; void call_and_disable_all_9(int state ) ; void call_and_disable_all_12(int state ) ; void disable_work_8(struct work_struct *work ) ; void disable_work_11(struct work_struct *work ) ; void disable_work_12(struct work_struct *work ) ; void call_and_disable_work_9(struct work_struct *work ) ; void activate_work_8(struct work_struct *work , int state ) ; void disable_work_10(struct work_struct *work ) ; void activate_work_10(struct work_struct *work , int state ) ; void call_and_disable_work_11(struct work_struct *work ) ; void activate_work_7(struct work_struct *work , int state ) ; void call_and_disable_all_10(int state ) ; void call_and_disable_work_10(struct work_struct *work ) ; void disable_work_7(struct work_struct *work ) ; void invoke_work_10(void) ; void call_and_disable_work_7(struct work_struct *work ) ; void call_and_disable_work_8(struct work_struct *work ) ; void invoke_work_11(void) ; void invoke_work_9(void) ; void activate_work_9(struct work_struct *work , int state ) ; void call_and_disable_all_7(int state ) ; void invoke_work_12(void) ; void call_and_disable_work_12(struct work_struct *work ) ; void activate_work_11(struct work_struct *work , int state ) ; void invoke_work_7(void) ; void call_and_disable_all_8(int state ) ; void activate_work_12(struct work_struct *work , int state ) ; void disable_work_9(struct work_struct *work ) ; extern void synchronize_irq(unsigned int ) ; extern int drm_irq_install(struct drm_device * , int ) ; extern int drm_irq_uninstall(struct drm_device * ) ; extern bool drm_handle_vblank(struct drm_device * , int ) ; extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device * , int , int * , struct timeval * , unsigned int , struct drm_crtc const * , struct 
drm_display_mode const * ) ; void intel_lrc_irq_handler(struct intel_engine_cs *ring ) ; extern void pm_qos_add_request(struct pm_qos_request * , int , s32 ) ; extern void drm_kms_helper_hotplug_event(struct drm_device * ) ; void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv , enum pipe pipe ) ; void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv , enum transcoder pch_transcoder ) ; void gen5_enable_gt_irq(struct drm_i915_private *dev_priv , uint32_t mask ) ; void gen5_disable_gt_irq(struct drm_i915_private *dev_priv , uint32_t mask ) ; void gen6_enable_pm_irq(struct drm_i915_private *dev_priv , uint32_t mask ) ; void gen6_disable_pm_irq(struct drm_i915_private *dev_priv , uint32_t mask ) ; int intel_get_crtc_scanline(struct intel_crtc *crtc ) ; void intel_prepare_page_flip(struct drm_device *dev , int plane ) ; void intel_finish_page_flip(struct drm_device *dev , int pipe ) ; void intel_finish_page_flip_plane(struct drm_device *dev , int plane ) ; void intel_check_page_flip(struct drm_device *dev , int pipe ) ; void intel_prepare_reset(struct drm_device *dev ) ; void intel_finish_reset(struct drm_device *dev ) ; struct tracepoint __tracepoint_i915_gem_request_notify ;
/* CIL expansion of the i915_gem_request_notify tracepoint: when the
 * static key is enabled, walks the NULL-terminated funcs array under
 * rcu_read_lock_sched (with a lockdep rcu_dereference_check that warns
 * once on suspicious usage) and invokes each probe with (data, ring).
 * The trailing lock/deref/unlock sequence with no probe calls is the
 * expansion of the tracepoint's "check even when disabled" path. */
__inline static void trace_i915_gem_request_notify(struct intel_engine_cs *ring ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_384 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_386 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___6(& __tracepoint_i915_gem_request_notify.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_notify.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___6(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 545, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46334: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_engine_cs * ))it_func))(__data, ring); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46334; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_request_notify.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___6(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 545, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } }
void intel_irq_init(struct drm_i915_private *dev_priv ) ; int intel_irq_install(struct drm_i915_private *dev_priv ) ; void intel_irq_uninstall(struct drm_i915_private *dev_priv ) ; void intel_uncore_check_errors(struct drm_device *dev ) ; void i915_enable_pipestat(struct drm_i915_private *dev_priv , enum pipe pipe , u32 status_mask ) ; void i915_disable_pipestat(struct drm_i915_private *dev_priv , enum pipe pipe , u32 status_mask ) ; void ironlake_enable_display_irq(struct drm_i915_private *dev_priv , u32 mask ) ; void ironlake_disable_display_irq(struct drm_i915_private *dev_priv , u32 mask ) ; void ibx_display_interrupt_update(struct drm_i915_private *dev_priv , uint32_t interrupt_mask , uint32_t enabled_irq_mask ) ;
/* BUG() (ud2 + self-goto) on NULL req, then reports completion by
 * comparing the ring's current seqno (get_seqno hook, lazy_coherency
 * forwarded) against req->seqno via i915_seqno_passed(). */
__inline static bool i915_gem_request_completed___2(struct drm_i915_gem_request *req , bool lazy_coherency ) { u32 seqno ; long tmp ; bool tmp___0 ; { tmp = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2806), "i" (12UL)); ldv_47401: ; goto ldv_47401; } else { } seqno = (*((req->ring)->get_seqno))(req->ring, (int )lazy_coherency); tmp___0 = i915_seqno_passed(seqno, req->seqno); return (tmp___0); } }
void intel_opregion_asle_intr(struct drm_device *dev ) ;
/* Hotplug-pin -> interrupt-bitmask lookup tables, indexed by hpd pin
 * (7 entries; index 0 and unused pins are 0). */
 static u32 const hpd_ibx[7U] = { 0U, 2048U, 64U, 0U, 256U, 512U, 1024U}; static u32 const hpd_cpt[7U] = { 0U, 
524288U, 262144U, 0U, 2097152U, 4194304U, 8388608U}; static u32 const hpd_mask_i915[7U] = { 0U, 512U, 67108864U, 33554432U, 536870912U, 268435456U, 134217728U}; static u32 const hpd_status_g4x[7U] = { 0U, 2048U, 4U, 8U, 393216U, 1572864U, 6291456U};
/* hpd_bxt has only 6 explicit initializers; element [6] defaults to 0. */
 static u32 const hpd_status_i915[7U] = { 0U, 2048U, 64U, 128U, 393216U, 1572864U, 6291456U}; static u32 const hpd_bxt[7U] = { 0U, 0U, 0U, 0U, 16U, 32U}; static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv , u32 pm_iir ) ;
/* Unmask display interrupts: BUG (ud2 + self-goto, CIL's assert_spin_locked)
 * unless irq_lock is held, WARN-and-return if irqs are disabled, then
 * clear "mask" bits in the cached irq_mask, write it to MMIO offset
 * 278532 (0x44004, presumably DEIMR -- confirm in i915_reg.h) and do a
 * posting read of the same register. */
void ironlake_enable_display_irq(struct drm_i915_private *dev_priv , u32 mask ) { int tmp ; long tmp___0 ; int __ret_warn_on ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (153), "i" (12UL)); ldv_48019: ; goto ldv_48019; } else { } tmp___1 = intel_irqs_enabled(dev_priv); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } __ret_warn_on = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 155, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { return; } else { } if ((dev_priv->__annonCompField82.irq_mask & mask) != 0U) { dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask & ~ mask; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278532L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278532L, 0); } else { } return; } }
/* Mirror of the above: sets "mask" bits in irq_mask (masking the
 * interrupts) and writes the same register with a posting read. */
 void ironlake_disable_display_irq(struct drm_i915_private *dev_priv , u32 mask ) { int tmp ; long tmp___0 ; int __ret_warn_on ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (168), "i" (12UL)); ldv_48026: ; goto ldv_48026; } else { } tmp___1 = intel_irqs_enabled(dev_priv); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } __ret_warn_on = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 170, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { return; } else { } if ((dev_priv->__annonCompField82.irq_mask & mask) != mask) { dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask | mask; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278532L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278532L, 0); } else { } return; } }
/* Read-modify-write of the cached GT IRQ mask: WARNs if enabled bits fall
 * outside interrupt_mask, asserts irq_lock held and irqs enabled, then
 * writes the new mask to offset 278548 (0x44014, presumably GTIMR) with
 * a posting read. */
 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv , uint32_t interrupt_mask , uint32_t enabled_irq_mask ) { int tmp ; long tmp___0 ; int __ret_warn_on ; long tmp___1 ; int __ret_warn_on___0 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; { tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (190), "i" (12UL)); ldv_48034: ; goto ldv_48034; } else { } __ret_warn_on = (~ interrupt_mask & enabled_irq_mask) != 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 192, "WARN_ON(enabled_irq_mask & ~interrupt_mask)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = intel_irqs_enabled(dev_priv); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } __ret_warn_on___0 = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 194, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { return; } else { } dev_priv->gt_irq_mask = dev_priv->gt_irq_mask & ~ interrupt_mask; dev_priv->gt_irq_mask = dev_priv->gt_irq_mask | (~ enabled_irq_mask & interrupt_mask); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278548L, dev_priv->gt_irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278548L, 0); return; } }
/* Thin enable/disable wrappers over ilk_update_gt_irq(). */
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv , uint32_t mask ) { { ilk_update_gt_irq(dev_priv, mask, mask); return; } } void gen5_disable_gt_irq(struct drm_i915_private *dev_priv , uint32_t mask ) { { ilk_update_gt_irq(dev_priv, mask, 0U); return; } }
/* PM interrupt register selectors: gen8+ uses the 2793xx offsets, gen6/7
 * the 2785xx ones (IIR/IMR/IER respectively). */
 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv ) { struct drm_i915_private *__p ; { __p = dev_priv; return ((unsigned int )((unsigned char )__p->info.gen) > 7U ? 279336U : 278568U); } } static u32 gen6_pm_imr(struct drm_i915_private *dev_priv ) { struct drm_i915_private *__p ; { __p = dev_priv; return ((unsigned int )((unsigned char )__p->info.gen) > 7U ? 279332U : 278564U); } } static u32 gen6_pm_ier(struct drm_i915_private *dev_priv ) { struct drm_i915_private *__p ; { __p = dev_priv; return ((unsigned int )((unsigned char )__p->info.gen) > 7U ? 279340U : 278572U); } }
/* Updates the cached pm_irq_mask (WARN on enabled bits outside
 * interrupt_mask; assert irq_lock held) and writes PM IMR only when the
 * value actually changed, followed by a posting read. */
 static void snb_update_pm_irq(struct drm_i915_private *dev_priv , uint32_t interrupt_mask , uint32_t enabled_irq_mask ) { uint32_t new_val ; int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; { __ret_warn_on = (~ interrupt_mask & enabled_irq_mask) != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 240, "WARN_ON(enabled_irq_mask & ~interrupt_mask)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (242), "i" (12UL)); ldv_48082: ; goto ldv_48082; } else { } new_val = dev_priv->pm_irq_mask; new_val = ~ interrupt_mask & new_val; new_val = (~ enabled_irq_mask & interrupt_mask) | new_val; if (dev_priv->pm_irq_mask != new_val) { dev_priv->pm_irq_mask = new_val; tmp___2 = gen6_pm_imr(dev_priv); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )tmp___2, dev_priv->pm_irq_mask, 1); tmp___3 = gen6_pm_imr(dev_priv); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___3, 0); } else { } return; } }
/* WARN-and-return if irqs are disabled, else unmask "mask" in PM IMR. */
 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv , uint32_t mask ) { int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 257, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return; } else { } snb_update_pm_irq(dev_priv, mask, mask); return; } }
/* Lock-assumed variant: masks "mask" without the irqs-enabled WARN. */
 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv , uint32_t mask ) { { snb_update_pm_irq(dev_priv, mask, 0U); return; } }
/* Public disable: WARN-and-return if irqs are disabled, then defer to
 * __gen6_disable_pm_irq(); body continues on the next line. */
 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv , uint32_t mask ) { int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 271, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return; } else { } __gen6_disable_pm_irq(dev_priv, mask); return; } }
/* Under irq_lock: writes pm_rps_events to PM IIR twice (double write --
 * presumably to clear edge-latched bits; confirm against the original
 * driver comment), posting read, then clears the cached rps.pm_iir. */
 void gen6_reset_rps_interrupts(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t reg ; u32 tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = gen6_pm_iir(dev_priv); reg = tmp; spin_lock_irq(& dev_priv->irq_lock); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dev_priv->pm_rps_events, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dev_priv->pm_rps_events, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); dev_priv->rps.pm_iir = 0U; spin_unlock_irq(& dev_priv->irq_lock); return; } }
/* Under irq_lock: WARNs on a stale cached rps.pm_iir or pending RPS bits
 * in PM IIR, marks rps.interrupts_enabled, ORs pm_rps_events into PM IER
 * and unmasks them in IMR via gen6_enable_pm_irq(). */
 void gen6_enable_rps_interrupts(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; u32 tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; u32 tmp___3 ; uint32_t tmp___4 ; u32 tmp___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; spin_lock_irq(& dev_priv->irq_lock); __ret_warn_on = dev_priv->rps.pm_iir != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 296, "WARN_ON(dev_priv->rps.pm_iir)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = gen6_pm_iir(dev_priv); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___0, 1); __ret_warn_on___0 = (tmp___1 & dev_priv->pm_rps_events) != 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 297, "WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); dev_priv->rps.interrupts_enabled = 1; tmp___3 = gen6_pm_ier(dev_priv); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___3, 1); tmp___5 = gen6_pm_ier(dev_priv); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )tmp___5, tmp___4 | dev_priv->pm_rps_events, 1); gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); spin_unlock_irq(& dev_priv->irq_lock); return; } }
/* Clears bit 2 on gen<=7 unless the flag byte at dev_priv offset 45 is
 * set (presumably a platform/feature bitfield -- confirm), and always
 * clears bit 31 on gen8+. */
 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv , u32 mask ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { __p = dev_priv; if ((unsigned int )((unsigned char )__p->info.gen) <= 7U) { __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { mask = mask & 4294967291U; } else { } } else { } __p___1 = dev_priv; if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { mask = mask & 2147483647U; } else { } return (mask); } }
/* Disables RPS interrupts: clears interrupts_enabled, cancels the rps
 * work (via the LDV model), writes a fully-masked value to offset 41320
 * (0xA168, presumably GEN6_PMINTRMSK), masks IMR and clears the events
 * from IER, then synchronize_irq() to drain in-flight handlers. */
 void gen6_disable_rps_interrupts(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 tmp ; u32 tmp___0 ; uint32_t tmp___1 ; u32 tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; spin_lock_irq(& dev_priv->irq_lock); dev_priv->rps.interrupts_enabled = 0; spin_unlock_irq(& dev_priv->irq_lock); ldv_cancel_work_sync_390(& dev_priv->rps.work); spin_lock_irq(& dev_priv->irq_lock); tmp = gen6_sanitize_rps_pm_mask(dev_priv, 4294967295U); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 41320L, tmp, 1); __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); tmp___0 = gen6_pm_ier(dev_priv); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___0, 1); tmp___2 = gen6_pm_ier(dev_priv); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )tmp___2, tmp___1 & ~ dev_priv->pm_rps_events, 1); spin_unlock_irq(& dev_priv->irq_lock); synchronize_irq((unsigned int )dev->irq); return; } }
/* South-display (PCH) IMR update: read offset 802820 (0xC4004, presumably
 * SDEIMR), apply interrupt/enabled masks, WARN on bad mask bits and
 * disabled irqs, assert irq_lock held, write back with posting read. */
 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv , uint32_t interrupt_mask , uint32_t enabled_irq_mask ) { uint32_t sdeimr ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; int tmp___1 ; long tmp___2 ; int __ret_warn_on___0 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; long tmp___6 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802820L, 1); sdeimr = tmp; sdeimr = ~ interrupt_mask & sdeimr; sdeimr = (~ enabled_irq_mask & interrupt_mask) | sdeimr; __ret_warn_on = (~ interrupt_mask & enabled_irq_mask) != 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 360, "WARN_ON(enabled_irq_mask & ~interrupt_mask)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (362), "i" (12UL)); ldv_48146: ; goto ldv_48146; } else { } tmp___3 = intel_irqs_enabled(dev_priv); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } __ret_warn_on___0 = tmp___4; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 364, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___6 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___6 != 0L) { return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802820L, sdeimr, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802820L, 0); return; } }
/* Computes the PIPESTAT register for "pipe" from the per-pipe MMIO offset
 * table plus 458788 (0x70024), keeps only the enable bits (mask
 * 0x7FFF0000) of the current value, and -- after lock/irq assertions and
 * a WARN_ONCE on malformed masks (continued on the next lines) -- ORs in
 * enable_mask|status_mask, updating pipestat_irq_mask[pipe]. */
 static void __i915_enable_pipestat(struct drm_i915_private *dev_priv , enum pipe pipe , u32 enable_mask , u32 status_mask ) { u32 reg ; u32 pipestat ; uint32_t tmp ; int tmp___0 ; long tmp___1 ; int __ret_warn_on ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on___0 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; { reg = ((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); pipestat = tmp & 2147418112U; tmp___0 = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (378), "i" (12UL)); ldv_48157: ; goto ldv_48157; } else { } tmp___2 = intel_irqs_enabled(dev_priv); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } __ret_warn_on = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on 
!= 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 379, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_once = (enable_mask & 2147549183U) != 0U || (status_mask & 4294901760U) != 0U; tmp___7 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___7 != 0L) { __ret_warn_on___0 = ! __warned; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 384, "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", (int )pipe + 65, enable_mask, status_mask); } else { } tmp___6 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___6 != 0L) { __warned = 1; } else { } } else { } tmp___8 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___8 != 0L) { return; } else { } if ((pipestat & enable_mask) == enable_mask) { return; } else { } dev_priv->pipestat_irq_mask[(int )pipe] = dev_priv->pipestat_irq_mask[(int )pipe] | status_mask; pipestat = (enable_mask | status_mask) | pipestat; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipestat, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } static void __i915_disable_pipestat(struct drm_i915_private *dev_priv , enum pipe pipe , u32 enable_mask , u32 status_mask ) { u32 reg ; u32 pipestat ; uint32_t tmp ; int tmp___0 ; long tmp___1 ; int __ret_warn_on ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on___0 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; { reg = ((unsigned int 
/* Continuation of __i915_disable_pipestat: compute PIPESTAT address, assert
 * irq_lock held (expanded BUG at ldv_48173), WARN on disabled irqs and on
 * reserved mask bits, then AND-out the enable bits, clear status_mask from
 * pipestat_irq_mask[pipe], write back and posting-read. Early-returns if none
 * of the requested enable bits are currently set. */
)(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); pipestat = tmp & 2147418112U; tmp___0 = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (405), "i" (12UL)); ldv_48173: ; goto ldv_48173; } else { } tmp___2 = intel_irqs_enabled(dev_priv); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } __ret_warn_on = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 406, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_once = (enable_mask & 2147549183U) != 0U || (status_mask & 4294901760U) != 0U; tmp___7 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___7 != 0L) { __ret_warn_on___0 = !
__warned; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 411, "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", (int )pipe + 65, enable_mask, status_mask); } else { } tmp___6 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___6 != 0L) { __warned = 1; } else { } } else { } tmp___8 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___8 != 0L) { return; } else { } if ((pipestat & enable_mask) == 0U) { return; } else { } dev_priv->pipestat_irq_mask[(int )pipe] = dev_priv->pipestat_irq_mask[(int )pipe] & ~ status_mask; pipestat = ~ enable_mask & pipestat; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipestat, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } }
/* vlv_get_pipestat_enable_mask: Valleyview-specific translation of a PIPESTAT
 * status mask into its enable mask. Starts from status_mask << 16, then
 * WARN_ON_ONCE-rejects the PSR status bits (64UL for pipe A here, 8UL for
 * pipe B on the next line) returning 0 in those cases. */
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev , u32 status_mask ) { u32 enable_mask ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; bool __warned___0 ; int __ret_warn_once___0 ; int __ret_warn_on___0 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; { enable_mask = status_mask << 16; __ret_warn_once = ((unsigned long )status_mask & 64UL) != 0UL; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = !
/* Continuation of vlv_get_pipestat_enable_mask: after the two PSR WARN_ON_ONCE
 * checks, clamps enable_mask with 1069547519U (0x3fbfffff) and maps the sprite
 * flip-done status bits (16384UL/32768UL) to their enable bits (4194304U =
 * 0x400000, 1073741824U = 0x40000000). */
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 432, "WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { return (0U); } else { } __ret_warn_once___0 = ((unsigned long )status_mask & 8UL) != 0UL; tmp___5 = ldv__builtin_expect(__ret_warn_once___0 != 0, 0L); if (tmp___5 != 0L) { __ret_warn_on___0 = ! __warned___0; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 438, "WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { __warned___0 = 1; } else { } } else { } tmp___6 = ldv__builtin_expect(__ret_warn_once___0 != 0, 0L); if (tmp___6 != 0L) { return (0U); } else { } enable_mask = enable_mask & 1069547519U; if (((unsigned long )status_mask & 16384UL) != 0UL) { enable_mask = enable_mask | 4194304U; } else { } if (((unsigned long )status_mask & 32768UL) != 0UL) { enable_mask = enable_mask | 1073741824U; } else { } return (enable_mask); } }
/* i915_enable_pipestat: public wrapper. The byte test at __p + 45UL is a CIL
 * expansion of a platform-flag bitfield check (presumably IS_VALLEYVIEW-style;
 * confirm against struct intel_device_info layout): VLV derives the enable
 * mask via vlv_get_pipestat_enable_mask, others use status_mask << 16. */
void i915_enable_pipestat(struct drm_i915_private *dev_priv , enum pipe pipe , u32 status_mask ) { u32 enable_mask ; struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, status_mask);
} else { enable_mask = status_mask << 16; } __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); return; } }
/* i915_disable_pipestat: exact mirror of i915_enable_pipestat, delegating to
 * __i915_disable_pipestat. */
void i915_disable_pipestat(struct drm_i915_private *dev_priv , enum pipe pipe , u32 status_mask ) { u32 enable_mask ; struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, status_mask); } else { enable_mask = status_mask << 16; } __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); return; } }
/* i915_enable_asle_pipestat: enables the legacy-backlight/ASLE pipestat bit
 * (64U) on pipe 1, and also on pipe 0 for gen > 3, under irq_lock. Bails out
 * when there is no opregion ASLE or the flag byte at __p + 44UL is clear. */
static void i915_enable_asle_pipestat(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->opregion.asle == (unsigned long )((struct opregion_asle *)0)) { return; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) == 0U) { return; } else { } } spin_lock_irq(& dev_priv->irq_lock); i915_enable_pipestat(dev_priv, 1, 64U); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 3U) { i915_enable_pipestat(dev_priv, 0, 64U); } else { } spin_unlock_irq(& dev_priv->irq_lock); return; } }
/* i8xx_get_vblank_counter: gen2 has no hardware frame counter — always 0. */
static u32 i8xx_get_vblank_counter(struct drm_device *dev , int pipe ) { { return (0U); } }
/* i915_get_vblank_counter: reads the split high/low frame-count registers
 * (0x70040/0x70044 relative offsets 458816U/458820U) in a retry loop until the
 * high word is stable across the low-word read, then combines them, using the
 * pixel position vs. vblank start to round the count. */
static u32 i915_get_vblank_counter(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long high_frame ; unsigned long low_frame ; u32 high1 ; u32 high2 ; u32 low ; u32 pixel ; u32 vbl_start ; u32 hsync_start ; u32 htotal ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode const *mode ; uint32_t tmp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)dev_priv->pipe_to_crtc_mapping[pipe]; intel_crtc = (struct intel_crtc *)__mptr; mode
/* Continuation of i915_get_vblank_counter: derive vbl_start in pixels from the
 * adjusted mode (halved for interlace, flag bit 16U), then the stable-read
 * retry loop at label ldv_48258. Result is ((high1 << 8) | low) plus one when
 * the pixel counter has passed vbl_start, masked to 24 bits. */
= (struct drm_display_mode const *)(& (intel_crtc->config)->base.adjusted_mode); htotal = (u32 )mode->crtc_htotal; hsync_start = (u32 )mode->crtc_hsync_start; vbl_start = (u32 )mode->crtc_vblank_start; if (((unsigned int )mode->flags & 16U) != 0U) { vbl_start = (vbl_start + 1U) / 2U; } else { } vbl_start = vbl_start * htotal; vbl_start = (hsync_start - htotal) + vbl_start; high_frame = (unsigned long )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458816U); low_frame = (unsigned long )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458820U); ldv_48258: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )high_frame, 1); high1 = tmp & 65535U; low = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )low_frame, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )high_frame, 1); high2 = tmp___0 & 65535U; if (high1 != high2) { goto ldv_48258; } else { } high1 = high1; pixel = low & 16777215U; low = low >> 24; return ((((high1 << 8) | low) + (unsigned int )(pixel >= vbl_start)) & 16777215U); } }
/* gm45_get_vblank_counter: G4x+ has a full hardware frame counter at per-pipe
 * offset 458816U — a single MMIO read suffices. */
static u32 gm45_get_vblank_counter(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; int reg ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458816U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); return (tmp); } }
/* __intel_get_crtc_scanline: read the current scanline from the per-pipe
 * display-scanline register (offset 458752U), masked to 12 bits on gen2 and
 * 13 bits otherwise, then normalized by scanline_offset modulo vtotal
 * (vtotal halved for interlaced modes). Caller must hold uncore.lock. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_display_mode const *mode ; enum pipe pipe ; int position ; int vtotal ; unsigned int tmp ; unsigned int tmp___0 ; struct drm_i915_private *__p ; { dev = crtc->base.dev; dev_priv = (struct
/* Continuation of __intel_get_crtc_scanline (body), then
 * i915_get_crtc_scanoutpos and intel_get_crtc_scanline. */
drm_i915_private *)dev->dev_private; mode = (struct drm_display_mode const *)(& (crtc->config)->base.adjusted_mode); pipe = crtc->pipe; vtotal = mode->crtc_vtotal; if (((unsigned int )mode->flags & 16U) != 0U) { vtotal = vtotal / 2; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { tmp = readl((void const volatile *)dev_priv->regs + (unsigned long )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458752U)); position = (int )tmp & 4095; } else { tmp___0 = readl((void const volatile *)dev_priv->regs + (unsigned long )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458752U)); position = (int )tmp___0 & 8191; } return ((crtc->scanline_offset + position) % vtotal); } }
/* i915_get_crtc_scanoutpos: compute the current scanout position (*vpos/*hpos)
 * and in-vblank status for a pipe, optionally timestamping before (*stime) and
 * after (*etime) the register read, all under uncore.lock with irqs saved.
 * Gen2/VLV-flag/gen>4 paths use the scanline register (vertical resolution
 * only, *hpos = 0); older gens read the 24-bit pixel counter and convert to
 * line/pixel via htotal. Returns 0 for a disabled pipe; otherwise a flag word
 * (ret | 5, plus 2 when inside vblank — presumably DRM_SCANOUTPOS_* flags;
 * confirm against drm_crtc.h). */
static int i915_get_crtc_scanoutpos(struct drm_device *dev , int pipe , unsigned int flags , int *vpos , int *hpos , ktime_t *stime , ktime_t *etime ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode const *mode ; int position ; int vbl_start ; int vbl_end ; int hsync_start ; int htotal ; int vtotal ; bool in_vbl ; int ret ; unsigned long irqflags ; long tmp ; raw_spinlock_t *tmp___0 ; unsigned int tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dev_priv->pipe_to_crtc_mapping[pipe]; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; mode = (struct drm_display_mode const *)(& (intel_crtc->config)->base.adjusted_mode); in_vbl = 1; ret = 0; if (!
intel_crtc->active) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_get_crtc_scanoutpos", "trying to get scanoutpos for disabled pipe %c\n", pipe + 65); } else { } return (0); } else { } htotal = mode->crtc_htotal; hsync_start = mode->crtc_hsync_start; vtotal = mode->crtc_vtotal; vbl_start = mode->crtc_vblank_start; vbl_end = mode->crtc_vblank_end; if (((unsigned int )mode->flags & 16U) != 0U) { vbl_start = (vbl_start + 1) / 2; vbl_end = vbl_end / 2; vtotal = vtotal / 2; } else { } ret = ret | 5; tmp___0 = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp___0); if ((unsigned long )stime != (unsigned long )((ktime_t *)0)) { *stime = ktime_get(); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { position = __intel_get_crtc_scanline(intel_crtc); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { position = __intel_get_crtc_scanline(intel_crtc); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 4U) { position = __intel_get_crtc_scanline(intel_crtc); } else { tmp___1 = readl((void const volatile *)dev_priv->regs + (unsigned long )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458820U)); position = (int )tmp___1 & 16777215; vbl_start = vbl_start * htotal; vbl_end = vbl_end * htotal; vtotal = vtotal * htotal; if (position >= vtotal) { position = vtotal + -1; } else { } position = ((position + htotal) - hsync_start) % vtotal; } } } if ((unsigned long )etime != (unsigned long )((ktime_t *)0)) { *etime = ktime_get(); } else { } spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); in_vbl = (bool )(position >= vbl_start && position < vbl_end); if (position >= vbl_start) { position = position -
vbl_end; } else { position = (vtotal - vbl_end) + position; } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 2U) { *vpos = position; *hpos = 0; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { *vpos = position; *hpos = 0; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 4U) { *vpos = position; *hpos = 0; } else { *vpos = position / htotal; *hpos = position - *vpos * htotal; } } } if ((int )in_vbl) { ret = ret | 2; } else { } return (ret); } }
/* intel_get_crtc_scanline: lock-taking wrapper around
 * __intel_get_crtc_scanline (uncore.lock, irqsave). */
int intel_get_crtc_scanline(struct intel_crtc *crtc ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; int position ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)(crtc->base.dev)->dev_private; tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); position = __intel_get_crtc_scanline(crtc); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return (position); } }
/* i915_get_vblank_timestamp: validate pipe index and crtc existence (-22 ==
 * -EINVAL on failure, -16 == -EBUSY for a disabled crtc) then delegate to
 * drm_calc_vbltimestamp_from_scanoutpos with the crtc's adjusted mode. */
static int i915_get_vblank_timestamp(struct drm_device *dev , int pipe , int *max_error , struct timeval *vblank_time , unsigned int flags ) { struct drm_crtc *crtc ; struct drm_i915_private *__p ; long tmp ; struct drm_crtc const *__mptr ; int tmp___0 ; { if (pipe < 0) { drm_err("Invalid crtc %d\n", pipe); return (-22); } else { __p = to_i915((struct drm_device const *)dev); if ((int )__p->info.num_pipes <= pipe) { drm_err("Invalid crtc %d\n", pipe); return (-22); } else { } } crtc = intel_get_crtc_for_pipe(dev, pipe); if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { drm_err("Invalid crtc %d\n", pipe); return (-22); } else { } if (!
/* Continuation of i915_get_vblank_timestamp: -EBUSY for a disabled crtc, else
 * forward to drm_calc_vbltimestamp_from_scanoutpos. */
(crtc->state)->enable) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_get_vblank_timestamp", "crtc %d is disabled\n", pipe); } else { } return (-16); } else { } __mptr = (struct drm_crtc const *)crtc; tmp___0 = drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, vblank_time, flags, (struct drm_crtc const *)crtc, (struct drm_display_mode const *)(& (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode)); return (tmp___0); } }
/* intel_hpd_irq_event: re-run the connector's ->detect() (with mode_config
 * mutex held — WARN if not) and report whether the connection status changed.
 * Returns 0 (unchanged) or 1 (changed), logging the transition at debug level. */
static bool intel_hpd_irq_event(struct drm_device *dev , struct drm_connector *connector ) { enum drm_connector_status old_status ; int __ret_warn_on ; int tmp ; long tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; long tmp___3 ; { tmp = mutex_is_locked(& dev->mode_config.mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 816, "WARN_ON(!mutex_is_locked(&dev->mode_config.mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); old_status = connector->status; connector->status = (*((connector->funcs)->detect))(connector, 0); if ((unsigned int )connector->status == (unsigned int )old_status) { return (0); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { tmp___1 = drm_get_connector_status_name(connector->status); tmp___2 = drm_get_connector_status_name(old_status); drm_ut_debug_printk("intel_hpd_irq_event", "[CONNECTOR:%d:%s] status updated from %s to %s\n", connector->base.id, connector->name, tmp___2, tmp___1); } else { } return (1); } }
/* i915_digport_work_func: work handler draining the long/short HPD pulse masks
 * for digital ports; the container_of arithmetic is expanded to the constant
 * 0xffffffffffff3678UL offset from the embedded work_struct. */
static void i915_digport_work_func(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; u32 long_port_mask ; u32 short_port_mask ; struct intel_digital_port
/* Continuation of i915_digport_work_func: snapshot and clear the long/short
 * HPD port masks under irq_lock, then for each of the 5 ports call
 * ->hpd_pulse(); ports whose pulse handler returned 0 are accumulated into
 * old_bits and re-queued to the generic hotplug work. */
*intel_dig_port ; int i ; u32 old_bits ; bool valid ; bool long_hpd ; enum irqreturn ret ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff3678UL; old_bits = 0U; spin_lock_irq(& dev_priv->irq_lock); long_port_mask = dev_priv->long_hpd_port_mask; dev_priv->long_hpd_port_mask = 0U; short_port_mask = dev_priv->short_hpd_port_mask; dev_priv->short_hpd_port_mask = 0U; spin_unlock_irq(& dev_priv->irq_lock); i = 0; goto ldv_48395; ldv_48394: valid = 0; long_hpd = 0; intel_dig_port = dev_priv->hpd_irq_port[i]; if ((unsigned long )intel_dig_port == (unsigned long )((struct intel_digital_port *)0) || (unsigned long )intel_dig_port->hpd_pulse == (unsigned long )((enum irqreturn (*)(struct intel_digital_port * , bool ))0)) { goto ldv_48392; } else { } if (((u32 )(1 << i) & long_port_mask) != 0U) { valid = 1; long_hpd = 1; } else if (((u32 )(1 << i) & short_port_mask) != 0U) { valid = 1; } else { } if ((int )valid) { ret = (*(intel_dig_port->hpd_pulse))(intel_dig_port, (int )long_hpd); if ((unsigned int )ret == 0U) { old_bits = (u32 )(1 << (int )intel_dig_port->base.hpd_pin) | old_bits; } else { } } else { } ldv_48392: i = i + 1; ldv_48395: ; if (i <= 4) { goto ldv_48394; } else { } if (old_bits != 0U) { spin_lock_irq(& dev_priv->irq_lock); dev_priv->hpd_event_bits = dev_priv->hpd_event_bits | old_bits; spin_unlock_irq(& dev_priv->irq_lock); schedule_work___0(& dev_priv->hotplug_work); } else { } return; } }
/* i915_hotplug_work_func: hotplug bottom half. First pass (under irq_lock):
 * detect HPD interrupt storms (hpd_mark == 2) and demote those connectors to
 * polling, scheduling hotplug_reenable_work 120000 ms out. Second pass (lock
 * dropped): invoke ->hot_plug() and intel_hpd_irq_event() for connectors whose
 * pin is in the snapshotted hpd_event_bits; fire
 * drm_kms_helper_hotplug_event() if anything changed. */
static void i915_hotplug_work_func(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct drm_device *dev ; struct drm_mode_config *mode_config ; struct intel_connector *intel_connector ; struct intel_encoder *intel_encoder ; struct drm_connector *connector ; bool hpd_disabled ; bool changed ; u32 hpd_event_bits ; long tmp ; struct list_head const *__mptr___0 ; struct drm_connector const *__mptr___1 ; long tmp___0 ; struct list_head const *__mptr___2 ; unsigned long
tmp___1 ; struct list_head const *__mptr___3 ; struct drm_connector const *__mptr___4 ; bool tmp___2 ; struct list_head const *__mptr___5 ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff6328UL; dev = dev_priv->dev; mode_config = & dev->mode_config; hpd_disabled = 0; changed = 0; mutex_lock_nested(& mode_config->mutex, 0U); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_hotplug_work_func", "running encoder hotplug functions\n"); } else { } spin_lock_irq(& dev_priv->irq_lock); hpd_event_bits = dev_priv->hpd_event_bits; dev_priv->hpd_event_bits = 0U; __mptr___0 = (struct list_head const *)mode_config->connector_list.next; connector = (struct drm_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_48420; ldv_48419: __mptr___1 = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr___1; if ((unsigned long )intel_connector->encoder == (unsigned long )((struct intel_encoder *)0)) { goto ldv_48418; } else { } intel_encoder = intel_connector->encoder; if (((unsigned int )intel_encoder->hpd_pin != 0U && (unsigned int )dev_priv->hpd_stats[(unsigned int )intel_encoder->hpd_pin].hpd_mark == 2U) && (unsigned int )connector->polled == 1U) { printk("\016[drm] HPD interrupt storm detected on connector %s: switching from hotplug detection to polling\n", connector->name); dev_priv->hpd_stats[(unsigned int )intel_encoder->hpd_pin].hpd_mark = 1; connector->polled = 6U; hpd_disabled = 1; } else { } if (((u32 )(1 << (int )intel_encoder->hpd_pin) & hpd_event_bits) != 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_hotplug_work_func", "Connector %s (pin %i) received hotplug event.\n", connector->name, (unsigned int )intel_encoder->hpd_pin); } else { } } else { } ldv_48418: __mptr___2 = (struct list_head const *)connector->head.next; connector = (struct drm_connector
*)__mptr___2 + 0xffffffffffffffe8UL; ldv_48420: ; if ((unsigned long )(& connector->head) != (unsigned long )(& mode_config->connector_list)) { goto ldv_48419; } else { } if ((int )hpd_disabled) { drm_kms_helper_poll_enable(dev); tmp___1 = msecs_to_jiffies(120000U); mod_delayed_work(system_wq, & dev_priv->hotplug_reenable_work, tmp___1); } else { } spin_unlock_irq(& dev_priv->irq_lock); __mptr___3 = (struct list_head const *)mode_config->connector_list.next; connector = (struct drm_connector *)__mptr___3 + 0xffffffffffffffe8UL; goto ldv_48430; ldv_48429: __mptr___4 = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr___4; if ((unsigned long )intel_connector->encoder == (unsigned long )((struct intel_encoder *)0)) { goto ldv_48428; } else { } intel_encoder = intel_connector->encoder; if (((u32 )(1 << (int )intel_encoder->hpd_pin) & hpd_event_bits) != 0U) { if ((unsigned long )intel_encoder->hot_plug != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(intel_encoder->hot_plug))(intel_encoder); } else { } tmp___2 = intel_hpd_irq_event(dev, connector); if ((int )tmp___2) { changed = 1; } else { } } else { } ldv_48428: __mptr___5 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___5 + 0xffffffffffffffe8UL; ldv_48430: ; if ((unsigned long )(& connector->head) != (unsigned long )(& mode_config->connector_list)) { goto ldv_48429; } else { } mutex_unlock(& mode_config->mutex); if ((int )changed) { drm_kms_helper_hotplug_event(dev); } else { } return; } }
/* ironlake_rps_change_irq_handler: ILK GPU-frequency interrupt handler. Under
 * mchdev_lock, acks the status register (offset 70020L), reads the busy-up/
 * busy-down averages (70584L/70588L) against max/min thresholds
 * (70044L/70048L) and steps ips.cur_delay via ironlake_set_drps(). Register
 * names are not visible here — confirm offsets against i915_reg.h. */
static void ironlake_rps_change_irq_handler(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 busy_up ; u32 busy_down ; u32 max_avg ; u32 min_avg ; u8 new_delay ; uint32_t tmp ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; spin_lock(& mchdev_lock); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70020L, 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 70020L, (int
/* Continuation of ironlake_rps_change_irq_handler: note new_delay moves by
 * +255U (i.e. -1 modulo 256, a step down in delay) when busy is above max_avg
 * and +1U when below min_avg, clamped to ips.max_delay/min_delay. */
)((uint16_t )tmp), 1); new_delay = dev_priv->ips.cur_delay; (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 70020L, 16, 1); busy_up = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70584L, 1); busy_down = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70588L, 1); max_avg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70044L, 1); min_avg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 70048L, 1); if (busy_up > max_avg) { if ((int )dev_priv->ips.cur_delay != (int )dev_priv->ips.max_delay) { new_delay = (unsigned int )dev_priv->ips.cur_delay + 255U; } else { } if ((int )dev_priv->ips.max_delay > (int )new_delay) { new_delay = dev_priv->ips.max_delay; } else { } } else if (busy_down < min_avg) { if ((int )dev_priv->ips.cur_delay != (int )dev_priv->ips.min_delay) { new_delay = (unsigned int )dev_priv->ips.cur_delay + 1U; } else { } if ((int )dev_priv->ips.min_delay < (int )new_delay) { new_delay = dev_priv->ips.min_delay; } else { } } else { } tmp___0 = ironlake_set_drps(dev, (int )new_delay); if ((int )tmp___0) { dev_priv->ips.cur_delay = new_delay; } else { } spin_unlock(& mchdev_lock); return; } }
/* notify_ring: wake all waiters on an initialized ring's irq_queue after
 * tracing the request notification. No-op for uninitialized rings. */
static void notify_ring(struct intel_engine_cs *ring ) { bool tmp ; int tmp___0 ; { tmp = intel_ring_initialized(ring); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } trace_i915_gem_request_notify(ring); __wake_up(& ring->irq_queue, 3U, 0, (void *)0); return; } }
/* vlv_c0_read: snapshot the CZ clock (punit reg 206U) and the render/media C0
 * residency counters (1278232L/1278236L) into *ei. */
static void vlv_c0_read(struct drm_i915_private *dev_priv , struct intel_rps_ei *ei ) { { ei->cz_clock = vlv_punit_read(dev_priv, 206U); ei->render_c0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278232L, 1); ei->media_c0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278236L, 1); return; } }
/* vlv_c0_above: true when combined render+media C0 residency delta (scaled by
 * 40000ULL) meets mem_freq * threshold * elapsed CZ-clock time. Returns false
 * when the old snapshot is uninitialized (cz_clock == 0). Unsigned
 * subtraction handles counter wraparound. */
static bool vlv_c0_above(struct drm_i915_private *dev_priv , struct intel_rps_ei const *old , struct intel_rps_ei const *now , int threshold ) { u64 time ; u64 c0 ; { if ((unsigned int )old->cz_clock == 0U) { return (0); } else { } time = (u64 )((unsigned int
)now->cz_clock - (unsigned int )old->cz_clock); time = (u64 )(dev_priv->mem_freq * (unsigned int )threshold) * time; c0 = (u64 )((unsigned int )now->render_c0 - (unsigned int )old->render_c0); c0 = (u64 )((unsigned int )now->media_c0 - (unsigned int )old->media_c0) + c0; c0 = c0 * 40000ULL; return (c0 >= time); } }
/* gen6_rps_reset_ei: re-baseline both EI snapshots to the current counters. */
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv ) { { vlv_c0_read(dev_priv, & dev_priv->rps.down_ei); dev_priv->rps.up_ei = dev_priv->rps.down_ei; return; } }
/* vlv_wa_c0_ei: VLV C0-residency workaround. Converts raw up/down EI interrupt
 * bits in pm_iir (4U/2U) into synthesized down-threshold (16U) / up-threshold
 * (32U) events based on measured residency, updating the respective snapshot.
 * Returns 0 when neither EI bit is set or the clock reads back 0. */
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv , u32 pm_iir ) { struct intel_rps_ei now ; u32 events ; bool tmp ; int tmp___0 ; bool tmp___1 ; { events = 0U; if ((pm_iir & 6U) == 0U) { return (0U); } else { } vlv_c0_read(dev_priv, & now); if (now.cz_clock == 0U) { return (0U); } else { } if ((pm_iir & 2U) != 0U) { tmp = vlv_c0_above(dev_priv, (struct intel_rps_ei const *)(& dev_priv->rps.down_ei), (struct intel_rps_ei const *)(& now), (int )dev_priv->rps.down_threshold); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { events = events | 16U; } else { } dev_priv->rps.down_ei = now; } else { } if ((pm_iir & 4U) != 0U) { tmp___1 = vlv_c0_above(dev_priv, (struct intel_rps_ei const *)(& dev_priv->rps.up_ei), (struct intel_rps_ei const *)(& now), (int )dev_priv->rps.up_threshold); if ((int )tmp___1) { events = events | 32U; } else { } dev_priv->rps.up_ei = now; } else { } return (events); } }
/* any_waiters: true if any of the (up to 5) initialized rings has a non-zero
 * irq_refcount, i.e. someone is waiting on a GPU interrupt. */
static bool any_waiters(struct drm_i915_private *dev_priv ) { struct intel_engine_cs *ring ; int i ; bool tmp ; { i = 0; goto ldv_48471; ldv_48470: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { if (ring->irq_refcount != 0U) { return (1); } else { } } else { } i = i + 1; ldv_48471: ; if (i <= 4) { goto ldv_48470; } else { } return (0); } }
/* gen6_pm_rps_work: RPS (render power state) work handler — recomputes the GPU
 * frequency from pending PM interrupt bits and client boost requests. */
static void gen6_pm_rps_work(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; bool client_boost ; int new_delay ; int
/* Continuation of gen6_pm_rps_work: snapshot and clear rps.pm_iir and
 * client_boost under irq_lock (re-enabling the PM irqs), WARN on unexpected
 * bits, then under rps.hw_lock fold in vlv_wa_c0_ei events and step the
 * frequency: boost -> max_freq_softlimit; up-threshold (32U) grows `adj`
 * (doubling, gen8-VLV variant starts at 2); down-timeout (64U) drops to
 * efficient/min; down-threshold (16U) shrinks `adj`. The result is clamped to
 * [min_freq_softlimit, max_freq_softlimit] via the expanded min/max idiom and
 * applied with intel_set_rps(). */
adj ; int min ; int max ; u32 pm_iir ; int __ret_warn_on ; long tmp ; u32 tmp___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; bool tmp___1 ; int __min1 ; int __max1 ; int __max2 ; int __min2 ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff46e8UL; spin_lock_irq(& dev_priv->irq_lock); if (! dev_priv->rps.interrupts_enabled) { spin_unlock_irq(& dev_priv->irq_lock); return; } else { } pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0U; gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); client_boost = dev_priv->rps.client_boost; dev_priv->rps.client_boost = 0; spin_unlock_irq(& dev_priv->irq_lock); __ret_warn_on = (~ dev_priv->pm_rps_events & pm_iir) != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 1108, "WARN_ON(pm_iir & ~dev_priv->pm_rps_events)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((dev_priv->pm_rps_events & pm_iir) == 0U && !
client_boost) { return; } else { } mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp___0 = vlv_wa_c0_ei(dev_priv, pm_iir); pm_iir = tmp___0 | pm_iir; adj = dev_priv->rps.last_adj; new_delay = (int )dev_priv->rps.cur_freq; min = (int )dev_priv->rps.min_freq_softlimit; max = (int )dev_priv->rps.max_freq_softlimit; if ((int )client_boost) { new_delay = (int )dev_priv->rps.max_freq_softlimit; adj = 0; } else if ((pm_iir & 32U) != 0U) { if (adj > 0) { adj = adj * 2; } else { __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { adj = 2; } else { adj = 1; } } else { adj = 1; } } if ((int )dev_priv->rps.efficient_freq - adj > new_delay) { new_delay = (int )dev_priv->rps.efficient_freq; adj = 0; } else { } } else { tmp___1 = any_waiters(dev_priv); if ((int )tmp___1) { adj = 0; } else if ((pm_iir & 64U) != 0U) { if ((int )dev_priv->rps.cur_freq > (int )dev_priv->rps.efficient_freq) { new_delay = (int )dev_priv->rps.efficient_freq; } else { new_delay = (int )dev_priv->rps.min_freq_softlimit; } adj = 0; } else if ((pm_iir & 16U) != 0U) { if (adj < 0) { adj = adj * 2; } else { __p___5 = dev_priv; if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { __p___6 = dev_priv; if ((unsigned int )((unsigned char )__p___6->info.gen) == 8U) { adj = -2; } else { adj = -1; } } else { adj = -1; } } } else { adj = 0; } } dev_priv->rps.last_adj = adj; new_delay = new_delay + adj; __max1 = new_delay; __max2 = min; __min1 = __max1 > __max2 ? __max1 : __max2; __min2 = max; new_delay = __min1 < __min2 ?
__min1 : __min2; intel_set_rps(dev_priv->dev, (int )((u8 )new_delay)); mutex_unlock(& dev_priv->rps.hw_lock); return; } }
/* ivybridge_parity_work: work handler for GT L3 parity errors. Under
 * struct_mutex, disables L3 clock gating (reg 37924L, bit 0), then for each
 * slice flagged in l3_parity.which_slice reads the error-status register
 * (slice * 512 + 45064), extracts row/bank/subbank fields, acks the error
 * (write 8320U) and emits a KOBJ_CHANGE uevent describing it. */
static void ivybridge_parity_work(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; u32 error_status ; u32 row ; u32 bank ; u32 subbank ; char *parity_event[6U] ; uint32_t misccpctl ; uint8_t slice ; int __ret_warn_on ; long tmp ; long tmp___0 ; u32 reg ; bool __warned ; int __ret_warn_once ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp___1 ; int tmp___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int __ret_warn_on___0 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; int tmp___8 ; int __ret_warn_on___1 ; long tmp___9 ; struct drm_i915_private *__p___5 ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff4748UL; slice = 0U; mutex_lock_nested(& (dev_priv->dev)->struct_mutex, 0U); __ret_warn_on = dev_priv->l3_parity.which_slice == 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 1194, "WARN_ON(!dev_priv->l3_parity.which_slice)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { goto out; } else { } misccpctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37924L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37924L, misccpctl & 4294967294U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37924L, 0); goto ldv_48566; ldv_48565: slice = (uint8_t )((int )slice - 1); __p___3 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const
/* Continuation of ivybridge_parity_work: the _L/device_id maze is the CIL
 * expansion of NUM_L3_SLICES() for the WARN_ON_ONCE(slice >= ...) bound check;
 * then per-slice error decode, uevent emission, and cleanup (restore
 * MISCCPCTL-like reg 37924L, WARN if which_slice is nonzero at `out`, and
 * re-enable the GT parity interrupt — 2080U vs 32U depending on platform). */
*)dev_priv->dev); if (((int )__p___4->info.device_id & 240) != 32) { _L: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { tmp___1 = 1; } else { __p___2 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { tmp___1 = 1; } else { tmp___1 = 0; } } tmp___2 = tmp___1; } else { tmp___2 = 2; } } __ret_warn_once = (int )slice >= tmp___2; tmp___5 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___5 != 0L) { __ret_warn_on___0 = ! __warned; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 1205, "WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { __warned = 1; } else { } } else { } tmp___6 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___6 != 0L) { goto ldv_48563; } else { } dev_priv->l3_parity.which_slice = dev_priv->l3_parity.which_slice & ~ (1 << (int )slice); reg = (u32 )((int )slice * 512 + 45064); error_status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); row = (error_status & 33538048U) >> 14; bank = (error_status & 6144U) >> 11; subbank = (error_status & 1792U) >> 8; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, 8320U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); parity_event[0] = (char *)"L3_PARITY_ERROR=1"; parity_event[1] = kasprintf(208U, "ROW=%d", row); parity_event[2] = kasprintf(208U, "BANK=%d", bank); parity_event[3] = kasprintf(208U, "SUBBANK=%d", subbank); parity_event[4] = kasprintf(208U, "SLICE=%d", (int )slice); parity_event[5] = (char *)0; kobject_uevent_env(&
(((dev_priv->dev)->primary)->kdev)->kobj, 2, (char **)(& parity_event)); tmp___7 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("ivybridge_parity_work", "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", (int )slice, row, bank, subbank); } else { } kfree((void const *)parity_event[4]); kfree((void const *)parity_event[3]); kfree((void const *)parity_event[2]); kfree((void const *)parity_event[1]); ldv_48566: tmp___8 = ffs(dev_priv->l3_parity.which_slice); slice = (uint8_t )tmp___8; if ((unsigned int )slice != 0U) { goto ldv_48565; } else { } ldv_48563: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 37924L, misccpctl, 1); out: __ret_warn_on___1 = dev_priv->l3_parity.which_slice != 0; tmp___9 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 1242, "WARN_ON(dev_priv->l3_parity.which_slice)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); spin_lock_irq(& dev_priv->irq_lock); __p___5 = to_i915((struct drm_device const *)dev_priv->dev); gen5_enable_gt_irq(dev_priv, (unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U ?
2080U : 32U); spin_unlock_irq(& dev_priv->irq_lock); mutex_unlock(& (dev_priv->dev)->struct_mutex); return; } }
/* ivybridge_parity_error_irq_handler: top-half for the L3 parity interrupt —
 * returns early on non-applicable platforms, masks the GT parity irq under
 * irq_lock, then (continues past this chunk — definition incomplete here;
 * code left untouched). */
static void ivybridge_parity_error_irq_handler(struct drm_device *dev , u32 iir ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return; } else { } } else { } spin_lock(& dev_priv->irq_lock); __p___1 = to_i915((struct drm_device const *)dev); gen5_disable_gt_irq(dev_priv, (unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U ? 2080U : 32U); spin_unlock(& dev_priv->irq_lock); __p___2 = to_i915((struct drm_device const *)dev); iir = ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U ?
/* NOTE(review): CIL-generated verification code — comments added only, all code
 * tokens byte-identical.  Tail of ivybridge_parity_error_irq_handler(): iir has
 * just been masked to this platform's L3-parity bits (2080U for the HSW-style
 * flag byte, 32U otherwise).  Bit 11 (2048U) marks slice 1, bit 5 (32U) slice 0;
 * the matching bits are latched into l3_parity.which_slice and the deferred
 * handler (l3_parity.error_work, i.e. ivybridge_parity_work) is queued. */
2080U : 32U) & iir; if ((iir & 2048U) != 0U) { dev_priv->l3_parity.which_slice = dev_priv->l3_parity.which_slice | 2; } else { } if ((iir & 32U) != 0U) { dev_priv->l3_parity.which_slice = dev_priv->l3_parity.which_slice | 1; } else { } queue_work___1(dev_priv->wq, & dev_priv->l3_parity.error_work); return; } }
/* ilk_gt_irq_handler() — Ironlake GT interrupt dispatch: bits 0/4 (mask 17U)
 * wake ring 0 (render), bit 5 (32U) wakes ring 1.  No other GT sources are
 * handled on this generation. */
static void ilk_gt_irq_handler(struct drm_device *dev , struct drm_i915_private *dev_priv , u32 gt_iir ) { { if ((gt_iir & 17U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring)); } else { } if ((gt_iir & 32U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 1UL); } else { } return; } }
/* snb_gt_irq_handler() — SNB+ GT interrupt dispatch: mask 17U -> ring 0,
 * 4096U -> ring 1, 4194304U -> ring 2.  Mask 33587208U logs a command-parser
 * error (debug only).  The final test reads the flag byte at offset 45 of the
 * i915 private — presumably an IVB/HSW-class platform flag (TODO confirm
 * against struct drm_i915_private layout) — to choose the L3-parity bit mask
 * forwarded to ivybridge_parity_error_irq_handler on the next source line. */
static void snb_gt_irq_handler(struct drm_device *dev , struct drm_i915_private *dev_priv , u32 gt_iir ) { long tmp ; struct drm_i915_private *__p ; { if ((gt_iir & 17U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring)); } else { } if ((gt_iir & 4096U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 1UL); } else { } if ((gt_iir & 4194304U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 2UL); } else { } if ((gt_iir & 33587208U) != 0U) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("snb_gt_irq_handler", "Command parser error, gt_iir 0x%08x\n", gt_iir); } else { } } else { } __p = to_i915((struct drm_device const *)dev); if ((((unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 
2080U : 32U) & gt_iir) != 0U) { ivybridge_parity_error_irq_handler(dev, gt_iir); } else { } return; } } static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv , u32 master_ctl ) { irqreturn_t ret ; u32 tmp ; unsigned int tmp___0 ; u32 tmp___1 ; unsigned int tmp___2 ; u32 tmp___3 ; unsigned int tmp___4 ; u32 tmp___5 ; unsigned int tmp___6 ; { ret = 0; if ((master_ctl & 3U) != 0U) { tmp___0 = readl((void const volatile *)dev_priv->regs + 279304U); tmp = tmp___0; if (tmp != 0U) { writel(tmp, (void volatile *)dev_priv->regs + 279304U); ret = 1; if ((tmp & 256U) != 0U) { intel_lrc_irq_handler((struct intel_engine_cs *)(& dev_priv->ring)); } else { } if ((int )tmp & 1) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring)); } else { } if ((tmp & 16777216U) != 0U) { intel_lrc_irq_handler((struct intel_engine_cs *)(& dev_priv->ring) + 2UL); } else { } if ((tmp & 65536U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 2UL); } else { } } else { drm_err("The master control interrupt lied (GT0)!\n"); } } else { } if ((master_ctl & 12U) != 0U) { tmp___2 = readl((void const volatile *)dev_priv->regs + 279320U); tmp___1 = tmp___2; if (tmp___1 != 0U) { writel(tmp___1, (void volatile *)dev_priv->regs + 279320U); ret = 1; if ((tmp___1 & 256U) != 0U) { intel_lrc_irq_handler((struct intel_engine_cs *)(& dev_priv->ring) + 1UL); } else { } if ((int )tmp___1 & 1) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 1UL); } else { } if ((tmp___1 & 16777216U) != 0U) { intel_lrc_irq_handler((struct intel_engine_cs *)(& dev_priv->ring) + 4UL); } else { } if ((tmp___1 & 65536U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 4UL); } else { } } else { drm_err("The master control interrupt lied (GT1)!\n"); } } else { } if ((master_ctl & 64U) != 0U) { tmp___4 = readl((void const volatile *)dev_priv->regs + 279352U); tmp___3 = tmp___4; if (tmp___3 != 0U) { writel(tmp___3, (void volatile *)dev_priv->regs + 279352U); ret = 1; if 
/* NOTE(review): CIL-generated verification code — comments added only, all code
 * tokens byte-identical.  Tail of gen8_gt_irq_handler(): GT3 identity register
 * handling (bit 8 -> execlist irq, bit 0 -> user irq for ring 3) followed by
 * the PM block: the identity register at regs+279336 (0x44328 — presumably
 * GEN8_GT_IIR(2); confirm against i915_reg.h) is masked with pm_rps_events,
 * acked by writing the same bits back, and routed to gen6_rps_irq_handler. */
((tmp___3 & 256U) != 0U) { intel_lrc_irq_handler((struct intel_engine_cs *)(& dev_priv->ring) + 3UL); } else { } if ((int )tmp___3 & 1) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 3UL); } else { } } else { drm_err("The master control interrupt lied (GT3)!\n"); } } else { } if ((master_ctl & 16U) != 0U) { tmp___6 = readl((void const volatile *)dev_priv->regs + 279336U); tmp___5 = tmp___6; if ((dev_priv->pm_rps_events & tmp___5) != 0U) { writel(dev_priv->pm_rps_events & tmp___5, (void volatile *)dev_priv->regs + 279336U); ret = 1; gen6_rps_irq_handler(dev_priv, tmp___5); } else { drm_err("The master control interrupt lied (PM)!\n"); } } else { } return (ret); } }
/* pch_port_to_hotplug_shift() — map enum port (B=1, C=2, D=3) to the bit
 * position of that port's field in the PCH digital hotplug register:
 * 0/8/16 respectively.  Ports A (0) and E (4) have no field -> -1. */
static int pch_port_to_hotplug_shift(enum port port ) { { switch ((unsigned int )port) { case 0U: ; case 4U: ; default: ; return (-1); case 1U: ; return (0); case 2U: ; return (8); case 3U: ; return (16); } } }
/* i915_port_to_hotplug_shift() — same mapping for the CPU-side (non-PCH)
 * hotplug status word: ports B/C/D -> shifts 17/19/21, others -> -1. */
static int i915_port_to_hotplug_shift(enum port port ) { { switch ((unsigned int )port) { case 0U: ; case 4U: ; default: ; return (-1); case 1U: ; return (17); case 2U: ; return (19); case 3U: ; return (21); } } }
/* get_port_from_pin() — HPD pins 4/5/6 correspond to digital ports B/C/D
 * (enum values 1/2/3); every other pin reports port A (0). */
static enum port get_port_from_pin(enum hpd_pin pin ) { { switch ((unsigned int )pin) { case 4U: ; return (1); case 5U: ; return (2); case 6U: ; return (3); default: ; return (0); } } }
/* intel_hpd_irq_handler() — shared hotplug dispatch used by both CPU- and
 * PCH-side handlers.  From the body on the following source lines: decodes
 * which HPD pins in *hpd triggered, classifies digital-port events as
 * long/short pulses (using the shift helpers above), does per-pin interrupt
 * storm accounting, and queues dig_port_work / hotplug_work as needed.
 * Only the declaration list appears on this line. */
static void intel_hpd_irq_handler(struct drm_device *dev , u32 hotplug_trigger , u32 dig_hotplug_reg , u32 const *hpd ) { struct drm_i915_private *dev_priv ; int i ; enum port port ; bool storm_detected ; bool queue_dig ; bool queue_hp ; u32 dig_shift ; u32 dig_port_mask ; long tmp ; bool long_hpd ; int tmp___0 ; int tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp___2 ; bool __warned ; int __ret_warn_once ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp___3 ; int __ret_warn_on ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; unsigned long tmp___10 ; { dev_priv = (struct 
drm_i915_private *)dev->dev_private; storm_detected = 0; queue_dig = 0; queue_hp = 0; dig_port_mask = 0U; if (hotplug_trigger == 0U) { return; } else { } tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_hpd_irq_handler", "hotplug event received, stat 0x%08x, dig 0x%08x\n", hotplug_trigger, dig_hotplug_reg); } else { } spin_lock(& dev_priv->irq_lock); i = 1; goto ldv_48685; ldv_48684: ; if (((unsigned int )*(hpd + (unsigned long )i) & hotplug_trigger) == 0U) { goto ldv_48670; } else { } port = get_port_from_pin((enum hpd_pin )i); if ((unsigned int )port != 0U && (unsigned long )dev_priv->hpd_irq_port[(unsigned int )port] != (unsigned long )((struct intel_digital_port *)0)) { __p = dev_priv; if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { tmp___0 = pch_port_to_hotplug_shift(port); dig_shift = (u32 )tmp___0; long_hpd = ((dig_hotplug_reg >> (int )dig_shift) & 2U) != 0U; } else { tmp___1 = i915_port_to_hotplug_shift(port); dig_shift = (u32 )tmp___1; long_hpd = ((hotplug_trigger >> (int )dig_shift) & 2U) != 0U; } } else { tmp___1 = i915_port_to_hotplug_shift(port); dig_shift = (u32 )tmp___1; long_hpd = ((hotplug_trigger >> (int )dig_shift) & 2U) != 0U; } tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_hpd_irq_handler", "digital hpd port %c - %s\n", (unsigned int )port + 65U, (int )long_hpd ? 
(char *)"long" : (char *)"short"); } else { } if ((int )long_hpd) { dev_priv->long_hpd_port_mask = dev_priv->long_hpd_port_mask | (u32 )(1 << (int )port); dig_port_mask = (u32 )*(hpd + (unsigned long )i) | dig_port_mask; } else { dev_priv->short_hpd_port_mask = dev_priv->short_hpd_port_mask | (u32 )(1 << (int )port); hotplug_trigger = (u32 )(~ *(hpd + (unsigned long )i)) & hotplug_trigger; } queue_dig = 1; } else { } ldv_48670: i = i + 1; ldv_48685: ; if (i <= 6) { goto ldv_48684; } else { } i = 1; goto ldv_48718; ldv_48717: ; if (((unsigned int )*(hpd + (unsigned long )i) & hotplug_trigger) != 0U && (unsigned int )dev_priv->hpd_stats[i].hpd_mark == 1U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 4U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { tmp___3 = 1; } else { tmp___3 = 0; } } else { tmp___3 = 0; } __ret_warn_once = tmp___3; tmp___6 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___6 != 0L) { __ret_warn_on = ! 
__warned; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 1488, "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", hotplug_trigger, i, *(hpd + (unsigned long )i)); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); goto ldv_48704; } else { } if (((unsigned int )*(hpd + (unsigned long )i) & hotplug_trigger) == 0U || (unsigned int )dev_priv->hpd_stats[i].hpd_mark != 0U) { goto ldv_48704; } else { } if (((u32 )*(hpd + (unsigned long )i) & dig_port_mask) == 0U) { dev_priv->hpd_event_bits = dev_priv->hpd_event_bits | (u32 )(1 << i); queue_hp = 1; } else { } if ((long )((unsigned long )jiffies - dev_priv->hpd_stats[i].hpd_last_jiffies) < 0L) { goto _L; } else { tmp___10 = msecs_to_jiffies(1000U); if ((long )((dev_priv->hpd_stats[i].hpd_last_jiffies + tmp___10) - (unsigned long )jiffies) < 0L) { _L: /* CIL Label */ dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; dev_priv->hpd_stats[i].hpd_cnt = 0; tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_hpd_irq_handler", "Received HPD interrupt on PIN %d - cnt: 0\n", i); } else { } } else if (dev_priv->hpd_stats[i].hpd_cnt > 5) { dev_priv->hpd_stats[i].hpd_mark = 2; dev_priv->hpd_event_bits = dev_priv->hpd_event_bits & (u32 )(~ (1 << i)); tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_hpd_irq_handler", "HPD interrupt storm detected on PIN %d\n", i); } else { } storm_detected = 1; } else { dev_priv->hpd_stats[i].hpd_cnt = dev_priv->hpd_stats[i].hpd_cnt + 1; tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if 
(tmp___9 != 0L) { drm_ut_debug_printk("intel_hpd_irq_handler", "Received HPD interrupt on PIN %d - cnt: %d\n", i, dev_priv->hpd_stats[i].hpd_cnt); } else { } } } ldv_48704: i = i + 1; ldv_48718: ; if (i <= 6) { goto ldv_48717; } else { } if ((int )storm_detected) { (*(dev_priv->display.hpd_irq_setup))(dev); } else { } spin_unlock(& dev_priv->irq_lock); if ((int )queue_dig) { queue_work___1(dev_priv->dp_wq, & dev_priv->dig_port_work); } else { } if ((int )queue_hp) { schedule_work___0(& dev_priv->hotplug_work); } else { } return; } } static void gmbus_irq_handler(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __wake_up(& dev_priv->gmbus_wait_queue, 3U, 0, (void *)0); return; } } static void dp_aux_irq_handler(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __wake_up(& dev_priv->gmbus_wait_queue, 3U, 0, (void *)0); return; } } static void display_pipe_crc_irq_handler(struct drm_device *dev , enum pipe pipe , uint32_t crc0 , uint32_t crc1 , uint32_t crc2 , uint32_t crc3 , uint32_t crc4 ) { struct drm_i915_private *dev_priv ; struct intel_pipe_crc *pipe_crc ; struct intel_pipe_crc_entry *entry ; int head ; int tail ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe_crc = (struct intel_pipe_crc *)(& dev_priv->pipe_crc) + (unsigned long )pipe; spin_lock(& pipe_crc->lock); if ((unsigned long )pipe_crc->entries == (unsigned long )((struct intel_pipe_crc_entry *)0)) { spin_unlock(& pipe_crc->lock); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("display_pipe_crc_irq_handler", "spurious interrupt\n"); } else { } return; } else { } head = pipe_crc->head; tail = pipe_crc->tail; if (((~ head + tail) & 127) <= 0) { spin_unlock(& pipe_crc->lock); drm_err("CRC buffer overflowing\n"); return; } else { } entry = pipe_crc->entries + (unsigned long )head; entry->frame = 
(*((dev->driver)->get_vblank_counter))(dev, (int )pipe); entry->crc[0] = crc0; entry->crc[1] = crc1; entry->crc[2] = crc2; entry->crc[3] = crc3; entry->crc[4] = crc4; head = (head + 1) & 127; pipe_crc->head = head; spin_unlock(& pipe_crc->lock); __wake_up(& pipe_crc->wq, 1U, 1, (void *)0); return; } } static void hsw_pipe_crc_irq_handler(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393316U), 1); display_pipe_crc_irq_handler(dev, pipe, tmp, 0U, 0U, 0U, 0U); return; } } static void ivb_pipe_crc_irq_handler(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393332U), 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393328U), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393324U), 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393320U), 1); tmp___3 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393316U), 1); display_pipe_crc_irq_handler(dev, pipe, tmp___3, tmp___2, tmp___1, tmp___0, tmp); return; } } static void i9xx_pipe_crc_irq_handler(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; uint32_t res1 ; uint32_t res2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 2U) { res1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393324U), 1); } else { res1 = 0U; } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 4U) { res2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393344U), 1); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { res2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393344U), 1); } else { res2 = 0U; } } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393320U), 1); tmp___0 = 
/* NOTE(review): CIL-generated verification code — comments added only, all code
 * tokens byte-identical.  Tail of i9xx_pipe_crc_irq_handler(): reads the two
 * remaining per-pipe CRC result registers (pipe-relative offsets 393316U and
 * 393312U) and forwards all five CRC words to display_pipe_crc_irq_handler. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393316U), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393312U), 1); display_pipe_crc_irq_handler(dev, pipe, tmp___1, tmp___0, tmp, res1, res2); return; } }
/* gen6_rps_irq_handler() — RPS (turbo) PM interrupt handling.  Under irq_lock:
 * disable the triggered pm_rps_events bits, accumulate them into rps.pm_iir,
 * and queue rps.work (only while rps.interrupts_enabled).  On gen > 7 the
 * function stops here; otherwise, if ring_mask bit 3 is set (a fourth ring —
 * presumably VEBOX; confirm), bit 10 (1024U) wakes ring 3 and bit 12 (4096U)
 * logs a command-parser error (debug only). */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv , u32 pm_iir ) { struct drm_i915_private *__p ; long tmp ; struct drm_i915_private *__p___0 ; { if ((dev_priv->pm_rps_events & pm_iir) != 0U) { spin_lock(& dev_priv->irq_lock); gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events & pm_iir); if ((int )dev_priv->rps.interrupts_enabled) { dev_priv->rps.pm_iir = dev_priv->rps.pm_iir | (dev_priv->pm_rps_events & pm_iir); queue_work___1(dev_priv->wq, & dev_priv->rps.work); } else { } spin_unlock(& dev_priv->irq_lock); } else { } __p = dev_priv; if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { return; } else { } __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if (((int )__p___0->info.ring_mask & 8) != 0) { if ((pm_iir & 1024U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 3UL); } else { } if ((pm_iir & 4096U) != 0U) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen6_rps_irq_handler", "Command parser error, pm_iir 0x%08x\n", pm_iir); } else { } } else { } } else { } return; } }
/* intel_pipe_handle_vblank() — thin wrapper: returns true (1) iff
 * drm_handle_vblank() reported the vblank event as handled for this pipe. */
static bool intel_pipe_handle_vblank(struct drm_device *dev , enum pipe pipe ) { bool tmp ; int tmp___0 ; { tmp = drm_handle_vblank(dev, (int )pipe); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } return (1); } }
/* valleyview_pipestat_irq_handler() — per-pipe PIPESTAT decoding for VLV/CHV;
 * body continues on the following source lines (collects/acks pipe status,
 * then dispatches vblank, page-flip, CRC and FIFO-underrun events). */
static void valleyview_pipestat_irq_handler(struct drm_device *dev , u32 iir ) { struct drm_i915_private 
*dev_priv ; u32 pipe_stats[3U] ; int pipe ; int reg ; u32 mask ; u32 iir_bit ; uint32_t tmp ; struct drm_i915_private *__p ; bool tmp___0 ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe_stats[0] = 0U; pipe_stats[1] = 0U; pipe_stats[2] = 0U; spin_lock(& dev_priv->irq_lock); pipe = 0; goto ldv_48821; ldv_48820: iir_bit = 0U; mask = 2147483648U; switch (pipe) { case 0: iir_bit = 64U; goto ldv_48816; case 1: iir_bit = 16U; goto ldv_48816; case 2: iir_bit = 512U; goto ldv_48816; } ldv_48816: ; if ((iir & iir_bit) != 0U) { mask = dev_priv->pipestat_irq_mask[pipe] | mask; } else { } if (mask == 0U) { goto ldv_48819; } else { } reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U); mask = mask | 2147418112U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); pipe_stats[pipe] = tmp & mask; if (((unsigned long )pipe_stats[pipe] & 2147549183UL) != 0UL) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipe_stats[pipe], 1); } else { } ldv_48819: pipe = pipe + 1; ldv_48821: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_48820; } else { } spin_unlock(& dev_priv->irq_lock); pipe = 0; goto ldv_48830; ldv_48829: ; if (((unsigned long )pipe_stats[pipe] & 4UL) != 0UL) { tmp___0 = intel_pipe_handle_vblank(dev, (enum pipe )pipe); if ((int )tmp___0) { intel_check_page_flip(dev, pipe); } else { } } else { } if (((unsigned long )pipe_stats[pipe] & 1024UL) != 0UL) { intel_prepare_page_flip(dev, pipe); intel_finish_page_flip(dev, pipe); } else { } if (((unsigned long )pipe_stats[pipe] & 4096UL) != 0UL) { i9xx_pipe_crc_irq_handler(dev, (enum pipe )pipe); } else { } if ((int )pipe_stats[pipe] < 0) { intel_cpu_fifo_underrun_irq_handler(dev_priv, (enum pipe )pipe); } else { } pipe = pipe + 1; ldv_48830: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > pipe) { goto ldv_48829; } 
else { } if (((unsigned long )pipe_stats[0] & 2048UL) != 0UL) { gmbus_irq_handler(dev); } else { } return; } } static void i9xx_hpd_irq_handler(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 hotplug_status ; uint32_t tmp ; u32 hotplug_trigger ; u32 hotplug_trigger___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 1); hotplug_status = tmp; if (hotplug_status != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), hotplug_status, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 0); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { hotplug_trigger = hotplug_status & 8259596U; intel_hpd_irq_handler(dev, hotplug_trigger, 0U, (u32 const *)(& hpd_status_g4x)); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { hotplug_trigger = hotplug_status & 8259596U; intel_hpd_irq_handler(dev, hotplug_trigger, 0U, (u32 const *)(& hpd_status_g4x)); } else { hotplug_trigger___0 = hotplug_status & 8259776U; intel_hpd_irq_handler(dev, hotplug_trigger___0, 0U, (u32 const *)(& hpd_status_i915)); } } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { _L: /* CIL Label */ if ((hotplug_status & 112U) != 0U) { dp_aux_irq_handler(dev); } else { } } else { } } } else { } return; } } static irqreturn_t valleyview_irq_handler(int irq , void *arg 
) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 iir ; u32 gt_iir ; u32 pm_iir ; irqreturn_t ret ; bool tmp ; int tmp___0 ; { dev = (struct drm_device *)arg; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } ldv_48874: gt_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 1); if (gt_iir != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278552L, gt_iir, 1); } else { } pm_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 1); if (pm_iir != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278568L, pm_iir, 1); } else { } iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581220L, 1); if (iir != 0U) { if ((iir & 131072U) != 0U) { i9xx_hpd_irq_handler(dev); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, iir, 1); } else { } if ((gt_iir == 0U && pm_iir == 0U) && iir == 0U) { goto out; } else { } ret = 1; if (gt_iir != 0U) { snb_gt_irq_handler(dev, dev_priv, gt_iir); } else { } if (pm_iir != 0U) { gen6_rps_irq_handler(dev_priv, pm_iir); } else { } valleyview_pipestat_irq_handler(dev, iir); goto ldv_48874; out: ; return (ret); } } static irqreturn_t cherryview_irq_handler(int irq , void *arg ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 master_ctl ; u32 iir ; irqreturn_t ret ; bool tmp ; int tmp___0 ; uint32_t tmp___1 ; { dev = (struct drm_device *)arg; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } ldv_48885: tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 1); master_ctl = tmp___1 & 2147483647U; iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581220L, 1); if (master_ctl == 0U && iir == 0U) { goto ldv_48884; } else { } ret = 1; 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279040L, 0U, 1); if (iir != 0U) { if ((iir & 131072U) != 0U) { i9xx_hpd_irq_handler(dev); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, iir, 1); } else { } gen8_gt_irq_handler(dev_priv, master_ctl); valleyview_pipestat_irq_handler(dev, iir); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279040L, 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 0); goto ldv_48885; ldv_48884: ; return (ret); } } static void ibx_irq_handler(struct drm_device *dev , u32 pch_iir ) { struct drm_i915_private *dev_priv ; int pipe ; u32 hotplug_trigger ; u32 dig_hotplug_reg ; int port ; int tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; long tmp___4 ; struct drm_i915_private *__p ; long tmp___5 ; long tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; hotplug_trigger = pch_iir & 3904U; dig_hotplug_reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802864L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802864L, dig_hotplug_reg, 1); intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, (u32 const *)(& hpd_ibx)); if ((pch_iir & 234881024U) != 0U) { tmp = ffs((int )((pch_iir & 234881024U) >> 25)); port = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ibx_irq_handler", "PCH audio power change on port %d\n", port + 65); } else { } } else { } if ((pch_iir & 57344U) != 0U) { dp_aux_irq_handler(dev); } else { } if ((pch_iir & 16777216U) != 0U) { gmbus_irq_handler(dev); } else { } if ((pch_iir & 12582912U) != 0U) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ibx_irq_handler", "PCH HDCP audio interrupt\n"); } else { } } else { } if ((pch_iir & 3145728U) != 0U) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ibx_irq_handler", "PCH transcoder audio interrupt\n"); } else { } 
} else { } if ((pch_iir & 524288U) != 0U) { drm_err("PCH poison interrupt\n"); } else { } if ((pch_iir & 196608U) != 0U) { pipe = 0; goto ldv_48903; ldv_48902: tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 983060), 1); drm_ut_debug_printk("ibx_irq_handler", " pipe %c FDI IIR: 0x%08x\n", pipe + 65, tmp___3); } else { } pipe = pipe + 1; ldv_48903: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_48902; } else { } } else { } if ((pch_iir & 36U) != 0U) { tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ibx_irq_handler", "PCH transcoder CRC done interrupt\n"); } else { } } else { } if ((pch_iir & 18U) != 0U) { tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("ibx_irq_handler", "PCH transcoder CRC error interrupt\n"); } else { } } else { } if ((int )pch_iir & 1) { intel_pch_fifo_underrun_irq_handler(dev_priv, 0); } else { } if ((pch_iir & 8U) != 0U) { intel_pch_fifo_underrun_irq_handler(dev_priv, 1); } else { } return; } } static void ivb_err_int_handler(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 err_int ; uint32_t tmp ; enum pipe pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278592L, 1); err_int = tmp; if ((int )err_int < 0) { drm_err("Poison interrupt\n"); } else { } pipe = 0; goto ldv_48924; ldv_48923: ; if (((u32 )(1 << (int )pipe * 3) & err_int) != 0U) { intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } else { } if (((u32 )(1 << ((int )pipe * 3 + 2)) & err_int) != 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ivb_pipe_crc_irq_handler(dev, pipe); } else { hsw_pipe_crc_irq_handler(dev, pipe); } } else { } 
/* NOTE(review): CIL-generated verification code — comments added only, all code
 * tokens byte-identical.  Tail of ivb_err_int_handler(): per-pipe loop close,
 * then the latched error bits are written back to the register at 278592
 * (0x44040 — presumably GEN7_ERR_INT; confirm against i915_reg.h) to ack. */
pipe = (enum pipe )((int )pipe + 1); ldv_48924: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > (int )pipe) { goto ldv_48923; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278592L, err_int, 1); return; } }
/* cpt_serr_int_handler() — reads the PCH south error register at 802880
 * (0xC4040 — presumably SERR_INT; confirm): sign bit reports a PCH poison
 * error, bits 0/3/6 report FIFO underruns on PCH transcoders A/B/C, and the
 * value is written back to clear the latched bits. */
static void cpt_serr_int_handler(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 serr_int ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802880L, 1); serr_int = tmp; if ((int )serr_int < 0) { drm_err("PCH poison interrupt\n"); } else { } if ((int )serr_int & 1) { intel_pch_fifo_underrun_irq_handler(dev_priv, 0); } else { } if ((serr_int & 8U) != 0U) { intel_pch_fifo_underrun_irq_handler(dev_priv, 1); } else { } if ((serr_int & 64U) != 0U) { intel_pch_fifo_underrun_irq_handler(dev_priv, 2); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802880L, serr_int, 1); return; } }
/* cpt_irq_handler() — CougarPoint/PantherPoint PCH interrupt dispatch.
 * Hotplug bits (mask 15466496U == 0xEC0000) are paired with a read/write-back
 * of the digital hotplug register at 802864 (0xC4030) and handed to
 * intel_hpd_irq_handler with the hpd_cpt table.  Bits 31:29 report an audio
 * power change (ffs of pch_iir >> 29 recovers the port, printed as a letter
 * via %c with +65).  Further decode (AUX, GMBUS, audio CP, FDI, SERR)
 * continues on the following source lines. */
static void cpt_irq_handler(struct drm_device *dev , u32 pch_iir ) { struct drm_i915_private *dev_priv ; int pipe ; u32 hotplug_trigger ; u32 dig_hotplug_reg ; int port ; int tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; long tmp___4 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; hotplug_trigger = pch_iir & 15466496U; dig_hotplug_reg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802864L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802864L, dig_hotplug_reg, 1); intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, (u32 const *)(& hpd_cpt)); if ((pch_iir & 3758096384U) != 0U) { tmp = ffs((int )(pch_iir >> 29)); port = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("cpt_irq_handler", "PCH audio power change on port %c\n", port + 65); } else { } } else { } if ((pch_iir & 234881024U) != 0U) { dp_aux_irq_handler(dev); } else { } if ((pch_iir & 131072U) != 0U) { gmbus_irq_handler(dev); } else { } if ((pch_iir & 1092U) != 
0U) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("cpt_irq_handler", "Audio CP request interrupt\n"); } else { } } else { } if ((pch_iir & 546U) != 0U) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("cpt_irq_handler", "Audio CP change interrupt\n"); } else { } } else { } if ((pch_iir & 273U) != 0U) { pipe = 0; goto ldv_48948; ldv_48947: tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 983060), 1); drm_ut_debug_printk("cpt_irq_handler", " pipe %c FDI IIR: 0x%08x\n", pipe + 65, tmp___3); } else { } pipe = pipe + 1; ldv_48948: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_48947; } else { } } else { } if ((pch_iir & 65536U) != 0U) { cpt_serr_int_handler(dev); } else { } return; } } static void ilk_display_irq_handler(struct drm_device *dev , u32 de_iir ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; bool tmp ; struct drm_i915_private *__p ; u32 pch_iir ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((de_iir & 1048576U) != 0U) { dp_aux_irq_handler(dev); } else { } if ((de_iir & 262144U) != 0U) { intel_opregion_asle_intr(dev); } else { } if ((de_iir & 8388608U) != 0U) { drm_err("Poison interrupt\n"); } else { } pipe = 0; goto ldv_48963; ldv_48962: ; if (((u32 )(1 << ((int )pipe * 8 + 7)) & de_iir) != 0U) { tmp = intel_pipe_handle_vblank(dev, pipe); if ((int )tmp) { intel_check_page_flip(dev, (int )pipe); } else { } } else { } if (((u32 )(1 << (int )pipe * 8) & de_iir) != 0U) { intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } else { } if (((u32 )(1 << ((int )pipe * 8 + 2)) & de_iir) != 0U) { i9xx_pipe_crc_irq_handler(dev, pipe); } else { } if (((u32 )(1 << ((int )pipe + 26)) & de_iir) != 0U) { 
intel_prepare_page_flip(dev, (int )pipe); intel_finish_page_flip_plane(dev, (int )pipe); } else { } pipe = (enum pipe )((int )pipe + 1); ldv_48963: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_48962; } else { } if ((de_iir & 2097152U) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 1); pch_iir = tmp___0; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { cpt_irq_handler(dev, pch_iir); } else { ibx_irq_handler(dev, pch_iir); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802824L, pch_iir, 1); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 5U && (de_iir & 33554432U) != 0U) { ironlake_rps_change_irq_handler(dev); } else { } return; } } static void ivb_display_irq_handler(struct drm_device *dev , u32 de_iir ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; bool tmp ; struct drm_i915_private *__p ; u32 pch_iir ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((de_iir & 1073741824U) != 0U) { ivb_err_int_handler(dev); } else { } if ((de_iir & 67108864U) != 0U) { dp_aux_irq_handler(dev); } else { } if ((de_iir & 536870912U) != 0U) { intel_opregion_asle_intr(dev); } else { } pipe = 0; goto ldv_48991; ldv_48990: ; if (((u32 )(1 << (int )pipe * 5) & de_iir) != 0U) { tmp = intel_pipe_handle_vblank(dev, pipe); if ((int )tmp) { intel_check_page_flip(dev, (int )pipe); } else { } } else { } if (((u32 )(1 << ((int )pipe * 5 + 3)) & de_iir) != 0U) { intel_prepare_page_flip(dev, (int )pipe); intel_finish_page_flip_plane(dev, (int )pipe); } else { } pipe = (enum pipe )((int )pipe + 1); ldv_48991: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_48990; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 5U && (de_iir & 268435456U) != 0U) { tmp___0 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 1); pch_iir = tmp___0; cpt_irq_handler(dev, pch_iir); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802824L, pch_iir, 1); } else { } return; } } static irqreturn_t ironlake_irq_handler(int irq , void *arg ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 de_iir ; u32 gt_iir ; u32 de_ier ; u32 sde_ier ; irqreturn_t ret ; bool tmp ; int tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; u32 pm_iir ; uint32_t tmp___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { dev = (struct drm_device *)arg; dev_priv = (struct drm_i915_private *)dev->dev_private; sde_ier = 0U; ret = 0; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } intel_uncore_check_errors(dev); de_ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278540L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278540L, de_ier & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278540L, 0); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 5U) { sde_ier = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802828L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802828L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802828L, 0); } else { } gt_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 1); if (gt_iir != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278552L, gt_iir, 1); ret = 1; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 5U) { snb_gt_irq_handler(dev, dev_priv, gt_iir); } else { ilk_gt_irq_handler(dev, dev_priv, gt_iir); } } else { } de_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278536L, 1); if (de_iir != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278536L, de_iir, 1); ret = 1; __p___1 = 
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 6U) { ivb_display_irq_handler(dev, de_iir); } else { ilk_display_irq_handler(dev, de_iir); } } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 5U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 1); pm_iir = tmp___1; if (pm_iir != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278568L, pm_iir, 1); ret = 1; gen6_rps_irq_handler(dev_priv, pm_iir); } else { } } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278540L, de_ier, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278540L, 0); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type != 5U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802828L, sde_ier, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802828L, 0); } else { } return (ret); } } static void bxt_hpd_handler(struct drm_device *dev , uint32_t iir_status ) { struct drm_i915_private *dev_priv ; uint32_t hp_control ; uint32_t hp_trigger ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; hp_trigger = iir_status & 56U; hp_control = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802864L, 1); if ((hp_control & 268439568U) == 0U) { drm_err("Interrupt when HPD disabled\n"); return; } else { } tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("bxt_hpd_handler", "hotplug event received, stat 0x%08x\n", hp_control & 268439568U); } else { } intel_hpd_irq_handler(dev, hp_trigger, hp_control, (u32 const *)(& hpd_bxt)); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802864L, hp_control, 1); return; } } static irqreturn_t gen8_irq_handler(int irq , void *arg ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 master_ctl ; irqreturn_t ret ; uint32_t tmp ; enum pipe pipe ; u32 aux_mask ; bool tmp___0 ; int tmp___1 ; struct 
drm_i915_private *__p ; bool found ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; uint32_t pipe_iir ; uint32_t flip_done ; uint32_t fault_errors ; bool tmp___2 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; u32 pch_iir ; uint32_t tmp___3 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; { dev = (struct drm_device *)arg; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; tmp = 0U; aux_mask = 1U; tmp___0 = intel_irqs_enabled(dev_priv); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 9U) { aux_mask = aux_mask | 234881024U; } else { } master_ctl = readl((void const volatile *)dev_priv->regs + 279040U); master_ctl = master_ctl & 2147483647U; if (master_ctl == 0U) { return (0); } else { } writel(0U, (void volatile *)dev_priv->regs + 279040U); ret = gen8_gt_irq_handler(dev_priv, master_ctl); if ((master_ctl & 4194304U) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279656L, 1); if (tmp != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279656L, tmp, 1); ret = 1; if ((tmp & 134217728U) != 0U) { intel_opregion_asle_intr(dev); } else { drm_err("Unexpected DE Misc interrupt\n"); } } else { drm_err("The master control interrupt lied (DE MISC)!\n"); } } else { } if ((master_ctl & 1048576U) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279624L, 1); if (tmp != 0U) { found = 0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279624L, tmp, 1); ret = 1; if ((tmp & aux_mask) != 0U) { dp_aux_irq_handler(dev); found = 1; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int 
)((unsigned char )__p___1->info.gen) == 9U) { if ((tmp & 56U) != 0U) { bxt_hpd_handler(dev, tmp); found = 1; } else { } } else { } } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { if ((tmp & 2U) != 0U) { gmbus_irq_handler(dev); found = 1; } else { } } else { } } else { } if (! found) { drm_err("Unexpected DE Port interrupt\n"); } else { } } else { drm_err("The master control interrupt lied (DE PORT)!\n"); } } else { } pipe = 0; goto ldv_49115; ldv_49114: flip_done = 0U; fault_errors = 0U; if (((u32 )(1 << ((int )pipe + 16)) & master_ctl) == 0U) { goto ldv_49101; } else { } pipe_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 16 + 279560), 1); if (pipe_iir != 0U) { ret = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 16 + 279560), pipe_iir, 1); if ((int )pipe_iir & 1) { tmp___2 = intel_pipe_handle_vblank(dev, pipe); if ((int )tmp___2) { intel_check_page_flip(dev, (int )pipe); } else { } } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 9U) { flip_done = pipe_iir & 8U; } else { flip_done = pipe_iir & 16U; } if (flip_done != 0U) { intel_prepare_page_flip(dev, (int )pipe); intel_finish_page_flip_plane(dev, (int )pipe); } else { } if ((pipe_iir & 268435456U) != 0U) { hsw_pipe_crc_irq_handler(dev, pipe); } else { } if ((int )pipe_iir < 0) { intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 9U) { fault_errors = pipe_iir & 3968U; } else { fault_errors = pipe_iir & 1792U; } if (fault_errors != 0U) { drm_err("Fault errors on pipe %c\n: 0x%08x", (int )pipe + 65, pipe_iir & 1792U); } else { } } else { drm_err("The master control interrupt lied 
(DE PIPE)!\n"); } ldv_49101: pipe = (enum pipe )((int )pipe + 1); ldv_49115: __p___6 = dev_priv; if ((int )__p___6->info.num_pipes > (int )pipe) { goto ldv_49114; } else { } __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___7->pch_type != 0U) { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___8->pch_type != 5U) { if ((master_ctl & 8388608U) != 0U) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 1); pch_iir = tmp___3; if (pch_iir != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802824L, pch_iir, 1); ret = 1; cpt_irq_handler(dev, pch_iir); } else { drm_err("The master control interrupt lied (SDE)!\n"); } } else { } } else { } } else { } writel(2147483648U, (void volatile *)dev_priv->regs + 279040U); readl((void const volatile *)dev_priv->regs + 279040U); return (ret); } } static void i915_error_wake_up(struct drm_i915_private *dev_priv , bool reset_completed ) { struct intel_engine_cs *ring ; int i ; bool tmp ; { i = 0; goto ldv_49137; ldv_49136: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { __wake_up(& ring->irq_queue, 3U, 0, (void *)0); } else { } i = i + 1; ldv_49137: ; if (i <= 4) { goto ldv_49136; } else { } __wake_up(& dev_priv->pending_flip_queue, 3U, 0, (void *)0); if ((int )reset_completed) { __wake_up(& dev_priv->gpu_error.reset_queue, 3U, 0, (void *)0); } else { } return; } } static void i915_reset_and_wakeup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct i915_gpu_error *error ; char *error_event[2U] ; char *reset_event[2U] ; char *reset_done_event[2U] ; int ret ; long tmp___0 ; bool tmp___1 ; bool tmp___2 ; int tmp___3 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; error = & dev_priv->gpu_error; error_event[0] = (char *)"OLD_ERROR=1"; error_event[1] = (char *)0; reset_event[0] = (char *)"RESET=1"; reset_event[1] = (char 
*)0; reset_done_event[0] = (char *)"OLD_ERROR=0"; reset_done_event[1] = (char *)0; kobject_uevent_env(& ((dev->primary)->kdev)->kobj, 2, (char **)(& error_event)); tmp___1 = i915_reset_in_progress(error); if ((int )tmp___1) { tmp___2 = i915_terminally_wedged(error); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_reset_and_wakeup", "resetting chip\n"); } else { } kobject_uevent_env(& ((dev->primary)->kdev)->kobj, 2, (char **)(& reset_event)); intel_runtime_pm_get(dev_priv); intel_prepare_reset(dev); ret = i915_reset(dev); intel_finish_reset(dev); intel_runtime_pm_put(dev_priv); if (ret == 0) { __asm__ volatile ("": : : "memory"); atomic_inc(& dev_priv->gpu_error.reset_counter); kobject_uevent_env(& ((dev->primary)->kdev)->kobj, 2, (char **)(& reset_done_event)); } else { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; orl %0,%1": : "r" (2147483648U), "m" (error->reset_counter): "memory"); } i915_error_wake_up(dev_priv, 1); } else { } } else { } return; } } static void i915_report_and_clear_eir(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t instdone[4U] ; u32 eir ; uint32_t tmp ; int pipe ; int i ; u32 ipeir ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; u32 pgtbl_err ; uint32_t tmp___5 ; struct drm_i915_private *__p ; u32 pgtbl_err___0 ; uint32_t tmp___6 ; struct drm_i915_private *__p___0 ; uint32_t tmp___7 ; struct drm_i915_private *__p___1 ; uint32_t tmp___8 ; u32 ipeir___0 ; uint32_t tmp___9 ; uint32_t tmp___10 ; uint32_t tmp___11 ; uint32_t tmp___12 ; u32 ipeir___1 ; uint32_t tmp___13 ; uint32_t tmp___14 ; uint32_t tmp___15 ; uint32_t tmp___16 ; uint32_t tmp___17 ; struct drm_i915_private *__p___2 ; uint32_t tmp___18 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8368L, 1); eir = tmp; if (eir == 0U) { return; } else { } printk("\vi915: render error detected, EIR: 0x%08x\n", eir); i915_get_extra_instdone(dev, (uint32_t *)(& instdone)); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { if ((eir & 24U) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8292L, 1); ipeir = tmp___0; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8292L, 1); printk("\vi915: IPEIR: 0x%08x\n", tmp___1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8296L, 1); printk("\vi915: IPEHR: 0x%08x\n", tmp___2); i = 0; goto ldv_49167; ldv_49166: printk("\vi915: INSTDONE_%d: 0x%08x\n", i, instdone[i]); i = i + 1; ldv_49167: ; if ((unsigned int )i <= 3U) { goto ldv_49166; } else { } tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8304L, 1); printk("\vi915: INSTPS: 0x%08x\n", tmp___3); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8308L, 1); printk("\vi915: ACTHD: 0x%08x\n", tmp___4); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8292L, ipeir, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8292L, 0); } else { } if ((eir & 32U) != 0U) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8228L, 1); pgtbl_err = tmp___5; printk("\vi915: page table error\n"); printk("\vi915: PGTBL_ER: 0x%08x\n", pgtbl_err); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8228L, pgtbl_err, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8228L, 0); } else { } } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 2U) { if ((eir & 16U) != 0U) { tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8228L, 1); pgtbl_err___0 = tmp___6; printk("\vi915: page table error\n"); printk("\vi915: PGTBL_ER: 0x%08x\n", pgtbl_err___0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8228L, pgtbl_err___0, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8228L, 0); } else { } } else { } if ((eir & 2U) != 0U) { printk("\vi915: memory refresh error:\n"); pipe = 0; goto ldv_49184; ldv_49183: tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); printk("\vi915: pipe %c stat: 0x%08x\n", pipe + 65, tmp___7); pipe = pipe + 1; ldv_49184: __p___1 = dev_priv; if ((int )__p___1->info.num_pipes > pipe) { goto ldv_49183; } else { } } else { } if ((int )eir & 1) { printk("\vi915: instruction error\n"); tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8384L, 1); printk("\vi915: INSTPM: 0x%08x\n", tmp___8); i = 0; goto ldv_49189; ldv_49188: printk("\vi915: INSTDONE_%d: 0x%08x\n", i, instdone[i]); i = i + 1; ldv_49189: ; if ((unsigned int )i <= 3U) { goto ldv_49188; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 3U) { tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8328L, 1); ipeir___0 = tmp___9; tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8328L, 1); printk("\vi915: IPEIR: 0x%08x\n", tmp___10); tmp___11 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8332L, 1); printk("\vi915: IPEHR: 0x%08x\n", tmp___11); tmp___12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8392L, 1); printk("\vi915: ACTHD: 0x%08x\n", tmp___12); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8328L, ipeir___0, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8328L, 0); } else { tmp___13 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8292L, 1); ipeir___1 = tmp___13; tmp___14 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8292L, 1); printk("\vi915: IPEIR: 0x%08x\n", tmp___14); tmp___15 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8296L, 1); printk("\vi915: IPEHR: 0x%08x\n", tmp___15); tmp___16 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8304L, 1); printk("\vi915: INSTPS: 0x%08x\n", tmp___16); tmp___17 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8308L, 1); printk("\vi915: ACTHD: 0x%08x\n", tmp___17); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8292L, ipeir___1, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8292L, 0); } } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8368L, eir, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8368L, 0); eir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8368L, 1); if (eir != 0U) { drm_err("EIR stuck: 0x%08x, masking\n", eir); tmp___18 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8372L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8372L, tmp___18 | eir, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8356L, 32768U, 1); } else { } return; } } void i915_handle_error(struct drm_device *dev , bool wedged , char const *fmt , ...) { struct drm_i915_private *dev_priv ; va_list args ; char error_msg[80U] ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ldv__builtin_va_start((va_list *)(& args)); vscnprintf((char *)(& error_msg), 80UL, fmt, (va_list *)(& args)); ldv__builtin_va_end((va_list *)(& args)); i915_capture_error_state(dev, (int )wedged, (char const *)(& error_msg)); i915_report_and_clear_eir(dev); if ((int )wedged) { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; orl %0,%1": : "r" (1U), "m" (dev_priv->gpu_error.reset_counter): "memory"); i915_error_wake_up(dev_priv, 0); } else { } i915_reset_and_wakeup(dev); return; } } static int i915_enable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; raw_spinlock_t *tmp ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); __p = to_i915((struct drm_device const *)dev); if 
/* Tail of i915_enable_vblank(): gen4+ enables pipestat mask 4U, gen2/3 the
 * legacy mask 2U (two different vblank status bits), then drops irq_lock. */
((unsigned int )((unsigned char )__p->info.gen) > 3U) { i915_enable_pipestat(dev_priv, (enum pipe )pipe, 4U); } else { i915_enable_pipestat(dev_priv, (enum pipe )pipe, 2U); } spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return (0); } }
/* drm_driver.enable_vblank callback for ILK/SNB/IVB/HSW: compute the
 * per-pipe display-engine IMR bit -- gen7+ uses a 5-bit-per-pipe layout
 * (1 << pipe*5), older gens an 8-bit layout (1 << (pipe*8 + 7)) -- and
 * unmask it under irq_lock.  Always returns 0 (success). */
static int ironlake_enable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; uint32_t bit ; struct drm_i915_private *__p ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); bit = (uint32_t )((unsigned int )((unsigned char )__p->info.gen) > 6U ? 1 << pipe * 5 : 1 << (pipe * 8 + 7)); tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); ironlake_enable_display_irq(dev_priv, bit); spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return (0); } }
/* VLV/CHV variant: vblank delivery is controlled purely through the
 * pipestat mechanism (mask 4U), taken under the same irq_lock. */
static int valleyview_enable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); i915_enable_pipestat(dev_priv, (enum pipe )pipe, 4U); spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return (0); } }
/* BDW+ variant: clear bit 0 (the vblank bit) in the cached per-pipe
 * de_irq_mask[], write the mask to the per-pipe IMR register at
 * pipe*16 + 279556, then issue a posting read (trailing readl with
 * trace=0) so the unmask lands before the lock is released. */
static int gen8_enable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); dev_priv->__annonCompField82.de_irq_mask[pipe] = dev_priv->__annonCompField82.de_irq_mask[pipe] & 4294967294U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279556), dev_priv->__annonCompField82.de_irq_mask[pipe], 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279556), 0); spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return (0); } } static void 
/* drm_driver.disable_vblank for gen2-4: clear pipestat mask 6U, i.e. both
 * bits (4U | 2U) that the matching i915_enable_vblank() may have set,
 * covering either generation's enable path in one call. */
i915_disable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); i915_disable_pipestat(dev_priv, (enum pipe )pipe, 6U); spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return; } }
/* Mirror of ironlake_enable_vblank(): recompute the identical per-pipe DE
 * IMR bit (gen7+ 5-bit stride, else 8-bit stride + 7) and mask it again
 * under irq_lock. */
static void ironlake_disable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; uint32_t bit ; struct drm_i915_private *__p ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); bit = (uint32_t )((unsigned int )((unsigned char )__p->info.gen) > 6U ? 1 << pipe * 5 : 1 << (pipe * 8 + 7)); tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); ironlake_disable_display_irq(dev_priv, bit); spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return; } }
/* VLV/CHV mirror: drop only the pipestat mask 4U that the enable path set. */
static void valleyview_disable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); i915_disable_pipestat(dev_priv, (enum pipe )pipe, 4U); spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return; } }
/* BDW+ mirror of gen8_enable_vblank(): set bit 0 back in the cached
 * de_irq_mask[] and write it to the per-pipe IMR at pipe*16 + 279556.
 * The posting read that completes this function is on the next source
 * line (CIL split the function across physical lines). */
static void gen8_disable_vblank(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; raw_spinlock_t *tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); irqflags = _raw_spin_lock_irqsave(tmp); dev_priv->__annonCompField82.de_irq_mask[pipe] = dev_priv->__annonCompField82.de_irq_mask[pipe] | 1U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279556), dev_priv->__annonCompField82.de_irq_mask[pipe], 1); 
/* Tail of gen8_disable_vblank(): posting read of the just-written IMR,
 * then release irq_lock. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279556), 0); spin_unlock_irqrestore(& dev_priv->irq_lock, irqflags); return; } }
/* container_of on the last node of ring->request_list: request_list.prev
 * minus 72 bytes (0xffffffffffffffb8UL is -72 as unsigned) yields the
 * enclosing drm_i915_gem_request.  NOTE(review): the -72 offset must match
 * offsetof(struct drm_i915_gem_request, <list member>) -- CIL baked it in
 * as a constant; verify against the struct layout if this file is edited. */
static struct drm_i915_gem_request *ring_last_request(struct intel_engine_cs *ring ) { struct list_head const *__mptr ; { __mptr = (struct list_head const *)ring->request_list.prev; return ((struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL); } }
/* A ring is "idle" for hangcheck purposes when its request list is empty
 * or its most recent request has already completed (lazy-coherency flag 0). */
static bool ring_idle(struct intel_engine_cs *ring ) { int tmp ; struct drm_i915_gem_request *tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp = list_empty((struct list_head const *)(& ring->request_list)); if (tmp != 0) { tmp___2 = 1; } else { tmp___0 = ring_last_request(ring); tmp___1 = i915_gem_request_completed___2(tmp___0, 0); if ((int )tmp___1) { tmp___2 = 1; } else { tmp___2 = 0; } } return ((bool )tmp___2); } }
/* Classify an IPEHR value as a semaphore-wait command: on gen8+ the opcode
 * field (ipehr >> 23) must equal 28U; on older gens the value is masked
 * with 4294770687U (0xFFFD00FF) and compared against the fixed
 * MI_SEMAPHORE_MBOX-style encoding 185860097U (0x0B140001) --
 * NOTE(review): constant names inferred from the i915 driver; the masks
 * themselves are literal. */
static bool ipehr_is_semaphore_wait(struct drm_device *dev , u32 ipehr ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { return (ipehr >> 23 == 28U); } else { ipehr = ipehr & 4294770687U; return (ipehr == 185860097U); } } }
/* Map a semaphore wait back to the engine that should signal it.  The
 * gen8+ branch (below, continuing on the next source line) scans all five
 * engines and matches the wait's GGTT offset against each candidate's
 * signal_ggtt[ring->id] slot, skipping the waiting ring itself. */
static struct intel_engine_cs *semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring , u32 ipehr , u64 offset ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *signaller ; int i ; bool tmp ; u32 sync_bits ; bool tmp___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { i = 0; goto ldv_49333; ldv_49332: signaller = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(signaller); if ((int )tmp) { if ((unsigned long )ring == (unsigned long )signaller) { goto ldv_49331; } else { } if (signaller->semaphore.__annonCompField77.signal_ggtt[(unsigned int )ring->id] == offset) { return (signaller); } else { } } else { } 
ldv_49331: i = i + 1; ldv_49333: ; if (i <= 4) { goto ldv_49332; } else { } } else { sync_bits = ipehr & 196608U; i = 0; goto ldv_49338; ldv_49337: signaller = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(signaller); if ((int )tmp___0) { if ((unsigned long )ring == (unsigned long )signaller) { goto ldv_49336; } else { } if (signaller->semaphore.__annonCompField77.mbox.wait[(unsigned int )ring->id] == sync_bits) { return (signaller); } else { } } else { } ldv_49336: i = i + 1; ldv_49338: ; if (i <= 4) { goto ldv_49337; } else { } } drm_err("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", (unsigned int )ring->id, ipehr, offset); return ((struct intel_engine_cs *)0); } } static struct intel_engine_cs *semaphore_waits_for(struct intel_engine_cs *ring , u32 *seqno ) { struct drm_i915_private *dev_priv ; u32 cmd ; u32 ipehr ; u32 head ; u64 offset ; int i ; int backwards ; bool tmp ; int tmp___0 ; uint32_t tmp___1 ; struct drm_i915_private *__p ; unsigned int tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; struct drm_i915_private *__p___0 ; struct intel_engine_cs *tmp___5 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; offset = 0ULL; ipehr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 104U), 1); tmp = ipehr_is_semaphore_wait(ring->dev, ipehr); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return ((struct intel_engine_cs *)0); } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); head = tmp___1 & 2097148U; __p = to_i915((struct drm_device const *)ring->dev); backwards = (unsigned int )((unsigned char )__p->info.gen) > 7U ? 
/* Tail of semaphore_waits_for(): walk backwards through the ring buffer
 * (4 or 5 dwords depending on gen, value chosen on the previous line)
 * looking for the command dword equal to IPEHR; the seqno the waiter is
 * blocked on sits one dword after it (+1 because the stored value is the
 * seqno minus one).  On gen8+ the 64-bit semaphore address is rebuilt from
 * the dwords at head+12 (high) and head+8 (low).
 * NOTE(review): after "offset = offset << 32" the next statement assigns
 * "offset = (u64 )tmp___4" instead of OR-ing it in, discarding the shifted
 * high dword -- this mirrors a known bug in the original i915 code (later
 * fixed upstream to use |=).  Left as-is here: this file is generated
 * verification input and must match its source. */
5 : 4; i = backwards; goto ldv_49359; ldv_49358: head = (u32 )((ring->buffer)->size + -1) & head; cmd = ioread32((ring->buffer)->virtual_start + (unsigned long )head); if (cmd == ipehr) { goto ldv_49357; } else { } head = head - 4U; i = i - 1; ldv_49359: ; if (i != 0) { goto ldv_49358; } else { } ldv_49357: ; if (i == 0) { return ((struct intel_engine_cs *)0); } else { } tmp___2 = ioread32((ring->buffer)->virtual_start + ((unsigned long )head + 4UL)); *seqno = tmp___2 + 1U; __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { tmp___3 = ioread32((ring->buffer)->virtual_start + ((unsigned long )head + 12UL)); offset = (u64 )tmp___3; offset = offset << 32; tmp___4 = ioread32((ring->buffer)->virtual_start + ((unsigned long )head + 8UL)); offset = (u64 )tmp___4; } else { } tmp___5 = semaphore_wait_to_signaller_ring(ring, ipehr, offset); return (tmp___5); } }
/* Deadlock probe for a ring stuck on a semaphore wait.  Returns:
 *   -1 -- no signaller found, signaller itself deadlocked (> 4 visits),
 *         or a recursive probe of the signaller reported deadlock;
 *    1 -- the signaller's current seqno already passed the awaited value,
 *         so the semaphore should have been released;
 *    0 -- inconclusive (signaller still making progress).
 * The per-ring hangcheck.deadlock counter (incremented on entry, reset by
 * semaphore_clear_deadlocks()) bounds the recursion across hangchecks.
 * The readl of signaller->mmio_base + 60U tests bit 10 (1024U) to see
 * whether the signaller is itself blocked in a semaphore wait before
 * recursing. */
static int semaphore_passed(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *signaller ; u32 seqno ; u32 tmp ; bool tmp___0 ; uint32_t tmp___1 ; int tmp___2 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; ring->hangcheck.deadlock = ring->hangcheck.deadlock + 1; signaller = semaphore_waits_for(ring, & seqno); if ((unsigned long )signaller == (unsigned long )((struct intel_engine_cs *)0)) { return (-1); } else { } if (signaller->hangcheck.deadlock > 4) { return (-1); } else { } tmp = (*(signaller->get_seqno))(signaller, 0); tmp___0 = i915_seqno_passed(tmp, seqno); if ((int )tmp___0) { return (1); } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(signaller->mmio_base + 60U), 1); if ((tmp___1 & 1024U) != 0U) { tmp___2 = semaphore_passed(signaller); if (tmp___2 < 0) { return (-1); } else { } } else { } return (0); } }
/* Reset every initialized ring's hangcheck.deadlock counter; called once
 * per hangcheck pass before semaphore_passed() probes (loop body continues
 * on the next source line). */
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv ) { struct intel_engine_cs *ring ; int i ; bool tmp ; { i = 0; goto 
ldv_49378; ldv_49377: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp = intel_ring_initialized(ring); if ((int )tmp) { ring->hangcheck.deadlock = 0; } else { } i = i + 1; ldv_49378: ; if (i <= 4) { goto ldv_49377; } else { } return; } } static enum intel_ring_hangcheck_action ring_stuck(struct intel_engine_cs *ring , u64 acthd ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 tmp ; struct drm_i915_private *__p ; int tmp___0 ; struct drm_i915_private *__p___0 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (ring->hangcheck.acthd != acthd) { if (ring->hangcheck.max_acthd < acthd) { ring->hangcheck.max_acthd = acthd; return (2); } else { } return (3); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { return (5); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); if ((tmp & 2048U) != 0U) { i915_handle_error(dev, 0, "Kicking stuck wait on %s", ring->name); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 60U), tmp, 1); return (4); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 5U && (tmp & 1024U) != 0U) { tmp___0 = semaphore_passed(ring); switch (tmp___0) { default: ; return (5); case 1: i915_handle_error(dev, 0, "Kicking stuck semaphore on %s", ring->name); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 60U), tmp, 1); return (4); case 0: ; return (1); } } else { } return (5); } } static void i915_hangcheck_elapsed(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct drm_device *dev ; struct intel_engine_cs *ring ; int i ; int busy_count ; int rings_hung ; bool stuck[5U] ; unsigned int tmp ; u64 acthd ; u32 seqno ; bool busy ; unsigned int tmp___0 ; int tmp___1 ; int tmp___2 ; bool tmp___3 ; 
u64 tmp___4 ; bool tmp___5 ; bool tmp___6 ;
{
/* container_of(work, struct drm_i915_private, gpu_error.hangcheck_work):
 * the constant is the negated member offset. */
__mptr = (struct work_struct const *)work;
dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff4128UL;
dev = dev_priv->dev;
busy_count = 0;
rings_hung = 0;
/* stuck[5] = { false } */
stuck[0] = 0;
tmp = 1U;
while (1) {
if (tmp >= 5U) { break; } else { }
stuck[tmp] = (_Bool)0;
tmp = tmp + 1U;
}
if (! i915.enable_hangcheck) { return; } else { }
/* Pass 1: per-ring scoring. */
i = 0; goto ldv_49425;
ldv_49424: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i;
tmp___5 = intel_ring_initialized(ring);
if ((int )tmp___5) {
busy = 1;
semaphore_clear_deadlocks(dev_priv);
seqno = (*(ring->get_seqno))(ring, 0);
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
/* No new seqno since the last tick. */
tmp___3 = ring_idle(ring);
if ((int )tmp___3) {
ring->hangcheck.action = 0;
/* Ring is idle but someone is still sleeping on its irq queue:
 * a missed interrupt. Record it once per ring, wake the waiters,
 * and bump the score. */
tmp___2 = waitqueue_active(& ring->irq_queue);
if (tmp___2 != 0) {
tmp___1 = test_and_set_bit((long )ring->id, (unsigned long volatile *)(& dev_priv->gpu_error.missed_irq_rings));
if (tmp___1 == 0) {
tmp___0 = intel_ring_flag(ring);
if ((dev_priv->gpu_error.test_irq_rings & tmp___0) == 0U) { drm_err("Hangcheck timer elapsed... %s idle\n", ring->name); } else { printk("\016[drm] Fake missed irq on %s\n", ring->name); }
__wake_up(& ring->irq_queue, 3U, 0, (void *)0);
} else { }
ring->hangcheck.score = ring->hangcheck.score + 1;
} else { busy = 0; }
} else {
/* Busy with the same seqno: classify via ACTHD movement and add
 * 0 / 1 / 5 / 20 to the score depending on severity. */
ring->hangcheck.action = ring_stuck(ring, acthd);
switch ((unsigned int )ring->hangcheck.action) {
case 0U: ;
case 1U: ;
case 2U: ;
goto ldv_49420;
case 3U: ring->hangcheck.score = ring->hangcheck.score + 1; goto ldv_49420;
case 4U: ring->hangcheck.score = ring->hangcheck.score + 5; goto ldv_49420;
case 5U: ring->hangcheck.score = ring->hangcheck.score + 20; stuck[i] = 1; goto ldv_49420;
}
ldv_49420: ;
}
} else {
/* Seqno advanced: decay the score so bursts of slowness across
 * multiple batches still accumulate, and reset the ACTHD history. */
ring->hangcheck.action = 2;
if (ring->hangcheck.score > 0) { ring->hangcheck.score = ring->hangcheck.score - 1; } else { }
tmp___4 = 0ULL;
ring->hangcheck.max_acthd = tmp___4;
ring->hangcheck.acthd = tmp___4;
}
ring->hangcheck.seqno = seqno;
ring->hangcheck.acthd = acthd;
busy_count = (int )busy + busy_count;
} else { }
i = i + 1;
ldv_49425: ;
if (i <= 4) { goto ldv_49424; } else { }
/* Pass 2: report rings whose score crossed the hung threshold (>30). */
i = 0; goto ldv_49428;
ldv_49427: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i;
tmp___6 = intel_ring_initialized(ring);
if ((int )tmp___6) {
if (ring->hangcheck.score > 30) {
printk("\016[drm] %s on %s\n", (int )stuck[i] ? (char *)"stuck" : (char *)"no progress", ring->name);
rings_hung = rings_hung + 1;
} else { }
} else { }
i = i + 1;
ldv_49428: ;
if (i <= 4) { goto ldv_49427; } else { }
/* NOTE(review): upstream i915 calls i915_handle_error(dev, true,
 * "Ring hung") here when rings_hung != 0; this harness only returns.
 * Presumably an intentional verification-model simplification - confirm. */
if (rings_hung != 0) { return; } else { }
/* Re-arm the timer only while at least one ring is busy. */
if (busy_count != 0) { i915_queue_hangcheck(dev); } else { }
return;
}
}
/* i915_queue_hangcheck - (re)schedule the hangcheck work ~1.5s out,
 * rounded up to a jiffies boundary; no-op when hangcheck is disabled. */
void i915_queue_hangcheck(struct drm_device *dev )
{ struct i915_gpu_error *e ; struct drm_i915_private *tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ;
{
tmp = to_i915((struct drm_device const *)dev);
e = & tmp->gpu_error;
if (!
i915.enable_hangcheck) { return; } else { }
tmp___0 = msecs_to_jiffies(1500U);
tmp___1 = round_jiffies_up_relative(tmp___0);
queue_delayed_work___1(e->hangcheck_wq, & e->hangcheck_work, tmp___1);
return;
}
}
/* ibx_irq_reset - disable and ack all south-display (PCH) interrupts.
 * Register constants: 802820 = SDEIMR (0xc4004), 802824 = SDEIIR (0xc4008),
 * 802828 = SDEIER (0xc400c), 802880 = SERR_INT (0xc4040).
 * Pattern per register block: IMR = ~0 (posting read), IER = 0,
 * IIR written ~0 twice (double-clear) with posting reads. */
static void ibx_irq_reset(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
/* pch_type 5 is presumably PCH_NOP (no south display) - nothing to do. */
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p->pch_type == 5U) { return; } else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802820L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802820L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802828L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802824L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802824L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 0);
/* pch_type 2 and 3 additionally clear SERR_INT. */
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p___0->pch_type == 2U) {
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802880L, 4294967295U, 1);
} else {
__p___1 = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p___1->pch_type == 3U) {
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802880L, 4294967295U, 1);
} else { }
}
return;
}
}
/* ibx_irq_pre_postinstall - verify SDEIER is zero (expanded WARN_ON), then
 * enable all south-display interrupt sources in SDEIER ahead of the
 * postinstall stage; skipped on pch_type 5 (NOP PCH). */
static void ibx_irq_pre_postinstall(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p->pch_type == 5U) { return; } else { }
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802828L, 1);
__ret_warn_on = tmp != 0U;
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3075, "WARN_ON(I915_READ(SDEIER) != 0)");
} else { }
ldv__builtin_expect(__ret_warn_on != 0, 0L);
/* Enable every SDE interrupt source; posting read to flush. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802828L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802828L, 0);
return;
}
}
/* gen5_gt_irq_reset - disable and ack GT interrupts, and on gen6+ the PM
 * interrupts too. Registers: 278548 = GTIMR (0x44014), 278552 = GTIIR,
 * 278556 = GTIER, 278564 = GEN6_PMIMR (0x44024), 278568 = GEN6_PMIIR,
 * 278572 = GEN6_PMIER. Same mask/disable/double-ack pattern as
 * ibx_irq_reset. */
static void gen5_gt_irq_reset(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278548L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278548L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278556L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278552L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278552L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 0);
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p->info.gen) > 5U) {
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278564L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278564L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278572L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278568L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278568L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 0);
} else { }
return;
}
}
/* ironlake_irq_reset - full display-engine interrupt reset for ILK-class
 * hardware: mask HWSTAM (8344 = 0x2098), reset the DE IMR/IER/IIR block
 * (278532/278540/278536 = DEIMR/DEIER/DEIIR), then delegate to the GT and
 * PCH reset helpers. */
static void ironlake_irq_reset(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8344L,
4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278532L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278532L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278540L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278536L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278536L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278536L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278536L, 0);
/* gen7 only: clear register 278592 (0x44040 - presumably GEN7_ERR_INT;
 * confirm against i915_reg.h). */
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p->info.gen) == 7U) {
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278592L, 4294967295U, 1);
} else { }
gen5_gt_irq_reset(dev);
ibx_irq_reset(dev);
return;
}
}
/* vlv_display_irq_reset - reset VLV/CHV display interrupts: clear hotplug
 * enables (397584 = PORT_HOTPLUG_EN 0x61110), ack hotplug status (397588 =
 * PORT_HOTPLUG_STAT), clear every pipe's PIPESTAT (per-pipe offset of
 * 0x70024 = 458788), then reset the VLV IMR/IER/IIR block
 * (1581224/1581216/1581220 = VLV_IMR/VLV_IER/VLV_IIR). */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv )
{ enum pipe pipe ; uint32_t tmp ; struct drm_i915_private *__p ;
{
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0U, 1);
/* Ack latched hotplug status by writing it back. */
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), tmp, 1);
/* Write 0xffff to each pipe's PIPESTAT to clear all status bits. */
pipe = 0; goto ldv_49499;
ldv_49498: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 65535U, 1);
pipe = (enum pipe )((int )pipe + 1);
ldv_49499: __p = dev_priv;
if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_49498; } else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581224L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581224L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581216L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581220L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581220L, 0);
return;
}
}
/* valleyview_irq_preinstall - quiesce all VLV interrupt sources before
 * installing handlers: master VLV_IMR off, HWSTAM (8344) and the per-ring
 * IMR registers (8360/73896/139432 = render/VCS/BCS base + 0x20a8) zeroed,
 * GT reset, DPINVGTT status (2031660) cleared, then display reset. */
static void valleyview_irq_preinstall(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581224L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 73896L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 139432L, 0U, 1);
gen5_gt_irq_reset(dev);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031660L, 255U, 1);
vlv_display_irq_reset(dev_priv);
return;
}
}
/* gen8_gt_irq_reset - reset all four gen8 GT interrupt banks
 * (GEN8_GT_IMR/IIR/IER(0..3), base 0x44300: 279300/279304/279308 for bank
 * 0, then +16 per bank) using the usual mask / disable / double-ack
 * pattern with posting reads. */
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv )
{
{
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279300L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279300L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279308L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279304L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279304L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279304L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279304L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279316L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279316L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279324L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279320L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279320L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279320L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279320L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279332L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279332L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv,
279340L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279336L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279336L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279336L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279336L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279348L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279348L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279356L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279352L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279352L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279352L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279352L, 0);
return;
}
}
/* gen8_irq_reset - full gen8 interrupt reset: master irq off (279040 =
 * GEN8_MASTER_IRQ 0x44200), GT banks, per-pipe DE IMR/IER/IIR blocks
 * (279556/279564/279560 + pipe*16) gated on the pipe's power well, DE port
 * (279620/279628/279624), DE misc (279652/279660/279656), PCU
 * (279780/279788/279784), then the PCH block if one is present. */
static void gen8_irq_reset(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; int pipe ; bool tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279040L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 0);
gen8_gt_irq_reset(dev_priv);
pipe = 0; goto ldv_49520;
/* Only touch a pipe's registers while its power domain is enabled -
 * access to a powered-down well would fault. */
ldv_49519: tmp = intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )pipe);
if ((int )tmp) {
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279556), 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279556), 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279564), 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279560), 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279560), 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279560), 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279560), 0);
} else { }
pipe = pipe + 1;
ldv_49520: __p =
dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_49519; } else { }
/* DE port block. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279620L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279620L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279628L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279624L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279624L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279624L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279624L, 0);
/* DE misc block. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279652L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279652L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279660L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279656L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279656L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279656L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279656L, 0);
/* PCU block. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279780L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279780L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279788L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279784L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279784L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 0);
/* pch_type 0 is presumably PCH_NONE; anything else has PCH irqs to reset. */
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p___0->pch_type != 0U) { ibx_irq_reset(dev); } else { }
return;
}
}
/* gen8_irq_power_well_post_enable - after a display power well comes back
 * up, re-program the DE pipe IMR/IER for the pipes in pipe_mask (bit 0 =
 * pipe A, bit 1 = B, bit 2 = C). Each pipe's IIR is first checked to be
 * zero (expanded WARN + double-clear if not), mirroring the GEN8_IRQ_INIT
 * macro. extra_ier = bit31|bit0 adds underrun/vblank-type sources beyond
 * ~de_irq_mask - verify bit meaning against i915_reg.h. */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv , unsigned int pipe_mask )
{ uint32_t extra_ier ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; u32 val___0 ; uint32_t tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; u32 val___1 ; uint32_t tmp___3 ; int __ret_warn_on___1 ; long tmp___4 ;
{
extra_ier =
2147483649U;
spin_lock_irq(& dev_priv->irq_lock);
/* Pipe A (279560 = IIR, 279564 = IER, 279556 = IMR). */
if ((int )pipe_mask & 1) {
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279560L, 1);
val = tmp;
if (val != 0U) {
__ret_warn_on = 1;
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3176, "Interrupt register 0x%x is not zero: 0x%08x\n", 279560, val);
} else { }
ldv__builtin_expect(__ret_warn_on != 0, 0L);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279560L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279560L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279560L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279560L, 0);
} else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279564L, ~ dev_priv->__annonCompField82.de_irq_mask[0] | extra_ier, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279556L, dev_priv->__annonCompField82.de_irq_mask[0], 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279556L, 0);
} else { }
/* Pipe B (279576 = IIR, 279580 = IER, 279572 = IMR). */
if ((pipe_mask & 2U) != 0U) {
tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279576L, 1);
val___0 = tmp___1;
if (val___0 != 0U) {
__ret_warn_on___0 = 1;
tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L);
if (tmp___2 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3180, "Interrupt register 0x%x is not zero: 0x%08x\n", 279576, val___0);
} else { }
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279576L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279576L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279576L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279576L, 0);
} else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279580L, ~ dev_priv->__annonCompField82.de_irq_mask[1] | extra_ier, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279572L, dev_priv->__annonCompField82.de_irq_mask[1], 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279572L, 0);
} else { }
/* Pipe C (279592 = IIR, 279596 = IER, 279588 = IMR). */
if ((pipe_mask & 4U) != 0U) {
tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279592L, 1);
val___1 = tmp___3;
if (val___1 != 0U) {
__ret_warn_on___1 = 1;
tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L);
if (tmp___4 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3184, "Interrupt register 0x%x is not zero: 0x%08x\n", 279592, val___1);
} else { }
ldv__builtin_expect(__ret_warn_on___1 != 0, 0L);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279592L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279592L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279592L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279592L, 0);
} else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279596L, ~ dev_priv->__annonCompField82.de_irq_mask[2] | extra_ier, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279588L, dev_priv->__annonCompField82.de_irq_mask[2], 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279588L, 0);
} else { }
spin_unlock_irq(& dev_priv->irq_lock);
return;
}
}
/* cherryview_irq_preinstall - quiesce CHV interrupts: master irq (279040)
 * off, gen8 GT banks reset, PCU IMR/IER/IIR reset, DPINVGTT (2031660)
 * cleared with the wider CHV status mask (0xfff), then display reset. */
static void cherryview_irq_preinstall(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279040L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 0);
gen8_gt_irq_reset(dev_priv);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279780L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279780L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279788L, 0U, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279784L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279784L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2031660L, 4095U, 1);
vlv_display_irq_reset(dev_priv);
return;
}
}
/* ibx_hpd_irq_setup - enable PCH hotplug interrupts for every encoder whose
 * hpd_pin is currently unmarked (hpd_mark == 0), using the IBX table on
 * pch_type 1 and the CPT table otherwise, then program pulse durations in
 * PCH_PORT_HOTPLUG (802864 = 0xc4030). */
static void ibx_hpd_irq_setup(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; u32 hotplug_irqs ; u32 hotplug ; u32 enabled_irqs ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct drm_i915_private *__p ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
enabled_irqs = 0U;
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p->pch_type == 1U) {
hotplug_irqs = 3904U;
/* list_for_each_entry over dev->mode_config.encoder_list, expanded:
 * __mptr arithmetic is container_of on encoder.base.head. */
__mptr = (struct list_head const *)dev->mode_config.encoder_list.next;
intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL;
goto ldv_49565;
ldv_49564: ;
if ((unsigned int )dev_priv->hpd_stats[(unsigned int )intel_encoder->hpd_pin].hpd_mark == 0U) {
enabled_irqs = (u32 )hpd_ibx[(unsigned int )intel_encoder->hpd_pin] | enabled_irqs;
} else { }
__mptr___0 = (struct list_head const *)intel_encoder->base.head.next;
intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL;
ldv_49565: ;
if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_49564; } else { }
} else {
hotplug_irqs = 15466496U;
__mptr___1 = (struct list_head const *)dev->mode_config.encoder_list.next;
intel_encoder = (struct intel_encoder *)__mptr___1 +
0xfffffffffffffff8UL;
goto ldv_49572;
/* Same encoder walk as the IBX branch, but using the CPT hpd table. */
ldv_49571: ;
if ((unsigned int )dev_priv->hpd_stats[(unsigned int )intel_encoder->hpd_pin].hpd_mark == 0U) {
enabled_irqs = (u32 )hpd_cpt[(unsigned int )intel_encoder->hpd_pin] | enabled_irqs;
} else { }
__mptr___2 = (struct list_head const *)intel_encoder->base.head.next;
intel_encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL;
ldv_49572: ;
if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_49571; } else { }
}
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
/* Set hotplug detect pulse durations for ports B/C/D; the mask clears the
 * old duration fields before OR-ing the new values in. */
hotplug = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802864L, 1);
hotplug = hotplug & 4294177779U;
hotplug = hotplug | 1048576U;
hotplug = hotplug | 4096U;
hotplug = hotplug | 16U;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802864L, hotplug, 1);
return;
}
}
/* bxt_hpd_irq_setup - Broxton hotplug setup: collect the hpd_bxt bits of
 * unmarked encoder pins, enable the matching invert/enable bits in
 * PCH_PORT_HOTPLUG (802864), then unmask and enable those sources in the
 * DE port IMR/IER (279620/279628). DDIA (bit 3) is expanded WARN_ON_ONCE'd
 * as unsupported here. */
static void bxt_hpd_irq_setup(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; u32 hotplug_port ; u32 hotplug_ctrl ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; uint32_t tmp ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
hotplug_port = 0U;
__mptr = (struct list_head const *)dev->mode_config.encoder_list.next;
intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL;
goto ldv_49586;
ldv_49585: ;
if ((unsigned int )dev_priv->hpd_stats[(unsigned int )intel_encoder->hpd_pin].hpd_mark == 0U) {
hotplug_port = (u32 )hpd_bxt[(unsigned int )intel_encoder->hpd_pin] | hotplug_port;
} else { }
__mptr___0 = (struct list_head const *)intel_encoder->base.head.next;
intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL;
ldv_49586: ;
if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_49585; } else { }
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802864L, 1);
hotplug_ctrl = tmp & 4026527727U;
/* Expanded WARN_ON_ONCE: DDIA hotplug (bit 3) must not be requested. */
__ret_warn_once = (hotplug_port & 8U) != 0U;
tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L);
if (tmp___2 != 0L) {
__ret_warn_on = ! __warned;
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3257, "WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA)");
} else { }
tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___1 != 0L) { __warned = 1; } else { }
} else { }
ldv__builtin_expect(__ret_warn_once != 0, 0L);
/* DDIB (bit 4) / DDIC (bit 5) map to control bits 16 / 4096. */
if ((hotplug_port & 16U) != 0U) { hotplug_ctrl = hotplug_ctrl | 16U; } else { }
if ((hotplug_port & 32U) != 0U) { hotplug_ctrl = hotplug_ctrl | 4096U; } else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802864L, hotplug_ctrl, 1);
/* Unmask (IMR 279620) and enable (IER 279628) the collected port bits. */
tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279620L, 1);
hotplug_ctrl = tmp___3 & ~ hotplug_port;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279620L, hotplug_ctrl, 1);
tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279628L, 1);
hotplug_ctrl = tmp___4 | hotplug_port;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279628L, hotplug_ctrl, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279628L, 0);
return;
}
}
/* ibx_irq_postinstall - unmask the south-display interrupt sources in
 * SDEIMR (802820) after first checking SDEIIR (802824) is clean (expanded
 * WARN + double-clear). The mask differs between IBX (pch_type 1) and
 * CPT/PPT-class PCHs; pch_type 5 (NOP) is skipped. */
static void ibx_irq_postinstall(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; u32 mask ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p->pch_type == 5U) { return; } else { }
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )__p___0->pch_type == 1U) { mask = 17358848U; }
else { mask = 235012096U; }
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 1);
val = tmp;
if (val != 0U) {
__ret_warn_on = 1;
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3287, "Interrupt register 0x%x is not zero: 0x%08x\n", 802824, val);
} else { }
ldv__builtin_expect(__ret_warn_on != 0, 0L);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802824L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802824L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802824L, 0);
} else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802820L, ~ mask, 1);
return;
}
}
/* gen5_gt_irq_postinstall - program the GT interrupt mask/enable (GTIMR
 * 278548 / GTIER 278556) and, on gen6+, the PM block (GEN6_PMIMR 278564 /
 * GEN6_PMIER 278572), after expanded IIR-is-zero WARN checks. The byte at
 * dev_priv+45 read below is a packed feature flag pair - presumably the
 * GPU-error/L3-parity capability bits selecting the parity irq bits in
 * gt_irq_mask; confirm against the struct layout. */
static void gen5_gt_irq_postinstall(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; u32 pm_irqs ; u32 gt_irqs ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p___4 ; u32 val___0 ; uint32_t tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; struct drm_i915_private *__p___5 ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
gt_irqs = 0U;
pm_irqs = gt_irqs;
dev_priv->gt_irq_mask = 4294967295U;
__p___1 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) {
__p = to_i915((struct drm_device const *)dev);
dev_priv->gt_irq_mask = (unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 4294965215U : 4294967263U;
__p___0 = to_i915((struct drm_device const *)dev);
gt_irqs = ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U ?
2080U : 32U) | gt_irqs;
} else {
__p___2 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) {
__p = to_i915((struct drm_device const *)dev);
dev_priv->gt_irq_mask = (unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 4294965215U : 4294967263U;
__p___0 = to_i915((struct drm_device const *)dev);
gt_irqs = ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U ? 2080U : 32U) | gt_irqs;
} else { }
}
/* Render user interrupt (bit 0) always on; gen5 adds bits 4-5, gen6+
 * adds the VCS/BCS user-interrupt bits (0x401000). */
gt_irqs = gt_irqs | 1U;
__p___3 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___3->info.gen) == 5U) { gt_irqs = gt_irqs | 48U; } else { gt_irqs = gt_irqs | 4198400U; }
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 1);
val = tmp;
if (val != 0U) {
__ret_warn_on = 1;
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3313, "Interrupt register 0x%x is not zero: 0x%08x\n", 278552, val);
} else { }
ldv__builtin_expect(__ret_warn_on != 0, 0L);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278552L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278552L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278552L, 0);
} else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278556L, gt_irqs, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278548L, dev_priv->gt_irq_mask, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278548L, 0);
/* gen6+: program the PM interrupt block; VEBOX (ring_mask bit 3) adds
 * its user-interrupt bit to pm_irqs. */
__p___5 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___5->info.gen) > 5U) {
__p___4 = to_i915((struct drm_device const *)dev);
if (((int )__p___4->info.ring_mask & 8) != 0) { pm_irqs = pm_irqs | 1024U; } else { }
dev_priv->pm_irq_mask =
4294967295U;
tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 1);
val___0 = tmp___1;
if (val___0 != 0U) {
__ret_warn_on___0 = 1;
tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L);
if (tmp___2 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3324, "Interrupt register 0x%x is not zero: 0x%08x\n", 278568, val___0);
} else { }
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278568L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278568L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278568L, 0);
} else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278572L, pm_irqs, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278564L, dev_priv->pm_irq_mask, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278564L, 0);
} else { }
return;
}
}
/* ironlake_irq_postinstall - enable display-engine interrupts on ILK/SNB/
 * IVB-class hardware: pick the gen-dependent display/extra masks, unmask
 * HWSTAM (8344) except the sources the CPU handles (61438 = 0xeffe), run
 * the SDE pre-postinstall check, program DEIER/DEIMR (278540/278532) after
 * an expanded DEIIR-is-zero WARN, then GT and PCH postinstall. Always
 * returns 0. */
static int ironlake_irq_postinstall(struct drm_device *dev )
{ struct drm_i915_private *dev_priv ; u32 display_mask ; u32 extra_mask ; struct drm_i915_private *__p ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p___0 ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p->info.gen) > 6U) {
display_mask = 3019907336U;
extra_mask = 1073742881U;
} else {
display_mask = 2360607748U;
extra_mask = 33587585U;
}
dev_priv->__annonCompField82.irq_mask = ~ display_mask;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8344L, 61438U, 1);
ibx_irq_pre_postinstall(dev);
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278536L, 1);
val = tmp;
if (val != 0U) {
__ret_warn_on = 1;
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if
(tmp___0 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3356, "Interrupt register 0x%x is not zero: 0x%08x\n", 278536, val);
} else { }
ldv__builtin_expect(__ret_warn_on != 0, 0L);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278536L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278536L, 0);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278536L, 4294967295U, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278536L, 0);
} else { }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278540L, display_mask | extra_mask, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278532L, dev_priv->__annonCompField82.irq_mask, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278532L, 0);
gen5_gt_irq_postinstall(dev);
ibx_irq_postinstall(dev);
/* device_id 0x0046 (ILK mobile): additionally enable the PCU event
 * interrupt (bit 25) under the irq lock. */
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned short )__p___0->info.device_id) == 70U) {
spin_lock_irq(& dev_priv->irq_lock);
ironlake_enable_display_irq(dev_priv, 33554432U);
spin_unlock_irq(& dev_priv->irq_lock);
} else { }
return (0);
}
}
/* valleyview_display_irqs_install - enable VLV/CHV display interrupts:
 * arm every pipe's PIPESTAT (458788 per-pipe), enable the underrun/hotplug
 * status bits, then open up VLV_IIR (1581220, double-write to ack),
 * VLV_IER (1581216) and VLV_IMR (1581224) for the iir_mask sources. */
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv )
{ u32 pipestat_mask ; u32 iir_mask ; enum pipe pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ;
{
pipestat_mask = 2147549183U;
pipe = 0; goto ldv_49701;
ldv_49700: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), pipestat_mask, 1);
pipe = (enum pipe )((int )pipe + 1);
ldv_49701: __p = dev_priv;
if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_49700; } else { }
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458788U), 0); pipestat_mask = 5120U; i915_enable_pipestat(dev_priv, 0, 2048U); pipe = 0; goto ldv_49710; ldv_49709: i915_enable_pipestat(dev_priv, pipe, pipestat_mask); pipe = (enum pipe )((int )pipe + 1); ldv_49710: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > (int )pipe) { goto ldv_49709; } else { } iir_mask = 131152U; __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { iir_mask = iir_mask | 512U; } else { } } else { } dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask & ~ iir_mask; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, iir_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, iir_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581216L, ~ dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581224L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581224L, 0); return; } } static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv ) { u32 pipestat_mask ; u32 iir_mask ; enum pipe pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { iir_mask = 131152U; __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { iir_mask = iir_mask | 512U; } else { } } else { } dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask | iir_mask; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581224L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581216L, ~ dev_priv->__annonCompField82.irq_mask, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, iir_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, iir_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581220L, 0); pipestat_mask = 5120U; i915_disable_pipestat(dev_priv, 0, 2048U); pipe = 0; goto ldv_49749; ldv_49748: i915_disable_pipestat(dev_priv, pipe, pipestat_mask); pipe = (enum pipe )((int )pipe + 1); ldv_49749: __p___1 = dev_priv; if ((int )__p___1->info.num_pipes > (int )pipe) { goto ldv_49748; } else { } pipestat_mask = 2147549183U; pipe = 0; goto ldv_49758; ldv_49757: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), pipestat_mask, 1); pipe = (enum pipe )((int )pipe + 1); ldv_49758: __p___2 = dev_priv; if ((int )__p___2->info.num_pipes > (int )pipe) { goto ldv_49757; } else { } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 458788U), 0); return; } } void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv ) { int tmp ; long tmp___0 ; bool tmp___1 ; { tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (3446), "i" (12UL)); ldv_49763: ; goto ldv_49763; } else { } if ((int )dev_priv->display_irqs_enabled) { return; } else { } dev_priv->display_irqs_enabled = 1; tmp___1 = intel_irqs_enabled(dev_priv); if ((int )tmp___1) { valleyview_display_irqs_install(dev_priv); } else 
{ } return; } } void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv ) { int tmp ; long tmp___0 ; bool tmp___1 ; { tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (3459), "i" (12UL)); ldv_49767: ; goto ldv_49767; } else { } if (! dev_priv->display_irqs_enabled) { return; } else { } dev_priv->display_irqs_enabled = 0; tmp___1 = intel_irqs_enabled(dev_priv); if ((int )tmp___1) { valleyview_display_irqs_uninstall(dev_priv); } else { } return; } } static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv ) { { dev_priv->__annonCompField82.irq_mask = 4294967295U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581220L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581216L, ~ dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581224L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581224L, 0); spin_lock_irq(& dev_priv->irq_lock); if ((int )dev_priv->display_irqs_enabled) { valleyview_display_irqs_install(dev_priv); } else { } spin_unlock_irq(& dev_priv->irq_lock); return; } } static int valleyview_irq_postinstall(struct drm_device *dev ) { 
struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; vlv_display_irq_postinstall(dev_priv); gen5_gt_irq_postinstall(dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278540L, 2147483648U, 1); return (0); } } static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv ) { uint32_t gt_interrupts[4U] ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; u32 val___0 ; uint32_t tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; u32 val___1 ; uint32_t tmp___3 ; int __ret_warn_on___1 ; long tmp___4 ; u32 val___2 ; uint32_t tmp___5 ; int __ret_warn_on___2 ; long tmp___6 ; { gt_interrupts[0] = 16843041U; gt_interrupts[1] = 16843009U; gt_interrupts[2] = 0U; gt_interrupts[3] = 257U; dev_priv->pm_irq_mask = 4294967295U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279304L, 1); val = tmp; if (val != 0U) { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3529, "Interrupt register 0x%x is not zero: 0x%08x\n", 279304, val); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279304L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279304L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279304L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279304L, 0); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279308L, gt_interrupts[0], 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279300L, ~ gt_interrupts[0], 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279300L, 0); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279320L, 1); val___0 = tmp___1; if (val___0 != 0U) { __ret_warn_on___0 = 1; tmp___2 = 
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3530, "Interrupt register 0x%x is not zero: 0x%08x\n", 279320, val___0); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279320L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279320L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279320L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279320L, 0); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279324L, gt_interrupts[1], 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279316L, ~ gt_interrupts[1], 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279316L, 0); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279336L, 1); val___1 = tmp___3; if (val___1 != 0U) { __ret_warn_on___1 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3535, "Interrupt register 0x%x is not zero: 0x%08x\n", 279336, val___1); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279336L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279336L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279336L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279336L, 0); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279340L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279332L, dev_priv->pm_irq_mask, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279332L, 0); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279352L, 1); val___2 = tmp___5; if (val___2 != 0U) { __ret_warn_on___2 = 1; tmp___6 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3536, "Interrupt register 0x%x is not zero: 0x%08x\n", 279352, val___2); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279352L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279352L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279352L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279352L, 0); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279356L, gt_interrupts[3], 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279348L, ~ gt_interrupts[3], 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279348L, 0); return; } } static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv ) { uint32_t de_pipe_masked ; uint32_t de_pipe_enables ; int pipe ; u32 de_port_en ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; bool tmp___1 ; struct drm_i915_private *__p___2 ; u32 val___0 ; uint32_t tmp___2 ; int __ret_warn_on___0 ; long tmp___3 ; { de_pipe_masked = 268435456U; de_port_en = 1U; __p___1 = dev_priv; if ((unsigned int )((unsigned char )__p___1->info.gen) == 9U) { de_pipe_masked = de_pipe_masked | 3976U; de_port_en = de_port_en | 234881024U; __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { de_port_en = de_port_en 
| 2U; } else { } } else { } } else { de_pipe_masked = de_pipe_masked | 1808U; } de_pipe_enables = de_pipe_masked | 2147483649U; dev_priv->__annonCompField82.de_irq_mask[0] = ~ de_pipe_masked; dev_priv->__annonCompField82.de_irq_mask[1] = ~ de_pipe_masked; dev_priv->__annonCompField82.de_irq_mask[2] = ~ de_pipe_masked; pipe = 0; goto ldv_49826; ldv_49825: tmp___1 = intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )pipe); if ((int )tmp___1) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279560), 1); val = tmp; if (val != 0U) { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3570, "Interrupt register 0x%x is not zero: 0x%08x\n", pipe * 16 + 279560, val); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279560), 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279560), 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279560), 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279560), 0); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279564), de_pipe_enables, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 16 + 279556), dev_priv->__annonCompField82.de_irq_mask[pipe], 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 16 + 279556), 0); } else { } pipe = pipe + 1; ldv_49826: __p___2 = dev_priv; if ((int )__p___2->info.num_pipes > pipe) { goto ldv_49825; } else { } tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279624L, 1); val___0 = tmp___2; if (val___0 != 0U) { __ret_warn_on___0 = 
1; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c", 3572, "Interrupt register 0x%x is not zero: 0x%08x\n", 279624, val___0); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279624L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279624L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279624L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279624L, 0); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279628L, de_port_en, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279620L, ~ de_port_en, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279620L, 0); return; } } static int gen8_irq_postinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { ibx_irq_pre_postinstall(dev); } else { } gen8_gt_irq_postinstall(dev_priv); gen8_de_irq_postinstall(dev_priv); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 0U) { ibx_irq_postinstall(dev); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279040L, 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 0); return (0); } } static int cherryview_irq_postinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; vlv_display_irq_postinstall(dev_priv); gen8_gt_irq_postinstall(dev_priv); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279040L, 2147483648U, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 0); return (0); } } static void gen8_irq_uninstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) { return; } else { } gen8_irq_reset(dev); return; } } static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv ) { { spin_lock_irq(& dev_priv->irq_lock); if ((int )dev_priv->display_irqs_enabled) { valleyview_display_irqs_uninstall(dev_priv); } else { } spin_unlock_irq(& dev_priv->irq_lock); vlv_display_irq_reset(dev_priv); dev_priv->__annonCompField82.irq_mask = 4294967295U; return; } } static void valleyview_irq_uninstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) { return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278540L, 0U, 1); gen5_gt_irq_reset(dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8344L, 4294967295U, 1); vlv_display_irq_uninstall(dev_priv); return; } } static void cherryview_irq_uninstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) { return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279040L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279040L, 0); gen8_gt_irq_reset(dev_priv); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279780L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279780L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279788L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 279784L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 
279784L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 279784L, 0); vlv_display_irq_uninstall(dev_priv); return; } } static void ironlake_irq_uninstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) { return; } else { } ironlake_irq_reset(dev); return; } } static void i8xx_irq_preinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int pipe ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = 0; goto ldv_49882; ldv_49881: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 0U, 1); pipe = pipe + 1; ldv_49882: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_49881; } else { } (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8360L, 65535, 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8352L, 0, 1); (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8352L, 0); return; } } static int i8xx_irq_postinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8372L, 65517, 1); dev_priv->__annonCompField82.irq_mask = 4294964143U; (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8360L, (int )((uint16_t )dev_priv->__annonCompField82.irq_mask), 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8352L, 82, 1); (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8352L, 0); spin_lock_irq(& dev_priv->irq_lock); i915_enable_pipestat(dev_priv, 0, 4096U); i915_enable_pipestat(dev_priv, 1, 4096U); spin_unlock_irq(& dev_priv->irq_lock); return (0); } } static bool i8xx_handle_vblank(struct drm_device *dev , int plane , int pipe , u32 iir ) { struct 
drm_i915_private *dev_priv ; u16 flip_pending ; bool tmp ; int tmp___0 ; uint16_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; flip_pending = (u16 )(1 << (11 - plane)); tmp = intel_pipe_handle_vblank(dev, (enum pipe )pipe); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } if (((u32 )flip_pending & iir) == 0U) { goto check_page_flip; } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8364L, 1); if ((unsigned int )((int )tmp___1 & (int )flip_pending) != 0U) { goto check_page_flip; } else { } intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return (1); check_page_flip: intel_check_page_flip(dev, pipe); return (0); } } static irqreturn_t i8xx_irq_handler(int irq , void *arg ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u16 iir ; u16 new_iir ; u32 pipe_stats[2U] ; int pipe ; u16 flip_mask ; bool tmp ; int tmp___0 ; long tmp___1 ; int reg ; struct drm_i915_private *__p ; int plane ; struct drm_i915_private *__p___0 ; bool tmp___2 ; struct drm_i915_private *__p___1 ; { dev = (struct drm_device *)arg; dev_priv = (struct drm_i915_private *)dev->dev_private; flip_mask = 3072U; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } iir = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8356L, 1); if ((unsigned int )iir == 0U) { return (0); } else { } goto ldv_49936; ldv_49935: spin_lock(& dev_priv->irq_lock); if ((int )((short )iir) < 0) { tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i8xx_irq_handler", "Command parser error, iir 0x%08x\n", (int )iir); } else { } } else { } pipe = 0; goto ldv_49917; ldv_49916: reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U); pipe_stats[pipe] = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((pipe_stats[pipe] & 2147549183U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipe_stats[pipe], 1); } else { } pipe = pipe + 1; ldv_49917: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_49916; } else { } spin_unlock(& dev_priv->irq_lock); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8356L, (int )((uint16_t )(~ ((int )((short )flip_mask)) & (int )((short )iir))), 1); new_iir = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8356L, 1); if (((int )iir & 2) != 0) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring)); } else { } pipe = 0; goto ldv_49933; ldv_49932: plane = pipe; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { plane = plane == 0; } else { } if (((unsigned long )pipe_stats[pipe] & 2UL) != 0UL) { tmp___2 = i8xx_handle_vblank(dev, plane, pipe, (u32 )iir); if ((int )tmp___2) { flip_mask = (u16 )(~ ((int )((short )(1 << (11 - plane)))) & (int )((short )flip_mask)); } else { } } else { } if (((unsigned long )pipe_stats[pipe] & 4096UL) != 0UL) { i9xx_pipe_crc_irq_handler(dev, (enum pipe )pipe); } else { } if ((int )pipe_stats[pipe] < 0) { intel_cpu_fifo_underrun_irq_handler(dev_priv, (enum pipe )pipe); } else { } pipe = pipe + 1; ldv_49933: __p___1 = dev_priv; if ((int )__p___1->info.num_pipes > pipe) { goto ldv_49932; } else { } iir = new_iir; ldv_49936: ; if (((int )iir & ~ ((int )flip_mask)) != 0) { goto ldv_49935; } else { } return (1); } } static void i8xx_irq_uninstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int pipe ; uint32_t tmp ; struct drm_i915_private *__p ; uint16_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = 0; goto ldv_49950; ldv_49949: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int 
)dev_priv->info.display_mmio_offset) + 458788U), 0U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), tmp, 1); pipe = pipe + 1; ldv_49950: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_49949; } else { } (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8360L, 65535, 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8352L, 0, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8356L, 1); (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8356L, (int )tmp___0, 1); return; } } static void i915_irq_preinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int pipe ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), tmp, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8344L, 61438, 1); pipe = 0; goto ldv_49970; ldv_49969: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 0U, 1); pipe = pipe + 1; ldv_49970: 
__p___0 = dev_priv; if ((int )__p___0->info.num_pipes > pipe) { goto ldv_49969; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 0); return; } } static int i915_irq_postinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 enable_mask ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8372L, 4294967277U, 1); dev_priv->__annonCompField82.irq_mask = 4294964142U; enable_mask = 83U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0); enable_mask = enable_mask | 131072U; dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask & 4294836223U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, enable_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 0); i915_enable_asle_pipestat(dev); spin_lock_irq(& dev_priv->irq_lock); i915_enable_pipestat(dev_priv, 0, 4096U); i915_enable_pipestat(dev_priv, 1, 4096U); spin_unlock_irq(& dev_priv->irq_lock); return (0); } } static bool i915_handle_vblank(struct drm_device *dev , int plane , int pipe , u32 iir ) { struct drm_i915_private *dev_priv ; u32 flip_pending ; bool tmp ; int tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; flip_pending = (u32 )(1 << (11 - plane)); tmp = intel_pipe_handle_vblank(dev, (enum pipe )pipe); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { 
return (0); } else { } if ((iir & flip_pending) == 0U) { goto check_page_flip; } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8364L, 1); if ((tmp___1 & flip_pending) != 0U) { goto check_page_flip; } else { } intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return (1); check_page_flip: intel_check_page_flip(dev, pipe); return (0); } } static irqreturn_t i915_irq_handler(int irq , void *arg ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 iir ; u32 new_iir ; u32 pipe_stats[3U] ; u32 flip_mask ; int pipe ; int ret ; bool tmp ; int tmp___0 ; bool irq_received ; bool blc_event ; long tmp___1 ; int reg ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int plane ; struct drm_i915_private *__p___1 ; bool tmp___2 ; struct drm_i915_private *__p___2 ; { dev = (struct drm_device *)arg; dev_priv = (struct drm_i915_private *)dev->dev_private; flip_mask = 3072U; ret = 0; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8356L, 1); ldv_50040: irq_received = (~ flip_mask & iir) != 0U; blc_event = 0; spin_lock(& dev_priv->irq_lock); if ((iir & 32768U) != 0U) { tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i915_irq_handler", "Command parser error, iir 0x%08x\n", iir); } else { } } else { } pipe = 0; goto ldv_50015; ldv_50014: reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U); pipe_stats[pipe] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((pipe_stats[pipe] & 2147549183U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipe_stats[pipe], 1); irq_received = 1; } else { } pipe = pipe + 1; ldv_50015: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto 
ldv_50014; } else { } spin_unlock(& dev_priv->irq_lock); if (! irq_received) { goto ldv_50017; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) != 0U && (iir & 131072U) != 0U) { i9xx_hpd_irq_handler(dev); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8356L, ~ flip_mask & iir, 1); new_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8356L, 1); if ((iir & 2U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring)); } else { } pipe = 0; goto ldv_50038; ldv_50037: plane = pipe; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { plane = plane == 0; } else { } if (((unsigned long )pipe_stats[pipe] & 2UL) != 0UL) { tmp___2 = i915_handle_vblank(dev, plane, pipe, iir); if ((int )tmp___2) { flip_mask = (u32 )(~ (1 << (11 - plane))) & flip_mask; } else { } } else { } if (((unsigned long )pipe_stats[pipe] & 64UL) != 0UL) { blc_event = 1; } else { } if (((unsigned long )pipe_stats[pipe] & 4096UL) != 0UL) { i9xx_pipe_crc_irq_handler(dev, (enum pipe )pipe); } else { } if ((int )pipe_stats[pipe] < 0) { intel_cpu_fifo_underrun_irq_handler(dev_priv, (enum pipe )pipe); } else { } pipe = pipe + 1; ldv_50038: __p___2 = dev_priv; if ((int )__p___2->info.num_pipes > pipe) { goto ldv_50037; } else { } if ((int )blc_event || (int )iir & 1) { intel_opregion_asle_intr(dev); } else { } ret = 1; iir = new_iir; if ((~ flip_mask & iir) != 0U) { goto ldv_50040; } else { } ldv_50017: ; return ((irqreturn_t )ret); } } static void i915_irq_uninstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int pipe ; uint32_t tmp ; struct drm_i915_private *__p ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), tmp, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8344L, 65535, 1); pipe = 0; goto ldv_50059; ldv_50058: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 0U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), tmp___0, 1); pipe = pipe + 1; ldv_50059: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > pipe) { goto ldv_50058; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, 0U, 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8356L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8356L, tmp___1, 1); return; } } static void i965_irq_preinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int pipe ; uint32_t tmp ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int 
/* (continuation of i965_irq_preinstall: ack hotplug-status, mask/zero IRQ regs) */
)dev_priv->info.display_mmio_offset + 397588U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), tmp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8344L, 61438U, 1); pipe = 0; goto ldv_50073; ldv_50072: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 0U, 1); pipe = pipe + 1; ldv_50073: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_50072; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 0); return; } }
/*
 * i965_irq_postinstall() - gen4 IRQ enable. Sets irq_mask = 4294800302
 * (0xFFFD902E), derives enable_mask from its complement plus extra bits
 * (bit 25 added when the device-info byte at offset 44 is set). Under
 * irq_lock enables pipestat events 2048/4096 (presumably GMBUS and CRC-done
 * — TODO confirm) on pipes 0/1. Chooses error_mask by the same offset-44
 * byte, writes it to 8372 (presumably EMR), programs 8360 (mask) and 8352
 * (enable, with posting read), clears hotplug-enable (posting read), then
 * i915_enable_asle_pipestat(). Always returns 0.
 */
static int i965_irq_postinstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 enable_mask ; u32 error_mask ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->__annonCompField82.irq_mask = 4294800302U; enable_mask = ~ dev_priv->__annonCompField82.irq_mask; enable_mask = enable_mask & 4294964223U; enable_mask = enable_mask | 2U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { enable_mask = enable_mask | 33554432U; } else { } spin_lock_irq(& dev_priv->irq_lock); i915_enable_pipestat(dev_priv, 0, 2048U); i915_enable_pipestat(dev_priv, 0, 4096U); i915_enable_pipestat(dev_priv, 1, 4096U); spin_unlock_irq(& dev_priv->irq_lock); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { error_mask = 4294967237U; } else { error_mask = 4294967277U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8372L, error_mask, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, dev_priv->__annonCompField82.irq_mask, 1);
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, enable_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8352L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 0); i915_enable_asle_pipestat(dev); return (0); } }
/*
 * i915_hpd_irq_setup() - program hotplug detection. First asserts irq_lock is
 * held (CIL-expanded BUG(): ud2 + infinite goto if the spinlock is NOT locked).
 * Reads hotplug-enable, masks it with 3254779391 (0xC1FFFFFF), then walks
 * dev->mode_config.encoder_list (CIL-expanded list_for_each_entry; the
 * 0xfff...fff8 additions are container_of offset arithmetic) OR-ing in
 * hpd_mask_i915[pin] for every encoder whose hpd_mark is unset.
 * (Continues on the next chunk: CRT bit, debounce bits, final write.)
 */
static void i915_hpd_irq_setup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; u32 hotplug_en ; int tmp ; long tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_irq.c"), "i" (4133), "i" (12UL)); ldv_50099: ; goto ldv_50099; } else { } hotplug_en = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 1); hotplug_en = hotplug_en & 3254779391U; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_50105; ldv_50104: ; if ((unsigned int )dev_priv->hpd_stats[(unsigned int )intel_encoder->hpd_pin].hpd_mark == 0U) { hotplug_en = (u32 )hpd_mask_i915[(unsigned int )intel_encoder->hpd_pin] | hotplug_en; } else { } __mptr___0 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder
/* (continuation of i915_hpd_irq_setup: finish encoder walk, set bit 8 when the
 * device-info byte at offset 44 is set, mask 0xFFFFFF9F, set bit 5, write back) */
*)__mptr___0 + 0xfffffffffffffff8UL; ldv_50105: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_50104; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { hotplug_en = hotplug_en | 256U; } else { } hotplug_en = hotplug_en & 4294967199U; hotplug_en = hotplug_en | 32U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), hotplug_en, 1); return; } }
/*
 * i965_irq_handler() - gen4 interrupt handler. Bails (returns 0) if IRQs are
 * disabled. Reads 8356 (presumably IIR), then loops (ldv_50148): under
 * irq_lock logs command-parser errors (bit 15) and collects/acks per-pipe
 * status (ack only when bits of 0x8000FFFF are set); on any non-flip IIR bit:
 * dispatches hotplug (bit 17), acks IIR, re-reads it, notifies render ring
 * (bit 1) and ring[1] (bit 25), then per pipe handles vblank (bit 2 of
 * pipe_stats — note plane == pipe here, unlike i915_irq_handler), backlight,
 * CRC, underrun; finally opregion ASLE and GMBUS (pipe-0 bit 11). Exits at
 * ldv_50138 when no interrupt was received.
 */
static irqreturn_t i965_irq_handler(int irq , void *arg ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 iir ; u32 new_iir ; u32 pipe_stats[3U] ; int ret ; int pipe ; u32 flip_mask ; bool tmp ; int tmp___0 ; bool irq_received ; bool blc_event ; long tmp___1 ; int reg ; struct drm_i915_private *__p ; bool tmp___2 ; struct drm_i915_private *__p___0 ; { dev = (struct drm_device *)arg; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; flip_mask = 3072U; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8356L, 1); ldv_50148: irq_received = (~ flip_mask & iir) != 0U; blc_event = 0; spin_lock(& dev_priv->irq_lock); if ((iir & 32768U) != 0U) { tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i965_irq_handler", "Command parser error, iir 0x%08x\n", iir); } else { } } else { } pipe = 0; goto ldv_50136; ldv_50135: reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U); pipe_stats[pipe] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((pipe_stats[pipe] & 2147549183U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipe_stats[pipe], 1); irq_received = 1; } else { } pipe
= pipe + 1; ldv_50136: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_50135; } else { } spin_unlock(& dev_priv->irq_lock); if (! irq_received) { goto ldv_50138; } else { } ret = 1; if ((iir & 131072U) != 0U) { i9xx_hpd_irq_handler(dev); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8356L, ~ flip_mask & iir, 1); new_iir = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8356L, 1); if ((iir & 2U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring)); } else { } if ((iir & 33554432U) != 0U) { notify_ring((struct intel_engine_cs *)(& dev_priv->ring) + 1UL); } else { } pipe = 0; goto ldv_50146; ldv_50145: ; if (((unsigned long )pipe_stats[pipe] & 4UL) != 0UL) { tmp___2 = i915_handle_vblank(dev, pipe, pipe, iir); if ((int )tmp___2) { flip_mask = (u32 )(~ (1 << (11 - pipe))) & flip_mask; } else { } } else { } if (((unsigned long )pipe_stats[pipe] & 64UL) != 0UL) { blc_event = 1; } else { } if (((unsigned long )pipe_stats[pipe] & 4096UL) != 0UL) { i9xx_pipe_crc_irq_handler(dev, (enum pipe )pipe); } else { } if ((int )pipe_stats[pipe] < 0) { intel_cpu_fifo_underrun_irq_handler(dev_priv, (enum pipe )pipe); } else { } pipe = pipe + 1; ldv_50146: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > pipe) { goto ldv_50145; } else { } if ((int )blc_event || (int )iir & 1) { intel_opregion_asle_intr(dev); } else { } if (((unsigned long )pipe_stats[0] & 2048UL) != 0UL) { gmbus_irq_handler(dev); } else { } iir = new_iir; goto ldv_50148; ldv_50138: ; return ((irqreturn_t )ret); } }
/*
 * i965_irq_uninstall() - gen4 IRQ teardown. No-op if dev_priv is NULL.
 * Clears hotplug-enable, acks hotplug-status, writes all-ones to 8344,
 * zeroes each pipe-status register, masks 8360, zeroes 8352, then a second
 * per-pipe pass that acks status bits (tmp & 0x8000FFFF write-back), and
 * finally acks residual 8356 bits. (Continues on the next chunk.)
 */
static void i965_irq_uninstall(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int pipe ; uint32_t tmp ; struct drm_i915_private *__p ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) { return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int
/* (continuation of i965_irq_uninstall: ack hotplug, HWSTAM-like mask, per-pipe
 * clears, mask/disable, second ack pass, final IIR ack) */
)dev_priv->info.display_mmio_offset + 397584U), 0U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), tmp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8344L, 4294967295U, 1); pipe = 0; goto ldv_50161; ldv_50160: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 0U, 1); pipe = pipe + 1; ldv_50161: __p = dev_priv; if ((int )__p->info.num_pipes > pipe) { goto ldv_50160; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8352L, 0U, 1); pipe = 0; goto ldv_50170; ldv_50169: tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), tmp___0 & 2147549183U, 1); pipe = pipe + 1; ldv_50170: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > pipe) { goto ldv_50169; } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8356L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8356L, tmp___1, 1); return; } }
/*
 * intel_hpd_irq_reenable_work() - delayed work (container_of via the
 * 0xffff...6260 offset) that, under a runtime-PM reference and irq_lock,
 * scans HPD pins 1..6: for each pin marked HPD_MARK==1 it clears the mark and,
 * for every connector on that pin, restores connector->polled from the intel
 * connector (forcing it to 1 if it would be 0, logging when it changes), then
 * re-runs display.hpd_irq_setup if present. List walks are CIL-expanded
 * list_for_each_entry (container_of offset 0xffff...ffe8).
 */
static void intel_hpd_irq_reenable_work(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct drm_device *dev ; struct drm_mode_config *mode_config ; int i ; struct drm_connector *connector ; struct list_head const *__mptr___0 ; struct intel_connector *intel_connector ; struct
drm_connector const *__mptr___1 ; long tmp ; struct list_head const *__mptr___2 ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff6260UL; dev = dev_priv->dev; mode_config = & dev->mode_config; intel_runtime_pm_get(dev_priv); spin_lock_irq(& dev_priv->irq_lock); i = 1; goto ldv_50195; ldv_50194: ; if ((unsigned int )dev_priv->hpd_stats[i].hpd_mark != 1U) { goto ldv_50182; } else { } dev_priv->hpd_stats[i].hpd_mark = 0; __mptr___0 = (struct list_head const *)mode_config->connector_list.next; connector = (struct drm_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_50192; ldv_50191: __mptr___1 = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr___1; if ((unsigned int )(intel_connector->encoder)->hpd_pin == (unsigned int )i) { if ((int )connector->polled != (int )intel_connector->polled) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_hpd_irq_reenable_work", "Reenabling HPD on connector %s\n", connector->name); } else { } } else { } connector->polled = intel_connector->polled; if ((unsigned int )connector->polled == 0U) { connector->polled = 1U; } else { } } else { } __mptr___2 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___2 + 0xffffffffffffffe8UL; ldv_50192: ; if ((unsigned long )(& connector->head) != (unsigned long )(& mode_config->connector_list)) { goto ldv_50191; } else { } ldv_50182: i = i + 1; ldv_50195: ; if (i <= 6) { goto ldv_50194; } else { } if ((unsigned long )dev_priv->display.hpd_irq_setup != (unsigned long )((void (*)(struct drm_device * ))0)) { (*(dev_priv->display.hpd_irq_setup))(dev); } else { } spin_unlock_irq(& dev_priv->irq_lock); intel_runtime_pm_put(dev_priv); return; } }
/*
 * intel_irq_init() - one-time IRQ setup. The INIT_WORK/INIT_DELAYED_WORK
 * macros are CIL-expanded (__init_work + data/lockdep/entry/func assignments,
 * and timer setup for delayed work). (Declarations here; body continues on
 * the next chunk.)
 */
void intel_irq_init(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; struct lock_class_key
/* (continuation of intel_irq_init declarations) */
__key___0 ; atomic_long_t __constr_expr_1 ; struct lock_class_key __key___1 ; atomic_long_t __constr_expr_2 ; struct lock_class_key __key___2 ; atomic_long_t __constr_expr_3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct lock_class_key __key___3 ; atomic_long_t __constr_expr_4 ; struct lock_class_key __key___4 ; struct lock_class_key __key___5 ; atomic_long_t __constr_expr_5 ; struct lock_class_key __key___6 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; struct drm_i915_private *__p___13 ; struct drm_i915_private *__p___14 ; { dev = dev_priv->dev;
/* Expanded INIT_WORK for hotplug, digital-port, RPS and L3-parity work items. */
__init_work(& dev_priv->hotplug_work, 0); __constr_expr_0___0.counter = 137438953408L; dev_priv->hotplug_work.data = __constr_expr_0___0; lockdep_init_map(& dev_priv->hotplug_work.lockdep_map, "(&dev_priv->hotplug_work)", & __key, 0); INIT_LIST_HEAD(& dev_priv->hotplug_work.entry); dev_priv->hotplug_work.func = & i915_hotplug_work_func; __init_work(& dev_priv->dig_port_work, 0); __constr_expr_1.counter = 137438953408L; dev_priv->dig_port_work.data = __constr_expr_1; lockdep_init_map(& dev_priv->dig_port_work.lockdep_map, "(&dev_priv->dig_port_work)", & __key___0, 0); INIT_LIST_HEAD(& dev_priv->dig_port_work.entry); dev_priv->dig_port_work.func = & i915_digport_work_func; __init_work(& dev_priv->rps.work, 0); __constr_expr_2.counter = 137438953408L; dev_priv->rps.work.data = __constr_expr_2; lockdep_init_map(& dev_priv->rps.work.lockdep_map, "(&dev_priv->rps.work)", & __key___1, 0); INIT_LIST_HEAD(& dev_priv->rps.work.entry); dev_priv->rps.work.func = & gen6_pm_rps_work; __init_work(& dev_priv->l3_parity.error_work, 0);
__constr_expr_3.counter = 137438953408L; dev_priv->l3_parity.error_work.data = __constr_expr_3; lockdep_init_map(& dev_priv->l3_parity.error_work.lockdep_map, "(&dev_priv->l3_parity.error_work)", & __key___2, 0); INIT_LIST_HEAD(& dev_priv->l3_parity.error_work.entry); dev_priv->l3_parity.error_work.func = & ivybridge_parity_work;
/* pm_rps_events: 112 (0x70) for the offset-45/gen-8 platform combination, else 6. */
__p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { dev_priv->pm_rps_events = 6U; } else { __p___1 = dev_priv; if ((unsigned int )((unsigned char )__p___1->info.gen) != 8U) { dev_priv->pm_rps_events = 6U; } else { dev_priv->pm_rps_events = 112U; } } } else { dev_priv->pm_rps_events = 112U; }
/* Expanded INIT_DELAYED_WORK for hangcheck and HPD-reenable (work + timer). */
__init_work(& dev_priv->gpu_error.hangcheck_work.work, 0); __constr_expr_4.counter = 137438953408L; dev_priv->gpu_error.hangcheck_work.work.data = __constr_expr_4; lockdep_init_map(& dev_priv->gpu_error.hangcheck_work.work.lockdep_map, "(&(&dev_priv->gpu_error.hangcheck_work)->work)", & __key___3, 0); INIT_LIST_HEAD(& dev_priv->gpu_error.hangcheck_work.work.entry); dev_priv->gpu_error.hangcheck_work.work.func = & i915_hangcheck_elapsed; init_timer_key(& dev_priv->gpu_error.hangcheck_work.timer, 2097152U, "(&(&dev_priv->gpu_error.hangcheck_work)->timer)", & __key___4); dev_priv->gpu_error.hangcheck_work.timer.function = & delayed_work_timer_fn; dev_priv->gpu_error.hangcheck_work.timer.data = (unsigned long )(& dev_priv->gpu_error.hangcheck_work); __init_work(& dev_priv->hotplug_reenable_work.work, 0); __constr_expr_5.counter = 137438953408L; dev_priv->hotplug_reenable_work.work.data = __constr_expr_5; lockdep_init_map(& dev_priv->hotplug_reenable_work.work.lockdep_map, "(&(&dev_priv->hotplug_reenable_work)->work)", & __key___5, 0); INIT_LIST_HEAD(& dev_priv->hotplug_reenable_work.work.entry); dev_priv->hotplug_reenable_work.work.func = & intel_hpd_irq_reenable_work; init_timer_key(& dev_priv->hotplug_reenable_work.timer, 2097152U,
"(&(&dev_priv->hotplug_reenable_work)->timer)", & __key___6); dev_priv->hotplug_reenable_work.timer.function = & delayed_work_timer_fn; dev_priv->hotplug_reenable_work.timer.data = (unsigned long )(& dev_priv->hotplug_reenable_work); pm_qos_add_request(& dev_priv->pm_qos, 1, -1);
/* Vblank counter selection: gen2 -> i8xx (no counter); offset-44 flag or
 * gen>4 -> gm45 (32-bit wrap); else i915 (24-bit wrap). */
__p___4 = dev_priv; if ((unsigned int )((unsigned char )__p___4->info.gen) == 2U) { dev->max_vblank_count = 0U; (dev->driver)->get_vblank_counter = & i8xx_get_vblank_counter; } else { __p___2 = dev_priv; if ((unsigned int )*((unsigned char *)__p___2 + 44UL) != 0U) { dev->max_vblank_count = 4294967295U; (dev->driver)->get_vblank_counter = & gm45_get_vblank_counter; } else { __p___3 = dev_priv; if ((unsigned int )((unsigned char )__p___3->info.gen) > 4U) { dev->max_vblank_count = 4294967295U; (dev->driver)->get_vblank_counter = & gm45_get_vblank_counter; } else { (dev->driver)->get_vblank_counter = & i915_get_vblank_counter; dev->max_vblank_count = 16777215U; } } } __p___5 = dev_priv; if ((unsigned int )((unsigned char )__p___5->info.gen) != 2U) { dev->vblank_disable_immediate = 1; } else { } (dev->driver)->get_vblank_timestamp = & i915_get_vblank_timestamp; (dev->driver)->get_scanout_position = & i915_get_crtc_scanoutpos;
/* Platform IRQ-op dispatch: cherryview / valleyview / gen8 / ironlake-PCH /
 * gen2 / gen3 / gen4 variants, plus the matching hpd_irq_setup hook. */
__p___13 = dev_priv; if ((unsigned int )*((unsigned char *)__p___13 + 45UL) != 0U) { __p___14 = dev_priv; if ((unsigned int )((unsigned char )__p___14->info.gen) == 8U) { (dev->driver)->irq_handler = & cherryview_irq_handler; (dev->driver)->irq_preinstall = & cherryview_irq_preinstall; (dev->driver)->irq_postinstall = & cherryview_irq_postinstall; (dev->driver)->irq_uninstall = & cherryview_irq_uninstall; (dev->driver)->enable_vblank = & valleyview_enable_vblank; (dev->driver)->disable_vblank = & valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = & i915_hpd_irq_setup; } else { goto _L; } } else { _L: /* CIL Label */ __p___12 = dev_priv; if ((unsigned int )*((unsigned char *)__p___12 + 45UL) != 0U) { (dev->driver)->irq_handler = & valleyview_irq_handler;
(dev->driver)->irq_preinstall = & valleyview_irq_preinstall; (dev->driver)->irq_postinstall = & valleyview_irq_postinstall; (dev->driver)->irq_uninstall = & valleyview_irq_uninstall; (dev->driver)->enable_vblank = & valleyview_enable_vblank; (dev->driver)->disable_vblank = & valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = & i915_hpd_irq_setup; } else { __p___11 = dev_priv; if ((unsigned int )((unsigned char )__p___11->info.gen) > 7U) { (dev->driver)->irq_handler = & gen8_irq_handler; (dev->driver)->irq_preinstall = & gen8_irq_reset; (dev->driver)->irq_postinstall = & gen8_irq_postinstall; (dev->driver)->irq_uninstall = & gen8_irq_uninstall; (dev->driver)->enable_vblank = & gen8_enable_vblank; (dev->driver)->disable_vblank = & gen8_disable_vblank; __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___6->pch_type != 0U) { dev_priv->display.hpd_irq_setup = & ibx_hpd_irq_setup; } else { dev_priv->display.hpd_irq_setup = & bxt_hpd_irq_setup; } } else { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___10->pch_type != 0U) { (dev->driver)->irq_handler = & ironlake_irq_handler; (dev->driver)->irq_preinstall = & ironlake_irq_reset; (dev->driver)->irq_postinstall = & ironlake_irq_postinstall; (dev->driver)->irq_uninstall = & ironlake_irq_uninstall; (dev->driver)->enable_vblank = & ironlake_enable_vblank; (dev->driver)->disable_vblank = & ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = & ibx_hpd_irq_setup; } else { __p___8 = dev_priv; if ((unsigned int )((unsigned char )__p___8->info.gen) == 2U) { (dev->driver)->irq_preinstall = & i8xx_irq_preinstall; (dev->driver)->irq_postinstall = & i8xx_irq_postinstall; (dev->driver)->irq_handler = & i8xx_irq_handler; (dev->driver)->irq_uninstall = & i8xx_irq_uninstall; } else { __p___7 = dev_priv; if ((unsigned int )((unsigned char )__p___7->info.gen) == 3U) { (dev->driver)->irq_preinstall = & i915_irq_preinstall; (dev->driver)->irq_postinstall = &
i915_irq_postinstall; (dev->driver)->irq_uninstall = & i915_irq_uninstall; (dev->driver)->irq_handler = & i915_irq_handler; } else { (dev->driver)->irq_preinstall = & i965_irq_preinstall; (dev->driver)->irq_postinstall = & i965_irq_postinstall; (dev->driver)->irq_uninstall = & i965_irq_uninstall; (dev->driver)->irq_handler = & i965_irq_handler; } } __p___9 = dev_priv; if ((unsigned int )*((unsigned char *)__p___9 + 46UL) != 0U) { dev_priv->display.hpd_irq_setup = & i915_hpd_irq_setup; } else { } (dev->driver)->enable_vblank = & i915_enable_vblank; (dev->driver)->disable_vblank = & i915_disable_vblank; } } } } return; } }
/*
 * intel_hpd_init() - reset hpd_stats counters/marks for pins 1..6, then walk
 * the connector list restoring connector->polled and forcing HPD polling (1U)
 * for encoders with an hpd_pin on devices with the offset-46 info flag, and
 * for MST ports. (Continues on the next chunk: hpd_irq_setup under irq_lock.)
 */
void intel_hpd_init(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct drm_mode_config *mode_config ; struct drm_connector *connector ; int i ; struct list_head const *__mptr ; struct intel_connector *intel_connector ; struct drm_connector const *__mptr___0 ; struct drm_i915_private *__p ; struct list_head const *__mptr___1 ; { dev = dev_priv->dev; mode_config = & dev->mode_config; i = 1; goto ldv_50319; ldv_50318: dev_priv->hpd_stats[i].hpd_cnt = 0; dev_priv->hpd_stats[i].hpd_mark = 0; i = i + 1; ldv_50319: ; if (i <= 6) { goto ldv_50318; } else { } __mptr = (struct list_head const *)mode_config->connector_list.next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_50335; ldv_50334: __mptr___0 = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr___0; connector->polled = intel_connector->polled; if ((unsigned long )connector->encoder != (unsigned long )((struct drm_encoder *)0) && (unsigned int )connector->polled == 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { if ((unsigned int )(intel_connector->encoder)->hpd_pin != 0U) { connector->polled = 1U; } else { } } else { } } else { } if ((unsigned long )intel_connector->mst_port != (unsigned long )((struct intel_dp *)0)) { connector->polled = 1U; }
/* (continuation of intel_hpd_init: finish connector walk, run hpd_irq_setup
 * under irq_lock) */
else { } __mptr___1 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_50335: ; if ((unsigned long )(& connector->head) != (unsigned long )(& mode_config->connector_list)) { goto ldv_50334; } else { } spin_lock_irq(& dev_priv->irq_lock); if ((unsigned long )dev_priv->display.hpd_irq_setup != (unsigned long )((void (*)(struct drm_device * ))0)) { (*(dev_priv->display.hpd_irq_setup))(dev); } else { } spin_unlock_irq(& dev_priv->irq_lock); return; } }
/* intel_irq_install() - mark pm.irqs_enabled then drm_irq_install() with the PCI irq. */
int intel_irq_install(struct drm_i915_private *dev_priv ) { int tmp ; { dev_priv->pm.irqs_enabled = 1; tmp = drm_irq_install(dev_priv->dev, (int )((dev_priv->dev)->pdev)->irq); return (tmp); } }
/* intel_irq_uninstall() - drm_irq_uninstall, cancel HPD work, clear pm.irqs_enabled. */
void intel_irq_uninstall(struct drm_i915_private *dev_priv ) { { drm_irq_uninstall(dev_priv->dev); intel_hpd_cancel_work(dev_priv); dev_priv->pm.irqs_enabled = 0; return; } }
/* intel_runtime_pm_disable_interrupts() - driver irq_uninstall, clear flag, sync IRQ. */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv ) { { (*(((dev_priv->dev)->driver)->irq_uninstall))(dev_priv->dev); dev_priv->pm.irqs_enabled = 0; synchronize_irq((unsigned int )(dev_priv->dev)->irq); return; } }
/* intel_runtime_pm_enable_interrupts() - set flag, driver irq_preinstall + postinstall. */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv ) { { dev_priv->pm.irqs_enabled = 1; (*(((dev_priv->dev)->driver)->irq_preinstall))(dev_priv->dev); (*(((dev_priv->dev)->driver)->irq_postinstall))(dev_priv->dev); return; } }
/*
 * LDV environment model from here on: generated drivers that nondeterministically
 * invoke the queued work items (states: 0 idle, 1 disabled, 2/3 queued, 4 running).
 * NOTE(review): invoke_work_8 passes ldv_work_struct_8_0 in all four cases
 * (cases 1-3 do not use _8_1/_8_2/_8_3) — looks like a generator artifact;
 * left as generated. The same pattern appears in invoke_work_10/_11 below.
 */
void invoke_work_8(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_8_0 == 2 || ldv_work_8_0 == 3) { ldv_work_8_0 = 4; i915_digport_work_func(ldv_work_struct_8_0); ldv_work_8_0 = 1; } else { } goto ldv_50355; case 1: ; if (ldv_work_8_1 == 2 || ldv_work_8_1 == 3) { ldv_work_8_1 = 4; i915_digport_work_func(ldv_work_struct_8_0); ldv_work_8_1 = 1; } else { } goto ldv_50355; case 2: ; if (ldv_work_8_2 == 2 || ldv_work_8_2 == 3) { ldv_work_8_2 = 4; i915_digport_work_func(ldv_work_struct_8_0); ldv_work_8_2 = 1; } else { } goto ldv_50355; case 3: ; if (ldv_work_8_3 == 2 ||
ldv_work_8_3 == 3) { ldv_work_8_3 = 4; i915_digport_work_func(ldv_work_struct_8_0); ldv_work_8_3 = 1; } else { } goto ldv_50355; default: ldv_stop(); } ldv_50355: ; return; } }
/* LDV harness: run-and-disable every slot of work class 11 currently in `state`. */
void call_and_disable_all_11(int state ) { { if (ldv_work_11_0 == state) { call_and_disable_work_11(ldv_work_struct_11_0); } else { } if (ldv_work_11_1 == state) { call_and_disable_work_11(ldv_work_struct_11_1); } else { } if (ldv_work_11_2 == state) { call_and_disable_work_11(ldv_work_struct_11_2); } else { } if (ldv_work_11_3 == state) { call_and_disable_work_11(ldv_work_struct_11_3); } else { } return; } }
/* LDV harness: run-and-disable every slot of work class 9 currently in `state`. */
void call_and_disable_all_9(int state ) { { if (ldv_work_9_0 == state) { call_and_disable_work_9(ldv_work_struct_9_0); } else { } if (ldv_work_9_1 == state) { call_and_disable_work_9(ldv_work_struct_9_1); } else { } if (ldv_work_9_2 == state) { call_and_disable_work_9(ldv_work_struct_9_2); } else { } if (ldv_work_9_3 == state) { call_and_disable_work_9(ldv_work_struct_9_3); } else { } return; } }
/* LDV harness: run-and-disable every slot of work class 12 currently in `state`. */
void call_and_disable_all_12(int state ) { { if (ldv_work_12_0 == state) { call_and_disable_work_12(ldv_work_struct_12_0); } else { } if (ldv_work_12_1 == state) { call_and_disable_work_12(ldv_work_struct_12_1); } else { } if (ldv_work_12_2 == state) { call_and_disable_work_12(ldv_work_struct_12_2); } else { } if (ldv_work_12_3 == state) { call_and_disable_work_12(ldv_work_struct_12_3); } else { } return; } }
/* LDV harness: reset all slots of work class 10 to idle (0). */
void work_init_10(void) { { ldv_work_10_0 = 0; ldv_work_10_1 = 0; ldv_work_10_2 = 0; ldv_work_10_3 = 0; return; } }
/* LDV harness: mark the matching queued slot of class 8 disabled (1). */
void disable_work_8(struct work_struct *work ) { { if ((ldv_work_8_0 == 3 || ldv_work_8_0 == 2) && (unsigned long )ldv_work_struct_8_0 == (unsigned long )work) { ldv_work_8_0 = 1; } else { } if ((ldv_work_8_1 == 3 || ldv_work_8_1 == 2) && (unsigned long )ldv_work_struct_8_1 == (unsigned long )work) { ldv_work_8_1 = 1; } else { } if ((ldv_work_8_2 == 3 || ldv_work_8_2 == 2) && (unsigned long )ldv_work_struct_8_2 == (unsigned long )work) { ldv_work_8_2 = 1; } else { } if
((ldv_work_8_3 == 3 || ldv_work_8_3 == 2) && (unsigned long )ldv_work_struct_8_3 == (unsigned long )work) { ldv_work_8_3 = 1; } else { } return; } }
/* LDV harness: mark the matching queued slot of class 11 disabled. */
void disable_work_11(struct work_struct *work ) { { if ((ldv_work_11_0 == 3 || ldv_work_11_0 == 2) && (unsigned long )ldv_work_struct_11_0 == (unsigned long )work) { ldv_work_11_0 = 1; } else { } if ((ldv_work_11_1 == 3 || ldv_work_11_1 == 2) && (unsigned long )ldv_work_struct_11_1 == (unsigned long )work) { ldv_work_11_1 = 1; } else { } if ((ldv_work_11_2 == 3 || ldv_work_11_2 == 2) && (unsigned long )ldv_work_struct_11_2 == (unsigned long )work) { ldv_work_11_2 = 1; } else { } if ((ldv_work_11_3 == 3 || ldv_work_11_3 == 2) && (unsigned long )ldv_work_struct_11_3 == (unsigned long )work) { ldv_work_11_3 = 1; } else { } return; } }
/* LDV harness: mark the matching queued slot of class 12 disabled. */
void disable_work_12(struct work_struct *work ) { { if ((ldv_work_12_0 == 3 || ldv_work_12_0 == 2) && (unsigned long )ldv_work_struct_12_0 == (unsigned long )work) { ldv_work_12_0 = 1; } else { } if ((ldv_work_12_1 == 3 || ldv_work_12_1 == 2) && (unsigned long )ldv_work_struct_12_1 == (unsigned long )work) { ldv_work_12_1 = 1; } else { } if ((ldv_work_12_2 == 3 || ldv_work_12_2 == 2) && (unsigned long )ldv_work_struct_12_2 == (unsigned long )work) { ldv_work_12_2 = 1; } else { } if ((ldv_work_12_3 == 3 || ldv_work_12_3 == 2) && (unsigned long )ldv_work_struct_12_3 == (unsigned long )work) { ldv_work_12_3 = 1; } else { } return; } }
/* LDV harness: if `work` matches a queued class-9 slot, run gen6_pm_rps_work and disable it. */
void call_and_disable_work_9(struct work_struct *work ) { { if ((ldv_work_9_0 == 2 || ldv_work_9_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_0) { gen6_pm_rps_work(work); ldv_work_9_0 = 1; return; } else { } if ((ldv_work_9_1 == 2 || ldv_work_9_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_1) { gen6_pm_rps_work(work); ldv_work_9_1 = 1; return; } else { } if ((ldv_work_9_2 == 2 || ldv_work_9_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_2) { gen6_pm_rps_work(work); ldv_work_9_2 = 1; return; }
/* (continuation of call_and_disable_work_9: last slot) */
else { } if ((ldv_work_9_3 == 2 || ldv_work_9_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_9_3) { gen6_pm_rps_work(work); ldv_work_9_3 = 1; return; } else { } return; } }
/* LDV harness: claim the first idle class-8 slot for `work`, set its state. */
void activate_work_8(struct work_struct *work , int state ) { { if (ldv_work_8_0 == 0) { ldv_work_struct_8_0 = work; ldv_work_8_0 = state; return; } else { } if (ldv_work_8_1 == 0) { ldv_work_struct_8_1 = work; ldv_work_8_1 = state; return; } else { } if (ldv_work_8_2 == 0) { ldv_work_struct_8_2 = work; ldv_work_8_2 = state; return; } else { } if (ldv_work_8_3 == 0) { ldv_work_struct_8_3 = work; ldv_work_8_3 = state; return; } else { } return; } }
/* LDV harness: mark the matching queued slot of class 10 disabled. */
void disable_work_10(struct work_struct *work ) { { if ((ldv_work_10_0 == 3 || ldv_work_10_0 == 2) && (unsigned long )ldv_work_struct_10_0 == (unsigned long )work) { ldv_work_10_0 = 1; } else { } if ((ldv_work_10_1 == 3 || ldv_work_10_1 == 2) && (unsigned long )ldv_work_struct_10_1 == (unsigned long )work) { ldv_work_10_1 = 1; } else { } if ((ldv_work_10_2 == 3 || ldv_work_10_2 == 2) && (unsigned long )ldv_work_struct_10_2 == (unsigned long )work) { ldv_work_10_2 = 1; } else { } if ((ldv_work_10_3 == 3 || ldv_work_10_3 == 2) && (unsigned long )ldv_work_struct_10_3 == (unsigned long )work) { ldv_work_10_3 = 1; } else { } return; } }
/* LDV harness: claim the first idle class-10 slot for `work`, set its state. */
void activate_work_10(struct work_struct *work , int state ) { { if (ldv_work_10_0 == 0) { ldv_work_struct_10_0 = work; ldv_work_10_0 = state; return; } else { } if (ldv_work_10_1 == 0) { ldv_work_struct_10_1 = work; ldv_work_10_1 = state; return; } else { } if (ldv_work_10_2 == 0) { ldv_work_struct_10_2 = work; ldv_work_10_2 = state; return; } else { } if (ldv_work_10_3 == 0) { ldv_work_struct_10_3 = work; ldv_work_10_3 = state; return; } else { } return; } }
/* LDV harness: if `work` matches a queued class-11 slot, run i915_hangcheck_elapsed and disable it. */
void call_and_disable_work_11(struct work_struct *work ) { { if ((ldv_work_11_0 == 2 || ldv_work_11_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_0) { i915_hangcheck_elapsed(work); ldv_work_11_0 = 1; return; } else { } if
((ldv_work_11_1 == 2 || ldv_work_11_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_1) { i915_hangcheck_elapsed(work); ldv_work_11_1 = 1; return; } else { } if ((ldv_work_11_2 == 2 || ldv_work_11_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_2) { i915_hangcheck_elapsed(work); ldv_work_11_2 = 1; return; } else { } if ((ldv_work_11_3 == 2 || ldv_work_11_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_11_3) { i915_hangcheck_elapsed(work); ldv_work_11_3 = 1; return; } else { } return; } }
/* LDV harness: claim the first idle class-7 slot for `work`, set its state. */
void activate_work_7(struct work_struct *work , int state ) { { if (ldv_work_7_0 == 0) { ldv_work_struct_7_0 = work; ldv_work_7_0 = state; return; } else { } if (ldv_work_7_1 == 0) { ldv_work_struct_7_1 = work; ldv_work_7_1 = state; return; } else { } if (ldv_work_7_2 == 0) { ldv_work_struct_7_2 = work; ldv_work_7_2 = state; return; } else { } if (ldv_work_7_3 == 0) { ldv_work_struct_7_3 = work; ldv_work_7_3 = state; return; } else { } return; } }
/* LDV harness: run-and-disable every slot of work class 10 currently in `state`. */
void call_and_disable_all_10(int state ) { { if (ldv_work_10_0 == state) { call_and_disable_work_10(ldv_work_struct_10_0); } else { } if (ldv_work_10_1 == state) { call_and_disable_work_10(ldv_work_struct_10_1); } else { } if (ldv_work_10_2 == state) { call_and_disable_work_10(ldv_work_struct_10_2); } else { } if (ldv_work_10_3 == state) { call_and_disable_work_10(ldv_work_struct_10_3); } else { } return; } }
/* LDV harness: reset all slots of work class 9 to idle (0). */
void work_init_9(void) { { ldv_work_9_0 = 0; ldv_work_9_1 = 0; ldv_work_9_2 = 0; ldv_work_9_3 = 0; return; } }
/* LDV harness: if `work` matches a queued class-10 slot, run ivybridge_parity_work and disable it. */
void call_and_disable_work_10(struct work_struct *work ) { { if ((ldv_work_10_0 == 2 || ldv_work_10_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_0) { ivybridge_parity_work(work); ldv_work_10_0 = 1; return; } else { } if ((ldv_work_10_1 == 2 || ldv_work_10_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_1) { ivybridge_parity_work(work); ldv_work_10_1 = 1; return; } else { } if ((ldv_work_10_2 == 2 || ldv_work_10_2 ==
/* (continuation of call_and_disable_work_10: slots 2 and 3) */
3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_2) { ivybridge_parity_work(work); ldv_work_10_2 = 1; return; } else { } if ((ldv_work_10_3 == 2 || ldv_work_10_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_10_3) { ivybridge_parity_work(work); ldv_work_10_3 = 1; return; } else { } return; } }
/* LDV harness: mark the matching queued slot of class 7 disabled. */
void disable_work_7(struct work_struct *work ) { { if ((ldv_work_7_0 == 3 || ldv_work_7_0 == 2) && (unsigned long )ldv_work_struct_7_0 == (unsigned long )work) { ldv_work_7_0 = 1; } else { } if ((ldv_work_7_1 == 3 || ldv_work_7_1 == 2) && (unsigned long )ldv_work_struct_7_1 == (unsigned long )work) { ldv_work_7_1 = 1; } else { } if ((ldv_work_7_2 == 3 || ldv_work_7_2 == 2) && (unsigned long )ldv_work_struct_7_2 == (unsigned long )work) { ldv_work_7_2 = 1; } else { } if ((ldv_work_7_3 == 3 || ldv_work_7_3 == 2) && (unsigned long )ldv_work_struct_7_3 == (unsigned long )work) { ldv_work_7_3 = 1; } else { } return; } }
/* LDV harness: nondeterministically run one queued class-10 slot.
 * NOTE(review): all four cases pass ldv_work_struct_10_0 — generator artifact,
 * left as generated (same as invoke_work_8 above). */
void invoke_work_10(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_10_0 == 2 || ldv_work_10_0 == 3) { ldv_work_10_0 = 4; ivybridge_parity_work(ldv_work_struct_10_0); ldv_work_10_0 = 1; } else { } goto ldv_50426; case 1: ; if (ldv_work_10_1 == 2 || ldv_work_10_1 == 3) { ldv_work_10_1 = 4; ivybridge_parity_work(ldv_work_struct_10_0); ldv_work_10_1 = 1; } else { } goto ldv_50426; case 2: ; if (ldv_work_10_2 == 2 || ldv_work_10_2 == 3) { ldv_work_10_2 = 4; ivybridge_parity_work(ldv_work_struct_10_0); ldv_work_10_2 = 1; } else { } goto ldv_50426; case 3: ; if (ldv_work_10_3 == 2 || ldv_work_10_3 == 3) { ldv_work_10_3 = 4; ivybridge_parity_work(ldv_work_struct_10_0); ldv_work_10_3 = 1; } else { } goto ldv_50426; default: ldv_stop(); } ldv_50426: ; return; } }
/* LDV harness: if `work` matches a queued class-7 slot, run i915_hotplug_work_func and disable it. */
void call_and_disable_work_7(struct work_struct *work ) { { if ((ldv_work_7_0 == 2 || ldv_work_7_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_0) { i915_hotplug_work_func(work); ldv_work_7_0 = 1; return; } else { } if
((ldv_work_7_1 == 2 || ldv_work_7_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_1) { i915_hotplug_work_func(work); ldv_work_7_1 = 1; return; } else { } if ((ldv_work_7_2 == 2 || ldv_work_7_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_2) { i915_hotplug_work_func(work); ldv_work_7_2 = 1; return; } else { } if ((ldv_work_7_3 == 2 || ldv_work_7_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_3) { i915_hotplug_work_func(work); ldv_work_7_3 = 1; return; } else { } return; } }
/* LDV harness: reset all slots of work class 8 to idle (0). */
void work_init_8(void) { { ldv_work_8_0 = 0; ldv_work_8_1 = 0; ldv_work_8_2 = 0; ldv_work_8_3 = 0; return; } }
/* LDV harness: if `work` matches a queued class-8 slot, run i915_digport_work_func and disable it. */
void call_and_disable_work_8(struct work_struct *work ) { { if ((ldv_work_8_0 == 2 || ldv_work_8_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_0) { i915_digport_work_func(work); ldv_work_8_0 = 1; return; } else { } if ((ldv_work_8_1 == 2 || ldv_work_8_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_1) { i915_digport_work_func(work); ldv_work_8_1 = 1; return; } else { } if ((ldv_work_8_2 == 2 || ldv_work_8_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_2) { i915_digport_work_func(work); ldv_work_8_2 = 1; return; } else { } if ((ldv_work_8_3 == 2 || ldv_work_8_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_8_3) { i915_digport_work_func(work); ldv_work_8_3 = 1; return; } else { } return; } }
/* LDV harness: nondeterministic dispatch for class 11 (i915_hangcheck_elapsed);
 * same ldv_work_struct_11_0 artifact as invoke_work_8/_10. (Truncated here —
 * continues past this chunk.) */
void invoke_work_11(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_11_0 == 2 || ldv_work_11_0 == 3) { ldv_work_11_0 = 4; i915_hangcheck_elapsed(ldv_work_struct_11_0); ldv_work_11_0 = 1; } else { } goto ldv_50450; case 1: ; if (ldv_work_11_1 == 2 || ldv_work_11_1 == 3) { ldv_work_11_1 = 4; i915_hangcheck_elapsed(ldv_work_struct_11_0); ldv_work_11_1 = 1; } else { } goto ldv_50450; case 2: ; if (ldv_work_11_2 == 2 || ldv_work_11_2 == 3) { ldv_work_11_2 = 4; i915_hangcheck_elapsed(ldv_work_struct_11_0); ldv_work_11_2
= 1; } else { } goto ldv_50450; case 3: ; if (ldv_work_11_3 == 2 || ldv_work_11_3 == 3) { ldv_work_11_3 = 4; i915_hangcheck_elapsed(ldv_work_struct_11_0); ldv_work_11_3 = 1; } else { } goto ldv_50450; default: ldv_stop(); } ldv_50450: ; return; } } void invoke_work_9(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_9_0 == 2 || ldv_work_9_0 == 3) { ldv_work_9_0 = 4; gen6_pm_rps_work(ldv_work_struct_9_0); ldv_work_9_0 = 1; } else { } goto ldv_50461; case 1: ; if (ldv_work_9_1 == 2 || ldv_work_9_1 == 3) { ldv_work_9_1 = 4; gen6_pm_rps_work(ldv_work_struct_9_0); ldv_work_9_1 = 1; } else { } goto ldv_50461; case 2: ; if (ldv_work_9_2 == 2 || ldv_work_9_2 == 3) { ldv_work_9_2 = 4; gen6_pm_rps_work(ldv_work_struct_9_0); ldv_work_9_2 = 1; } else { } goto ldv_50461; case 3: ; if (ldv_work_9_3 == 2 || ldv_work_9_3 == 3) { ldv_work_9_3 = 4; gen6_pm_rps_work(ldv_work_struct_9_0); ldv_work_9_3 = 1; } else { } goto ldv_50461; default: ldv_stop(); } ldv_50461: ; return; } } void activate_work_9(struct work_struct *work , int state ) { { if (ldv_work_9_0 == 0) { ldv_work_struct_9_0 = work; ldv_work_9_0 = state; return; } else { } if (ldv_work_9_1 == 0) { ldv_work_struct_9_1 = work; ldv_work_9_1 = state; return; } else { } if (ldv_work_9_2 == 0) { ldv_work_struct_9_2 = work; ldv_work_9_2 = state; return; } else { } if (ldv_work_9_3 == 0) { ldv_work_struct_9_3 = work; ldv_work_9_3 = state; return; } else { } return; } } void call_and_disable_all_7(int state ) { { if (ldv_work_7_0 == state) { call_and_disable_work_7(ldv_work_struct_7_0); } else { } if (ldv_work_7_1 == state) { call_and_disable_work_7(ldv_work_struct_7_1); } else { } if (ldv_work_7_2 == state) { call_and_disable_work_7(ldv_work_struct_7_2); } else { } if (ldv_work_7_3 == state) { call_and_disable_work_7(ldv_work_struct_7_3); } else { } return; } } void invoke_work_12(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_12_0 == 2 || 
ldv_work_12_0 == 3) { ldv_work_12_0 = 4; intel_hpd_irq_reenable_work(ldv_work_struct_12_0); ldv_work_12_0 = 1; } else { } goto ldv_50479; case 1: ; if (ldv_work_12_1 == 2 || ldv_work_12_1 == 3) { ldv_work_12_1 = 4; intel_hpd_irq_reenable_work(ldv_work_struct_12_0); ldv_work_12_1 = 1; } else { } goto ldv_50479; case 2: ; if (ldv_work_12_2 == 2 || ldv_work_12_2 == 3) { ldv_work_12_2 = 4; intel_hpd_irq_reenable_work(ldv_work_struct_12_0); ldv_work_12_2 = 1; } else { } goto ldv_50479; case 3: ; if (ldv_work_12_3 == 2 || ldv_work_12_3 == 3) { ldv_work_12_3 = 4; intel_hpd_irq_reenable_work(ldv_work_struct_12_0); ldv_work_12_3 = 1; } else { } goto ldv_50479; default: ldv_stop(); } ldv_50479: ; return; } } void call_and_disable_work_12(struct work_struct *work ) { { if ((ldv_work_12_0 == 2 || ldv_work_12_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_0) { intel_hpd_irq_reenable_work(work); ldv_work_12_0 = 1; return; } else { } if ((ldv_work_12_1 == 2 || ldv_work_12_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_1) { intel_hpd_irq_reenable_work(work); ldv_work_12_1 = 1; return; } else { } if ((ldv_work_12_2 == 2 || ldv_work_12_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_2) { intel_hpd_irq_reenable_work(work); ldv_work_12_2 = 1; return; } else { } if ((ldv_work_12_3 == 2 || ldv_work_12_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_12_3) { intel_hpd_irq_reenable_work(work); ldv_work_12_3 = 1; return; } else { } return; } } void work_init_11(void) { { ldv_work_11_0 = 0; ldv_work_11_1 = 0; ldv_work_11_2 = 0; ldv_work_11_3 = 0; return; } } void activate_work_11(struct work_struct *work , int state ) { { if (ldv_work_11_0 == 0) { ldv_work_struct_11_0 = work; ldv_work_11_0 = state; return; } else { } if (ldv_work_11_1 == 0) { ldv_work_struct_11_1 = work; ldv_work_11_1 = state; return; } else { } if (ldv_work_11_2 == 0) { ldv_work_struct_11_2 = work; ldv_work_11_2 = state; return; } else { 
} if (ldv_work_11_3 == 0) { ldv_work_struct_11_3 = work; ldv_work_11_3 = state; return; } else { } return; } } void work_init_7(void) { { ldv_work_7_0 = 0; ldv_work_7_1 = 0; ldv_work_7_2 = 0; ldv_work_7_3 = 0; return; } } void invoke_work_7(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_7_0 == 2 || ldv_work_7_0 == 3) { ldv_work_7_0 = 4; i915_hotplug_work_func(ldv_work_struct_7_0); ldv_work_7_0 = 1; } else { } goto ldv_50505; case 1: ; if (ldv_work_7_1 == 2 || ldv_work_7_1 == 3) { ldv_work_7_1 = 4; i915_hotplug_work_func(ldv_work_struct_7_0); ldv_work_7_1 = 1; } else { } goto ldv_50505; case 2: ; if (ldv_work_7_2 == 2 || ldv_work_7_2 == 3) { ldv_work_7_2 = 4; i915_hotplug_work_func(ldv_work_struct_7_0); ldv_work_7_2 = 1; } else { } goto ldv_50505; case 3: ; if (ldv_work_7_3 == 2 || ldv_work_7_3 == 3) { ldv_work_7_3 = 4; i915_hotplug_work_func(ldv_work_struct_7_0); ldv_work_7_3 = 1; } else { } goto ldv_50505; default: ldv_stop(); } ldv_50505: ; return; } } void call_and_disable_all_8(int state ) { { if (ldv_work_8_0 == state) { call_and_disable_work_8(ldv_work_struct_8_0); } else { } if (ldv_work_8_1 == state) { call_and_disable_work_8(ldv_work_struct_8_1); } else { } if (ldv_work_8_2 == state) { call_and_disable_work_8(ldv_work_struct_8_2); } else { } if (ldv_work_8_3 == state) { call_and_disable_work_8(ldv_work_struct_8_3); } else { } return; } } void activate_work_12(struct work_struct *work , int state ) { { if (ldv_work_12_0 == 0) { ldv_work_struct_12_0 = work; ldv_work_12_0 = state; return; } else { } if (ldv_work_12_1 == 0) { ldv_work_struct_12_1 = work; ldv_work_12_1 = state; return; } else { } if (ldv_work_12_2 == 0) { ldv_work_struct_12_2 = work; ldv_work_12_2 = state; return; } else { } if (ldv_work_12_3 == 0) { ldv_work_struct_12_3 = work; ldv_work_12_3 = state; return; } else { } return; } } void disable_work_9(struct work_struct *work ) { { if ((ldv_work_9_0 == 3 || ldv_work_9_0 == 2) && (unsigned long 
)ldv_work_struct_9_0 == (unsigned long )work) { ldv_work_9_0 = 1; } else { } if ((ldv_work_9_1 == 3 || ldv_work_9_1 == 2) && (unsigned long )ldv_work_struct_9_1 == (unsigned long )work) { ldv_work_9_1 = 1; } else { } if ((ldv_work_9_2 == 3 || ldv_work_9_2 == 2) && (unsigned long )ldv_work_struct_9_2 == (unsigned long )work) { ldv_work_9_2 = 1; } else { } if ((ldv_work_9_3 == 3 || ldv_work_9_3 == 2) && (unsigned long )ldv_work_struct_9_3 == (unsigned long )work) { ldv_work_9_3 = 1; } else { } return; } } void work_init_12(void) { { ldv_work_12_0 = 0; ldv_work_12_1 = 0; ldv_work_12_2 = 0; ldv_work_12_3 = 0; return; } } bool ldv_queue_work_on_385(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_386(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_387(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_388(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_389(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 
ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_cancel_work_sync_390(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(ldv_func_arg1); return (ldv_func_res); } } bool ldv_queue_work_on_401(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_403(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_402(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_405(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_404(struct workqueue_struct *ldv_func_arg1 ) ; struct tracepoint __tracepoint_i915_pipe_update_start ; struct tracepoint __tracepoint_i915_pipe_update_vblank_evaded ; struct tracepoint __tracepoint_i915_pipe_update_end ; struct tracepoint __tracepoint_i915_gem_ring_flush ; struct tracepoint __tracepoint_i915_gem_request_complete ; struct tracepoint __tracepoint_i915_flip_request ; struct tracepoint __tracepoint_i915_flip_complete ; static char const __tpstrtab_i915_pipe_update_start[23U] = { 'i', '9', '1', '5', '_', 'p', 'i', 'p', 'e', '_', 'u', 'p', 'd', 'a', 't', 'e', '_', 's', 't', 'a', 'r', 't', '\000'}; struct tracepoint __tracepoint_i915_pipe_update_start = {(char const *)(& __tpstrtab_i915_pipe_update_start), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_pipe_update_vblank_evaded[31U] = { 'i', '9', '1', '5', '_', 'p', 'i', 'p', 'e', '_', 'u', 'p', 'd', 'a', 't', 
'e', '_', 'v', 'b', 'l', 'a', 'n', 'k', '_', 'e', 'v', 'a', 'd', 'e', 'd', '\000'}; struct tracepoint __tracepoint_i915_pipe_update_vblank_evaded = {(char const *)(& __tpstrtab_i915_pipe_update_vblank_evaded), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_pipe_update_end[21U] = { 'i', '9', '1', '5', '_', 'p', 'i', 'p', 'e', '_', 'u', 'p', 'd', 'a', 't', 'e', '_', 'e', 'n', 'd', '\000'}; struct tracepoint __tracepoint_i915_pipe_update_end = {(char const *)(& __tpstrtab_i915_pipe_update_end), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_object_create[23U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'c', 'r', 'e', 'a', 't', 'e', '\000'}; struct tracepoint __tracepoint_i915_gem_object_create = {(char const *)(& __tpstrtab_i915_gem_object_create), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_vma_bind[14U] = { 'i', '9', '1', '5', '_', 'v', 'm', 'a', '_', 'b', 'i', 'n', 'd', '\000'}; struct tracepoint __tracepoint_i915_vma_bind = {(char const *)(& __tpstrtab_i915_vma_bind), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_vma_unbind[16U] = { 'i', '9', '1', '5', '_', 'v', 'm', 'a', '_', 'u', 'n', 'b', 'i', 'n', 'd', '\000'}; struct tracepoint __tracepoint_i915_vma_unbind = {(char const *)(& __tpstrtab_i915_vma_unbind), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_va_alloc[14U] = { 'i', '9', '1', '5', '_', 'v', 'a', '_', 'a', 'l', 'l', 'o', 'c', '\000'}; struct tracepoint __tracepoint_i915_va_alloc = {(char const *)(& __tpstrtab_i915_va_alloc), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_page_table_entry_alloc[28U] = { 'i', '9', '1', '5', '_', 'p', 'a', 'g', 
'e', '_', 't', 'a', 'b', 'l', 'e', '_', 'e', 'n', 't', 'r', 'y', '_', 'a', 'l', 'l', 'o', 'c', '\000'}; struct tracepoint __tracepoint_i915_page_table_entry_alloc = {(char const *)(& __tpstrtab_i915_page_table_entry_alloc), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_page_table_entry_map[26U] = { 'i', '9', '1', '5', '_', 'p', 'a', 'g', 'e', '_', 't', 'a', 'b', 'l', 'e', '_', 'e', 'n', 't', 'r', 'y', '_', 'm', 'a', 'p', '\000'}; struct tracepoint __tracepoint_i915_page_table_entry_map = {(char const *)(& __tpstrtab_i915_page_table_entry_map), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_object_change_domain[30U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'c', 'h', 'a', 'n', 'g', 'e', '_', 'd', 'o', 'm', 'a', 'i', 'n', '\000'}; struct tracepoint __tracepoint_i915_gem_object_change_domain = {(char const *)(& __tpstrtab_i915_gem_object_change_domain), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_object_pwrite[23U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'p', 'w', 'r', 'i', 't', 'e', '\000'}; struct tracepoint __tracepoint_i915_gem_object_pwrite = {(char const *)(& __tpstrtab_i915_gem_object_pwrite), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_object_pread[22U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'p', 'r', 'e', 'a', 'd', '\000'}; struct tracepoint __tracepoint_i915_gem_object_pread = {(char const *)(& __tpstrtab_i915_gem_object_pread), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_object_fault[22U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'f', 'a', 'u', 'l', 't', 
'\000'}; struct tracepoint __tracepoint_i915_gem_object_fault = {(char const *)(& __tpstrtab_i915_gem_object_fault), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_object_clflush[24U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'c', 'l', 'f', 'l', 'u', 's', 'h', '\000'}; struct tracepoint __tracepoint_i915_gem_object_clflush = {(char const *)(& __tpstrtab_i915_gem_object_clflush), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_object_destroy[24U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 'd', 'e', 's', 't', 'r', 'o', 'y', '\000'}; struct tracepoint __tracepoint_i915_gem_object_destroy = {(char const *)(& __tpstrtab_i915_gem_object_destroy), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_evict[15U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'e', 'v', 'i', 'c', 't', '\000'}; struct tracepoint __tracepoint_i915_gem_evict = {(char const *)(& __tpstrtab_i915_gem_evict), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_evict_everything[26U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'e', 'v', 'i', 'c', 't', '_', 'e', 'v', 'e', 'r', 'y', 't', 'h', 'i', 'n', 'g', '\000'}; struct tracepoint __tracepoint_i915_gem_evict_everything = {(char const *)(& __tpstrtab_i915_gem_evict_everything), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_evict_vm[18U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'e', 'v', 'i', 'c', 't', '_', 'v', 'm', '\000'}; struct tracepoint __tracepoint_i915_gem_evict_vm = {(char const *)(& __tpstrtab_i915_gem_evict_vm), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const 
__tpstrtab_i915_gem_ring_sync_to[22U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'i', 'n', 'g', '_', 's', 'y', 'n', 'c', '_', 't', 'o', '\000'}; struct tracepoint __tracepoint_i915_gem_ring_sync_to = {(char const *)(& __tpstrtab_i915_gem_ring_sync_to), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_ring_dispatch[23U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'i', 'n', 'g', '_', 'd', 'i', 's', 'p', 'a', 't', 'c', 'h', '\000'}; struct tracepoint __tracepoint_i915_gem_ring_dispatch = {(char const *)(& __tpstrtab_i915_gem_ring_dispatch), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_ring_flush[20U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'i', 'n', 'g', '_', 'f', 'l', 'u', 's', 'h', '\000'}; struct tracepoint __tracepoint_i915_gem_ring_flush = {(char const *)(& __tpstrtab_i915_gem_ring_flush), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_request_add[21U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'a', 'd', 'd', '\000'}; struct tracepoint __tracepoint_i915_gem_request_add = {(char const *)(& __tpstrtab_i915_gem_request_add), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_request_notify[24U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'n', 'o', 't', 'i', 'f', 'y', '\000'}; struct tracepoint __tracepoint_i915_gem_request_notify = {(char const *)(& __tpstrtab_i915_gem_request_notify), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_request_retire[24U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'r', 'e', 't', 'i', 'r', 'e', '\000'}; struct tracepoint 
__tracepoint_i915_gem_request_retire = {(char const *)(& __tpstrtab_i915_gem_request_retire), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_request_complete[26U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'c', 'o', 'm', 'p', 'l', 'e', 't', 'e', '\000'}; struct tracepoint __tracepoint_i915_gem_request_complete = {(char const *)(& __tpstrtab_i915_gem_request_complete), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_request_wait_begin[28U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'w', 'a', 'i', 't', '_', 'b', 'e', 'g', 'i', 'n', '\000'}; struct tracepoint __tracepoint_i915_gem_request_wait_begin = {(char const *)(& __tpstrtab_i915_gem_request_wait_begin), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_gem_request_wait_end[26U] = { 'i', '9', '1', '5', '_', 'g', 'e', 'm', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'w', 'a', 'i', 't', '_', 'e', 'n', 'd', '\000'}; struct tracepoint __tracepoint_i915_gem_request_wait_end = {(char const *)(& __tpstrtab_i915_gem_request_wait_end), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_flip_request[18U] = { 'i', '9', '1', '5', '_', 'f', 'l', 'i', 'p', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '\000'}; struct tracepoint __tracepoint_i915_flip_request = {(char const *)(& __tpstrtab_i915_flip_request), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_flip_complete[19U] = { 'i', '9', '1', '5', '_', 'f', 'l', 'i', 'p', '_', 'c', 'o', 'm', 'p', 'l', 'e', 't', 'e', '\000'}; struct tracepoint __tracepoint_i915_flip_complete = {(char const *)(& __tpstrtab_i915_flip_complete), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct 
tracepoint_func *)0}; static char const __tpstrtab_i915_reg_rw[12U] = { 'i', '9', '1', '5', '_', 'r', 'e', 'g', '_', 'r', 'w', '\000'}; struct tracepoint __tracepoint_i915_reg_rw = {(char const *)(& __tpstrtab_i915_reg_rw), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_intel_gpu_freq_change[22U] = { 'i', 'n', 't', 'e', 'l', '_', 'g', 'p', 'u', '_', 'f', 'r', 'e', 'q', '_', 'c', 'h', 'a', 'n', 'g', 'e', '\000'}; struct tracepoint __tracepoint_intel_gpu_freq_change = {(char const *)(& __tpstrtab_intel_gpu_freq_change), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_ppgtt_create[18U] = { 'i', '9', '1', '5', '_', 'p', 'p', 'g', 't', 't', '_', 'c', 'r', 'e', 'a', 't', 'e', '\000'}; struct tracepoint __tracepoint_i915_ppgtt_create = {(char const *)(& __tpstrtab_i915_ppgtt_create), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_ppgtt_release[19U] = { 'i', '9', '1', '5', '_', 'p', 'p', 'g', 't', 't', '_', 'r', 'e', 'l', 'e', 'a', 's', 'e', '\000'}; struct tracepoint __tracepoint_i915_ppgtt_release = {(char const *)(& __tpstrtab_i915_ppgtt_release), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_context_create[20U] = { 'i', '9', '1', '5', '_', 'c', 'o', 'n', 't', 'e', 'x', 't', '_', 'c', 'r', 'e', 'a', 't', 'e', '\000'}; struct tracepoint __tracepoint_i915_context_create = {(char const *)(& __tpstrtab_i915_context_create), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_i915_context_free[18U] = { 'i', '9', '1', '5', '_', 'c', 'o', 'n', 't', 'e', 'x', 't', '_', 'f', 'r', 'e', 'e', '\000'}; struct tracepoint __tracepoint_i915_context_free = {(char const *)(& __tpstrtab_i915_context_free), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const 
__tpstrtab_switch_mm[10U] = { 's', 'w', 'i', 't', 'c', 'h', '_', 'm', 'm', '\000'}; struct tracepoint __tracepoint_switch_mm = {(char const *)(& __tpstrtab_switch_mm), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; __inline static bool seq_buf_has_overflowed(struct seq_buf *s ) { { return (s->len > s->size); } } __inline static bool trace_seq_has_overflowed(struct trace_seq *s ) { bool tmp ; int tmp___0 ; { if (s->full != 0) { tmp___0 = 1; } else { tmp = seq_buf_has_overflowed(& s->seq); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } } extern void trace_seq_printf(struct trace_seq * , char const * , ...) ; extern int trace_raw_output_prep(struct trace_iterator * , struct trace_event * ) ; __inline static enum print_line_t trace_handle_return(struct trace_seq *s ) { bool tmp ; { tmp = trace_seq_has_overflowed(s); return ((int )tmp ? 0 : 1); } } extern int trace_event_reg(struct trace_event_call * , enum trace_reg , void * ) ; extern int trace_event_raw_init(struct trace_event_call * ) ; extern int trace_define_field(struct trace_event_call * , char const * , char const * , int , int , int , int ) ; static enum print_line_t trace_raw_output_i915_pipe_update_start(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_pipe_update_start *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_pipe_update_start *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "pipe %c, frame=%u, scanline=%u, min=%u, max=%u\n", (int )field->pipe + 65, field->frame, field->scanline, field->min, field->max); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_pipe_update_vblank_evaded(struct trace_iterator *iter , int flags , struct 
trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_pipe_update_vblank_evaded *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_pipe_update_vblank_evaded *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "pipe %c, frame=%u, scanline=%u, min=%u, max=%u\n", (int )field->pipe + 65, field->frame, field->scanline, field->min, field->max); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_pipe_update_end(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_pipe_update_end *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_pipe_update_end *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "pipe %c, frame=%u, scanline=%u\n", (int )field->pipe + 65, field->frame, field->scanline); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_object_create(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_object_create *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_object_create *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p, size=%u\n", field->obj, field->size); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_vma_bind(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct 
trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_vma_bind *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_vma_bind *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p, offset=%016llx size=%x%s vm=%p\n", field->obj, field->offset, field->size, (int )field->flags & 1 ? (char *)", mappable" : (char *)"", field->vm); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_vma_unbind(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_vma_unbind *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_vma_unbind *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p, offset=%016llx size=%x vm=%p\n", field->obj, field->offset, field->size, field->vm); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_va(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_va *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_va *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "vm=%p (%s), 0x%llx-0x%llx\n", field->vm, (char *)field + ((unsigned long )field->__data_loc_name & 65535UL), field->start, field->end); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_page_table_entry(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; 
struct trace_seq *p ; struct trace_event_raw_i915_page_table_entry *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_page_table_entry *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "vm=%p, pde=%d (0x%llx-0x%llx)\n", field->vm, field->pde, field->start, field->end); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_page_table_entry_update(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_page_table_entry_update *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_page_table_entry_update *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "vm=%p, pde=%d, updating %u:%u\t%s\n", field->vm, field->pde, field->last, field->first, (char *)field + ((unsigned long )field->__data_loc_cur_ptes & 65535UL)); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_object_change_domain(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_object_change_domain *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_object_change_domain *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p, read=%02x=>%02x, write=%02x=>%02x\n", field->obj, field->read_domains >> 16, field->read_domains & 65535U, field->write_domain >> 16, field->write_domain & 65535U); tmp = trace_handle_return(s); return (tmp); } } static enum 
print_line_t trace_raw_output_i915_gem_object_pwrite(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_object_pwrite *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_object_pwrite *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p, offset=%u, len=%u\n", field->obj, field->offset, field->len); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_object_pread(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_object_pread *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_object_pread *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p, offset=%u, len=%u\n", field->obj, field->offset, field->len); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_object_fault(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_object_fault *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_object_fault *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p, %s index=%u %s\n", field->obj, (int )field->gtt ? (char *)"GTT" : (char *)"CPU", field->index, (int )field->write ? 
(char *)", writable" : (char *)""); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_object(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_object *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_object *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "obj=%p\n", field->obj); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_evict(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_evict *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_evict *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%d, size=%d, align=%d %s\n", field->dev, field->size, field->align, (int )field->flags & 1 ? 
(char *)", mappable" : (char *)""); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_evict_everything(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_evict_everything *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_evict_everything *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%d\n", field->dev); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_evict_vm(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_evict_vm *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_evict_vm *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%d, vm=%p\n", field->dev, field->vm); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_ring_sync_to(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_ring_sync_to *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_ring_sync_to *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, sync-from=%u, sync-to=%u, seqno=%u\n", field->dev, field->sync_from, field->sync_to, field->seqno); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t 
trace_raw_output_i915_gem_ring_dispatch(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_ring_dispatch *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_ring_dispatch *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, ring=%u, seqno=%u, flags=%x\n", field->dev, field->ring, field->seqno, field->flags); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_ring_flush(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_ring_flush *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_ring_flush *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, ring=%x, invalidate=%04x, flush=%04x\n", field->dev, field->ring, field->invalidate, field->flush); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_request(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_request *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_request *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, ring=%u, seqno=%u\n", field->dev, field->ring, field->seqno); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_request_notify(struct trace_iterator *iter 
, int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_request_notify *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_request_notify *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, ring=%u, seqno=%u\n", field->dev, field->ring, field->seqno); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_gem_request_wait_begin(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_gem_request_wait_begin *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_gem_request_wait_begin *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, ring=%u, seqno=%u, blocking=%s\n", field->dev, field->ring, field->seqno, (int )field->blocking ? 
(char *)"yes (NB)" : (char *)"no"); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_flip_request(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_flip_request *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_flip_request *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "plane=%d, obj=%p\n", field->plane, field->obj); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_flip_complete(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_flip_complete *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_flip_complete *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "plane=%d, obj=%p\n", field->plane, field->obj); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_reg_rw(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_reg_rw *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_reg_rw *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "%s reg=0x%x, len=%d, val=(0x%x, 0x%x)\n", (unsigned int )field->write != 0U ? 
(char *)"write" : (char *)"read", field->reg, (int )field->len, (unsigned int )field->val, (unsigned int )(field->val >> 32)); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_intel_gpu_freq_change(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_intel_gpu_freq_change *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_intel_gpu_freq_change *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "new_freq=%u\n", field->freq); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_ppgtt(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_ppgtt *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_ppgtt *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, vm=%p\n", field->dev, field->vm); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_i915_context(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_i915_context *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_i915_context *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, ctx=%p, ctx_vm=%p\n", field->dev, field->ctx, field->vm); tmp = trace_handle_return(s); return (tmp); } } static enum print_line_t trace_raw_output_switch_mm(struct 
trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_switch_mm *field ; int ret ; enum print_line_t tmp ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_switch_mm *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } trace_seq_printf(s, "dev=%u, ring=%u, ctx=%p, ctx_vm=%p\n", field->dev, field->ring, field->to, field->vm); tmp = trace_handle_return(s); return (tmp); } } static int trace_event_define_fields_i915_pipe_update_start(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "enum pipe", "pipe", 8, 4, 1, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "frame", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "scanline", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "min", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "max", 24, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_pipe_update_vblank_evaded(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "enum pipe", "pipe", 8, 4, 1, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "frame", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "scanline", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "min", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "max", 24, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_pipe_update_end(struct trace_event_call *event_call 
) { int ret ; { ret = trace_define_field(event_call, "enum pipe", "pipe", 8, 4, 1, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "frame", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "scanline", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_object_create(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "size", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_vma_bind(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u64", "offset", 24, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "size", 32, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned", "flags", 36, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_vma_unbind(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u64", "offset", 24, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "size", 32, 4, 0, 0); if (ret != 0) { 
return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_va(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u64", "start", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u64", "end", 24, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "__data_loc char[]", "name", 32, 4, 1, 0); return (ret); } } static int trace_event_define_fields_i915_page_table_entry(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "pde", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u64", "start", 24, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u64", "end", 32, 8, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_page_table_entry_update(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "pde", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "first", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "last", 24, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "__data_loc char[]", "cur_ptes", 28, 4, 1, 0); return (ret); } } static int trace_event_define_fields_i915_gem_object_change_domain(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct 
drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "read_domains", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "write_domain", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_object_pwrite(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "offset", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "len", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_object_pread(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "offset", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "len", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_object_fault(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "index", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "bool", "gtt", 20, 1, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "bool", "write", 21, 1, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_object(struct trace_event_call *event_call ) { int ret ; { ret = 
trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_evict(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "size", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "align", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned", "flags", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_evict_everything(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_evict_vm(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_ring_sync_to(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "sync_from", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "sync_to", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "seqno", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_ring_dispatch(struct trace_event_call *event_call ) { int ret ; { ret = 
trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "ring", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "seqno", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "flags", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_ring_flush(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "ring", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "invalidate", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "flush", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_request(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "ring", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "seqno", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_request_notify(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "ring", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "seqno", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_gem_request_wait_begin(struct trace_event_call *event_call ) { 
int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "ring", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "seqno", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "bool", "blocking", 20, 1, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_flip_request(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "int", "plane", 8, 4, 1, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_flip_complete(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "int", "plane", 8, 4, 1, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct drm_i915_gem_object *", "obj", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_reg_rw(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u64", "val", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "reg", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u16", "write", 20, 2, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u16", "len", 22, 2, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_intel_gpu_freq_change(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "freq", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int 
trace_event_define_fields_i915_ppgtt(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 8, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "dev", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_i915_context(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "dev", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct intel_context *", "ctx", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 24, 8, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } static int trace_event_define_fields_switch_mm(struct trace_event_call *event_call ) { int ret ; { ret = trace_define_field(event_call, "u32", "ring", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct intel_context *", "to", 16, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "struct i915_address_space *", "vm", 24, 8, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "u32", "dev", 32, 4, 0, 0); if (ret != 0) { return (ret); } else { } return (ret); } } void ldv_initialize_trace_event_class_77(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_ppgtt_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_103(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_pipe_update_vblank_evaded_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_93(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_object_pread_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_82(void) { void *tmp 
/*
 * LDV/CIL machine-generated environment model for the i915 tracepoint
 * event classes.  Two stub shapes appear in this region:
 *
 *  - ldv_initialize_trace_event_class_NN(): allocates a zero-filled
 *    144-byte object via ldv_init_zalloc() and publishes it through the
 *    matching event_class_*_group0 global as a struct trace_event_call *.
 *    (144 presumably equals sizeof(struct trace_event_call) in this
 *    model -- TODO confirm against the generating toolchain.)
 *
 *  - ldv_main_exported_NN(): picks a nondeterministic branch via
 *    __VERIFIER_nondet_int() and, only while ldv_state_variable_NN == 1,
 *    invokes one callback of the corresponding event class
 *    (trace_event_reg / trace_event_raw_init /
 *    trace_event_define_fields_* on the group0 object, or a
 *    trace_raw_output_* handler).  Unmatched case values call
 *    ldv_stop().  Callback arguments are zero-initialized buffers:
 *    ldv_init_zalloc(8560UL) for a struct trace_iterator,
 *    ldv_init_zalloc(48UL) for a struct trace_event,
 *    ldv_init_zalloc(1UL) for an opaque void *, and ldv_memset() for
 *    scalar flag/enum arguments.
 *
 * NOTE(review): the first statement below is the tail of a function
 * whose header lies on an earlier line of this generated file; code in
 * this span is intentionally left byte-identical.
 */
; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_request_wait_begin_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_76(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_context_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_86(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_ring_dispatch_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_83(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_request_notify_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_94(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_object_pwrite_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_91(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_object_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_89(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_evict_everything_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_104(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_pipe_update_start_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_95(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_object_change_domain_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_85(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_ring_flush_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_88(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_evict_vm_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_96(void) { void *tmp ; { tmp = 
ldv_init_zalloc(144UL); event_class_i915_page_table_entry_update_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_84(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_request_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_90(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_evict_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_101(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_object_create_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_97(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_page_table_entry_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_78(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_intel_gpu_freq_change_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_75(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_switch_mm_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_102(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_pipe_update_end_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_81(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_flip_request_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_98(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_va_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_87(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_ring_sync_to_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_80(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); 
event_class_i915_flip_complete_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_99(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_vma_unbind_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_100(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_vma_bind_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_92(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_gem_object_fault_group0 = (struct trace_event_call *)tmp; return; } } void ldv_initialize_trace_event_class_79(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_i915_reg_rw_group0 = (struct trace_event_call *)tmp; return; } } void ldv_main_exported_127(void) { int ldvarg2 ; struct trace_iterator *ldvarg0 ; void *tmp ; struct trace_event *ldvarg1 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg0 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg1 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg2), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_127 == 1) { trace_raw_output_i915_page_table_entry(ldvarg0, ldvarg2, ldvarg1); ldv_state_variable_127 = 1; } else { } goto ldv_53845; default: ldv_stop(); } ldv_53845: ; return; } } void ldv_main_exported_90(void) { enum trace_reg ldvarg5 ; void *ldvarg4 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg4 = tmp; ldv_memset((void *)(& ldvarg5), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_90 == 1) { trace_event_reg(event_class_i915_gem_evict_group0, ldvarg5, ldvarg4); ldv_state_variable_90 = 1; } else { } goto ldv_53853; case 1: ; if (ldv_state_variable_90 == 1) { trace_event_raw_init(event_class_i915_gem_evict_group0); ldv_state_variable_90 = 1; } else { } goto ldv_53853; case 2: ; if (ldv_state_variable_90 == 1) { 
trace_event_define_fields_i915_gem_evict(event_class_i915_gem_evict_group0); ldv_state_variable_90 = 1; } else { } goto ldv_53853; default: ldv_stop(); } ldv_53853: ; return; } } void ldv_main_exported_118(void) { struct trace_iterator *ldvarg6 ; void *tmp ; struct trace_event *ldvarg7 ; void *tmp___0 ; int ldvarg8 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg6 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg7 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg8), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_118 == 1) { trace_raw_output_i915_gem_evict_vm(ldvarg6, ldvarg8, ldvarg7); ldv_state_variable_118 = 1; } else { } goto ldv_53864; default: ldv_stop(); } ldv_53864: ; return; } } void ldv_main_exported_102(void) { void *ldvarg19 ; void *tmp ; enum trace_reg ldvarg20 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg19 = tmp; ldv_memset((void *)(& ldvarg20), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_102 == 1) { trace_event_reg(event_class_i915_pipe_update_end_group0, ldvarg20, ldvarg19); ldv_state_variable_102 = 1; } else { } goto ldv_53872; case 1: ; if (ldv_state_variable_102 == 1) { trace_event_raw_init(event_class_i915_pipe_update_end_group0); ldv_state_variable_102 = 1; } else { } goto ldv_53872; case 2: ; if (ldv_state_variable_102 == 1) { trace_event_define_fields_i915_pipe_update_end(event_class_i915_pipe_update_end_group0); ldv_state_variable_102 = 1; } else { } goto ldv_53872; default: ldv_stop(); } ldv_53872: ; return; } } void ldv_main_exported_129(void) { struct trace_event *ldvarg137 ; void *tmp ; int ldvarg138 ; struct trace_iterator *ldvarg136 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg137 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg136 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg138), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); 
switch (tmp___1) { case 0: ; if (ldv_state_variable_129 == 1) { trace_raw_output_i915_vma_unbind(ldvarg136, ldvarg138, ldvarg137); ldv_state_variable_129 = 1; } else { } goto ldv_53883; default: ldv_stop(); } ldv_53883: ; return; } } void ldv_main_exported_80(void) { void *ldvarg263 ; void *tmp ; enum trace_reg ldvarg264 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg263 = tmp; ldv_memset((void *)(& ldvarg264), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_80 == 1) { trace_event_reg(event_class_i915_flip_complete_group0, ldvarg264, ldvarg263); ldv_state_variable_80 = 1; } else { } goto ldv_53891; case 1: ; if (ldv_state_variable_80 == 1) { trace_event_raw_init(event_class_i915_flip_complete_group0); ldv_state_variable_80 = 1; } else { } goto ldv_53891; case 2: ; if (ldv_state_variable_80 == 1) { trace_event_define_fields_i915_flip_complete(event_class_i915_flip_complete_group0); ldv_state_variable_80 = 1; } else { } goto ldv_53891; default: ldv_stop(); } ldv_53891: ; return; } } void ldv_main_exported_119(void) { struct trace_iterator *ldvarg265 ; void *tmp ; int ldvarg267 ; struct trace_event *ldvarg266 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg265 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg266 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg267), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_119 == 1) { trace_raw_output_i915_gem_evict_everything(ldvarg265, ldvarg267, ldvarg266); ldv_state_variable_119 = 1; } else { } goto ldv_53902; default: ldv_stop(); } ldv_53902: ; return; } } void ldv_main_exported_99(void) { void *ldvarg274 ; void *tmp ; enum trace_reg ldvarg275 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg274 = tmp; ldv_memset((void *)(& ldvarg275), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_99 == 1) { 
trace_event_reg(event_class_i915_vma_unbind_group0, ldvarg275, ldvarg274); ldv_state_variable_99 = 1; } else { } goto ldv_53910; case 1: ; if (ldv_state_variable_99 == 1) { trace_event_raw_init(event_class_i915_vma_unbind_group0); ldv_state_variable_99 = 1; } else { } goto ldv_53910; case 2: ; if (ldv_state_variable_99 == 1) { trace_event_define_fields_i915_vma_unbind(event_class_i915_vma_unbind_group0); ldv_state_variable_99 = 1; } else { } goto ldv_53910; default: ldv_stop(); } ldv_53910: ; return; } } void ldv_main_exported_116(void) { int ldvarg434 ; struct trace_iterator *ldvarg435 ; void *tmp ; struct trace_event *ldvarg433 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg435 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg433 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg434), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_116 == 1) { trace_raw_output_i915_gem_ring_dispatch(ldvarg435, ldvarg434, ldvarg433); ldv_state_variable_116 = 1; } else { } goto ldv_53921; default: ldv_stop(); } ldv_53921: ; return; } } void ldv_main_exported_88(void) { enum trace_reg ldvarg432 ; void *ldvarg431 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg431 = tmp; ldv_memset((void *)(& ldvarg432), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_88 == 1) { trace_event_reg(event_class_i915_gem_evict_vm_group0, ldvarg432, ldvarg431); ldv_state_variable_88 = 1; } else { } goto ldv_53929; case 1: ; if (ldv_state_variable_88 == 1) { trace_event_raw_init(event_class_i915_gem_evict_vm_group0); ldv_state_variable_88 = 1; } else { } goto ldv_53929; case 2: ; if (ldv_state_variable_88 == 1) { trace_event_define_fields_i915_gem_evict_vm(event_class_i915_gem_evict_vm_group0); ldv_state_variable_88 = 1; } else { } goto ldv_53929; default: ldv_stop(); } ldv_53929: ; return; } } void ldv_main_exported_125(void) { struct 
trace_event *ldvarg22 ; void *tmp ; int ldvarg23 ; struct trace_iterator *ldvarg21 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg22 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg21 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg23), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_125 == 1) { trace_raw_output_i915_gem_object_change_domain(ldvarg21, ldvarg23, ldvarg22); ldv_state_variable_125 = 1; } else { } goto ldv_53940; default: ldv_stop(); } ldv_53940: ; return; } } void ldv_main_exported_100(void) { void *ldvarg466 ; void *tmp ; enum trace_reg ldvarg467 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg466 = tmp; ldv_memset((void *)(& ldvarg467), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_100 == 1) { trace_event_reg(event_class_i915_vma_bind_group0, ldvarg467, ldvarg466); ldv_state_variable_100 = 1; } else { } goto ldv_53948; case 1: ; if (ldv_state_variable_100 == 1) { trace_event_raw_init(event_class_i915_vma_bind_group0); ldv_state_variable_100 = 1; } else { } goto ldv_53948; case 2: ; if (ldv_state_variable_100 == 1) { trace_event_define_fields_i915_vma_bind(event_class_i915_vma_bind_group0); ldv_state_variable_100 = 1; } else { } goto ldv_53948; default: ldv_stop(); } ldv_53948: ; return; } } void ldv_main_exported_110(void) { struct trace_iterator *ldvarg142 ; void *tmp ; int ldvarg144 ; struct trace_event *ldvarg143 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg142 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg143 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg144), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_110 == 1) { trace_raw_output_i915_flip_complete(ldvarg142, ldvarg144, ldvarg143); ldv_state_variable_110 = 1; } else { } goto ldv_53959; default: ldv_stop(); } ldv_53959: 
/*
 * Continuation of the generated LDV harness: ldv_main_exported_NN()
 * drivers for further i915 trace event classes.  Each driver either
 * (a) nondeterministically dispatches trace_event_reg /
 * trace_event_raw_init / trace_event_define_fields_* on the class's
 * pre-allocated event_class_*_group0 object, or (b) calls one
 * trace_raw_output_* handler with zero-initialized trace_iterator
 * (8560-byte) and trace_event (48-byte) buffers.  Callbacks fire only
 * while the matching ldv_state_variable_NN equals 1; any other nondet
 * branch value reaches ldv_stop().  Machine-generated (CIL); the first
 * tokens below close a function begun on the previous line, so the code
 * is kept byte-identical.
 */
; return; } } void ldv_main_exported_82(void) { enum trace_reg ldvarg146 ; void *ldvarg145 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg145 = tmp; ldv_memset((void *)(& ldvarg146), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_82 == 1) { trace_event_reg(event_class_i915_gem_request_wait_begin_group0, ldvarg146, ldvarg145); ldv_state_variable_82 = 1; } else { } goto ldv_53967; case 1: ; if (ldv_state_variable_82 == 1) { trace_event_raw_init(event_class_i915_gem_request_wait_begin_group0); ldv_state_variable_82 = 1; } else { } goto ldv_53967; case 2: ; if (ldv_state_variable_82 == 1) { trace_event_define_fields_i915_gem_request_wait_begin(event_class_i915_gem_request_wait_begin_group0); ldv_state_variable_82 = 1; } else { } goto ldv_53967; default: ldv_stop(); } ldv_53967: ; return; } } void ldv_main_exported_128(void) { int ldvarg470 ; struct trace_iterator *ldvarg471 ; void *tmp ; struct trace_event *ldvarg469 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg471 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg469 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg470), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_128 == 1) { trace_raw_output_i915_va(ldvarg471, ldvarg470, ldvarg469); ldv_state_variable_128 = 1; } else { } goto ldv_53978; default: ldv_stop(); } ldv_53978: ; return; } } void ldv_main_exported_84(void) { void *ldvarg26 ; void *tmp ; enum trace_reg ldvarg27 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg26 = tmp; ldv_memset((void *)(& ldvarg27), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_84 == 1) { trace_event_reg(event_class_i915_gem_request_group0, ldvarg27, ldvarg26); ldv_state_variable_84 = 1; } else { } goto ldv_53986; case 1: ; if (ldv_state_variable_84 == 1) { trace_event_raw_init(event_class_i915_gem_request_group0); 
ldv_state_variable_84 = 1; } else { } goto ldv_53986; case 2: ; if (ldv_state_variable_84 == 1) { trace_event_define_fields_i915_gem_request(event_class_i915_gem_request_group0); ldv_state_variable_84 = 1; } else { } goto ldv_53986; default: ldv_stop(); } ldv_53986: ; return; } } void ldv_main_exported_120(void) { struct trace_event *ldvarg473 ; void *tmp ; struct trace_iterator *ldvarg475 ; void *tmp___0 ; int ldvarg474 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg473 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg475 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg474), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_120 == 1) { trace_raw_output_i915_gem_evict(ldvarg475, ldvarg474, ldvarg473); ldv_state_variable_120 = 1; } else { } goto ldv_53997; default: ldv_stop(); } ldv_53997: ; return; } } void ldv_main_exported_134(void) { struct trace_iterator *ldvarg483 ; void *tmp ; int ldvarg482 ; struct trace_event *ldvarg481 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg483 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg481 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg482), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_134 == 1) { trace_raw_output_i915_pipe_update_start(ldvarg483, ldvarg482, ldvarg481); ldv_state_variable_134 = 1; } else { } goto ldv_54006; default: ldv_stop(); } ldv_54006: ; return; } } void ldv_main_exported_95(void) { enum trace_reg ldvarg40 ; void *ldvarg39 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg39 = tmp; ldv_memset((void *)(& ldvarg40), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_95 == 1) { trace_event_reg(event_class_i915_gem_object_change_domain_group0, ldvarg40, ldvarg39); ldv_state_variable_95 = 1; } else { } goto ldv_54014; case 1: ; if (ldv_state_variable_95 
== 1) { trace_event_raw_init(event_class_i915_gem_object_change_domain_group0); ldv_state_variable_95 = 1; } else { } goto ldv_54014; case 2: ; if (ldv_state_variable_95 == 1) { trace_event_define_fields_i915_gem_object_change_domain(event_class_i915_gem_object_change_domain_group0); ldv_state_variable_95 = 1; } else { } goto ldv_54014; default: ldv_stop(); } ldv_54014: ; return; } } void ldv_main_exported_75(void) { enum trace_reg ldvarg486 ; void *ldvarg485 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg485 = tmp; ldv_memset((void *)(& ldvarg486), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_75 == 1) { trace_event_reg(event_class_switch_mm_group0, ldvarg486, ldvarg485); ldv_state_variable_75 = 1; } else { } goto ldv_54024; case 1: ; if (ldv_state_variable_75 == 1) { trace_event_raw_init(event_class_switch_mm_group0); ldv_state_variable_75 = 1; } else { } goto ldv_54024; case 2: ; if (ldv_state_variable_75 == 1) { trace_event_define_fields_switch_mm(event_class_switch_mm_group0); ldv_state_variable_75 = 1; } else { } goto ldv_54024; default: ldv_stop(); } ldv_54024: ; return; } } void ldv_main_exported_83(void) { void *ldvarg487 ; void *tmp ; enum trace_reg ldvarg488 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg487 = tmp; ldv_memset((void *)(& ldvarg488), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_83 == 1) { trace_event_reg(event_class_i915_gem_request_notify_group0, ldvarg488, ldvarg487); ldv_state_variable_83 = 1; } else { } goto ldv_54034; case 1: ; if (ldv_state_variable_83 == 1) { trace_event_raw_init(event_class_i915_gem_request_notify_group0); ldv_state_variable_83 = 1; } else { } goto ldv_54034; case 2: ; if (ldv_state_variable_83 == 1) { trace_event_define_fields_i915_gem_request_notify(event_class_i915_gem_request_notify_group0); ldv_state_variable_83 = 1; } else { } goto ldv_54034; default: ldv_stop(); } ldv_54034: ; return; } 
} void ldv_main_exported_108(void) { struct trace_iterator *ldvarg298 ; void *tmp ; int ldvarg300 ; struct trace_event *ldvarg299 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg298 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg299 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg300), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_108 == 1) { trace_raw_output_intel_gpu_freq_change(ldvarg298, ldvarg300, ldvarg299); ldv_state_variable_108 = 1; } else { } goto ldv_54045; default: ldv_stop(); } ldv_54045: ; return; } } void ldv_main_exported_115(void) { struct trace_iterator *ldvarg301 ; void *tmp ; struct trace_event *ldvarg302 ; void *tmp___0 ; int ldvarg303 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg301 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg302 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg303), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_115 == 1) { trace_raw_output_i915_gem_ring_flush(ldvarg301, ldvarg303, ldvarg302); ldv_state_variable_115 = 1; } else { } goto ldv_54054; default: ldv_stop(); } ldv_54054: ; return; } } void ldv_main_exported_112(void) { struct trace_iterator *ldvarg158 ; void *tmp ; int ldvarg160 ; struct trace_event *ldvarg159 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg158 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg159 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg160), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_112 == 1) { trace_raw_output_i915_gem_request_wait_begin(ldvarg158, ldvarg160, ldvarg159); ldv_state_variable_112 = 1; } else { } goto ldv_54063; default: ldv_stop(); } ldv_54063: ; return; } } void ldv_main_exported_109(void) { struct trace_event *ldvarg50 ; void *tmp ; struct trace_iterator 
*ldvarg49 ; void *tmp___0 ; int ldvarg51 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg50 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg49 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg51), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_109 == 1) { trace_raw_output_i915_reg_rw(ldvarg49, ldvarg51, ldvarg50); ldv_state_variable_109 = 1; } else { } goto ldv_54072; default: ldv_stop(); } ldv_54072: ; return; } } void ldv_main_exported_92(void) { void *ldvarg304 ; void *tmp ; enum trace_reg ldvarg305 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg304 = tmp; ldv_memset((void *)(& ldvarg305), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_92 == 1) { trace_event_reg(event_class_i915_gem_object_fault_group0, ldvarg305, ldvarg304); ldv_state_variable_92 = 1; } else { } goto ldv_54080; case 1: ; if (ldv_state_variable_92 == 1) { trace_event_raw_init(event_class_i915_gem_object_fault_group0); ldv_state_variable_92 = 1; } else { } goto ldv_54080; case 2: ; if (ldv_state_variable_92 == 1) { trace_event_define_fields_i915_gem_object_fault(event_class_i915_gem_object_fault_group0); ldv_state_variable_92 = 1; } else { } goto ldv_54080; default: ldv_stop(); } ldv_54080: ; return; } } void ldv_main_exported_103(void) { void *ldvarg306 ; void *tmp ; enum trace_reg ldvarg307 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg306 = tmp; ldv_memset((void *)(& ldvarg307), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_103 == 1) { trace_event_reg(event_class_i915_pipe_update_vblank_evaded_group0, ldvarg307, ldvarg306); ldv_state_variable_103 = 1; } else { } goto ldv_54090; case 1: ; if (ldv_state_variable_103 == 1) { trace_event_raw_init(event_class_i915_pipe_update_vblank_evaded_group0); ldv_state_variable_103 = 1; } else { } goto ldv_54090; case 2: ; if (ldv_state_variable_103 == 1) { 
trace_event_define_fields_i915_pipe_update_vblank_evaded(event_class_i915_pipe_update_vblank_evaded_group0); ldv_state_variable_103 = 1; } else { } goto ldv_54090; default: ldv_stop(); } ldv_54090: ; return; } } void ldv_main_exported_89(void) { void *ldvarg60 ; void *tmp ; enum trace_reg ldvarg61 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg60 = tmp; ldv_memset((void *)(& ldvarg61), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_89 == 1) { trace_event_reg(event_class_i915_gem_evict_everything_group0, ldvarg61, ldvarg60); ldv_state_variable_89 = 1; } else { } goto ldv_54100; case 1: ; if (ldv_state_variable_89 == 1) { trace_event_raw_init(event_class_i915_gem_evict_everything_group0); ldv_state_variable_89 = 1; } else { } goto ldv_54100; case 2: ; if (ldv_state_variable_89 == 1) { trace_event_define_fields_i915_gem_evict_everything(event_class_i915_gem_evict_everything_group0); ldv_state_variable_89 = 1; } else { } goto ldv_54100; default: ldv_stop(); } ldv_54100: ; return; } } void ldv_main_exported_113(void) { struct trace_event *ldvarg309 ; void *tmp ; struct trace_iterator *ldvarg308 ; void *tmp___0 ; int ldvarg310 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg309 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg308 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg310), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_113 == 1) { trace_raw_output_i915_gem_request_notify(ldvarg308, ldvarg310, ldvarg309); ldv_state_variable_113 = 1; } else { } goto ldv_54111; default: ldv_stop(); } ldv_54111: ; return; } } void ldv_main_exported_124(void) { int ldvarg182 ; struct trace_iterator *ldvarg180 ; void *tmp ; struct trace_event *ldvarg181 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg180 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg181 = (struct trace_event *)tmp___0; 
ldv_memset((void *)(& ldvarg182), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_124 == 1) { trace_raw_output_i915_gem_object_pwrite(ldvarg180, ldvarg182, ldvarg181); ldv_state_variable_124 = 1; } else { } goto ldv_54120; default: ldv_stop(); } ldv_54120: ; return; } } void ldv_main_exported_104(void) { enum trace_reg ldvarg184 ; void *ldvarg183 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg183 = tmp; ldv_memset((void *)(& ldvarg184), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_104 == 1) { trace_event_reg(event_class_i915_pipe_update_start_group0, ldvarg184, ldvarg183); ldv_state_variable_104 = 1; } else { } goto ldv_54128; case 1: ; if (ldv_state_variable_104 == 1) { trace_event_raw_init(event_class_i915_pipe_update_start_group0); ldv_state_variable_104 = 1; } else { } goto ldv_54128; case 2: ; if (ldv_state_variable_104 == 1) { trace_event_define_fields_i915_pipe_update_start(event_class_i915_pipe_update_start_group0); ldv_state_variable_104 = 1; } else { } goto ldv_54128; default: ldv_stop(); } ldv_54128: ; return; } } void ldv_main_exported_131(void) { struct trace_event *ldvarg186 ; void *tmp ; struct trace_iterator *ldvarg185 ; void *tmp___0 ; int ldvarg187 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg186 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg185 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg187), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_131 == 1) { trace_raw_output_i915_gem_object_create(ldvarg185, ldvarg187, ldvarg186); ldv_state_variable_131 = 1; } else { } goto ldv_54139; default: ldv_stop(); } ldv_54139: ; return; } } void ldv_main_exported_130(void) { struct trace_iterator *ldvarg510 ; void *tmp ; struct trace_event *ldvarg508 ; void *tmp___0 ; int ldvarg509 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg510 = (struct 
trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg508 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg509), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_130 == 1) { trace_raw_output_i915_vma_bind(ldvarg510, ldvarg509, ldvarg508); ldv_state_variable_130 = 1; } else { } goto ldv_54148; default: ldv_stop(); } ldv_54148: ; return; } } void ldv_main_exported_122(void) { struct trace_event *ldvarg519 ; void *tmp ; int ldvarg520 ; struct trace_iterator *ldvarg521 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg519 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg521 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg520), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_122 == 1) { trace_raw_output_i915_gem_object_fault(ldvarg521, ldvarg520, ldvarg519); ldv_state_variable_122 = 1; } else { } goto ldv_54157; default: ldv_stop(); } ldv_54157: ; return; } } void ldv_main_exported_91(void) { void *ldvarg335 ; void *tmp ; enum trace_reg ldvarg336 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg335 = tmp; ldv_memset((void *)(& ldvarg336), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_91 == 1) { trace_event_reg(event_class_i915_gem_object_group0, ldvarg336, ldvarg335); ldv_state_variable_91 = 1; } else { } goto ldv_54165; case 1: ; if (ldv_state_variable_91 == 1) { trace_event_raw_init(event_class_i915_gem_object_group0); ldv_state_variable_91 = 1; } else { } goto ldv_54165; case 2: ; if (ldv_state_variable_91 == 1) { trace_event_define_fields_i915_gem_object(event_class_i915_gem_object_group0); ldv_state_variable_91 = 1; } else { } goto ldv_54165; default: ldv_stop(); } ldv_54165: ; return; } } void ldv_main_exported_78(void) { void *ldvarg75 ; void *tmp ; enum trace_reg ldvarg76 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg75 = tmp; 
ldv_memset((void *)(& ldvarg76), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_78 == 1) { trace_event_reg(event_class_intel_gpu_freq_change_group0, ldvarg76, ldvarg75); ldv_state_variable_78 = 1; } else { } goto ldv_54175; case 1: ; if (ldv_state_variable_78 == 1) { trace_event_raw_init(event_class_intel_gpu_freq_change_group0); ldv_state_variable_78 = 1; } else { } goto ldv_54175; case 2: ; if (ldv_state_variable_78 == 1) { trace_event_define_fields_intel_gpu_freq_change(event_class_intel_gpu_freq_change_group0); ldv_state_variable_78 = 1; } else { } goto ldv_54175; default: ldv_stop(); } ldv_54175: ; return; } } void ldv_main_exported_121(void) { struct trace_iterator *ldvarg194 ; void *tmp ; int ldvarg196 ; struct trace_event *ldvarg195 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg194 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg195 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg196), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_121 == 1) { trace_raw_output_i915_gem_object(ldvarg194, ldvarg196, ldvarg195); ldv_state_variable_121 = 1; } else { } goto ldv_54186; default: ldv_stop(); } ldv_54186: ; return; } } void ldv_main_exported_107(void) { struct trace_event *ldvarg339 ; void *tmp ; int ldvarg340 ; struct trace_iterator *ldvarg338 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg339 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg338 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg340), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_107 == 1) { trace_raw_output_i915_ppgtt(ldvarg338, ldvarg340, ldvarg339); ldv_state_variable_107 = 1; } else { } goto ldv_54195; default: ldv_stop(); } ldv_54195: ; return; } } void ldv_main_exported_79(void) { enum trace_reg ldvarg198 ; void *ldvarg197 ; void 
*tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg197 = tmp; ldv_memset((void *)(& ldvarg198), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_79 == 1) { trace_event_reg(event_class_i915_reg_rw_group0, ldvarg198, ldvarg197); ldv_state_variable_79 = 1; } else { } goto ldv_54203; case 1: ; if (ldv_state_variable_79 == 1) { trace_event_raw_init(event_class_i915_reg_rw_group0); ldv_state_variable_79 = 1; } else { } goto ldv_54203; case 2: ; if (ldv_state_variable_79 == 1) { trace_event_define_fields_i915_reg_rw(event_class_i915_reg_rw_group0); ldv_state_variable_79 = 1; } else { } goto ldv_54203; default: ldv_stop(); } ldv_54203: ; return; } } void ldv_main_exported_87(void) { enum trace_reg ldvarg342 ; void *ldvarg341 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg341 = tmp; ldv_memset((void *)(& ldvarg342), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_87 == 1) { trace_event_reg(event_class_i915_gem_ring_sync_to_group0, ldvarg342, ldvarg341); ldv_state_variable_87 = 1; } else { } goto ldv_54213; case 1: ; if (ldv_state_variable_87 == 1) { trace_event_raw_init(event_class_i915_gem_ring_sync_to_group0); ldv_state_variable_87 = 1; } else { } goto ldv_54213; case 2: ; if (ldv_state_variable_87 == 1) { trace_event_define_fields_i915_gem_ring_sync_to(event_class_i915_gem_ring_sync_to_group0); ldv_state_variable_87 = 1; } else { } goto ldv_54213; default: ldv_stop(); } ldv_54213: ; return; } } void ldv_main_exported_77(void) { enum trace_reg ldvarg347 ; void *ldvarg346 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg346 = tmp; ldv_memset((void *)(& ldvarg347), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_77 == 1) { trace_event_reg(event_class_i915_ppgtt_group0, ldvarg347, ldvarg346); ldv_state_variable_77 = 1; } else { } goto ldv_54223; case 1: ; if (ldv_state_variable_77 == 1) { 
trace_event_raw_init(event_class_i915_ppgtt_group0); ldv_state_variable_77 = 1; } else { } goto ldv_54223; case 2: ; if (ldv_state_variable_77 == 1) { trace_event_define_fields_i915_ppgtt(event_class_i915_ppgtt_group0); ldv_state_variable_77 = 1; } else { } goto ldv_54223; default: ldv_stop(); } ldv_54223: ; return; } } void ldv_main_exported_93(void) { enum trace_reg ldvarg78 ; void *ldvarg77 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg77 = tmp; ldv_memset((void *)(& ldvarg78), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_93 == 1) { trace_event_reg(event_class_i915_gem_object_pread_group0, ldvarg78, ldvarg77); ldv_state_variable_93 = 1; } else { } goto ldv_54233; case 1: ; if (ldv_state_variable_93 == 1) { trace_event_raw_init(event_class_i915_gem_object_pread_group0); ldv_state_variable_93 = 1; } else { } goto ldv_54233; case 2: ; if (ldv_state_variable_93 == 1) { trace_event_define_fields_i915_gem_object_pread(event_class_i915_gem_object_pread_group0); ldv_state_variable_93 = 1; } else { } goto ldv_54233; default: ldv_stop(); } ldv_54233: ; return; } } void ldv_main_exported_106(void) { struct trace_event *ldvarg80 ; void *tmp ; struct trace_iterator *ldvarg79 ; void *tmp___0 ; int ldvarg81 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg80 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg79 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg81), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_106 == 1) { trace_raw_output_i915_context(ldvarg79, ldvarg81, ldvarg80); ldv_state_variable_106 = 1; } else { } goto ldv_54244; default: ldv_stop(); } ldv_54244: ; return; } } void ldv_main_exported_133(void) { int ldvarg350 ; struct trace_iterator *ldvarg348 ; void *tmp ; struct trace_event *ldvarg349 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg348 = (struct trace_iterator *)tmp; tmp___0 = 
ldv_init_zalloc(48UL); ldvarg349 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg350), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_133 == 1) { trace_raw_output_i915_pipe_update_vblank_evaded(ldvarg348, ldvarg350, ldvarg349); ldv_state_variable_133 = 1; } else { } goto ldv_54253; default: ldv_stop(); } ldv_54253: ; return; } } void ldv_main_exported_96(void) { void *ldvarg208 ; void *tmp ; enum trace_reg ldvarg209 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg208 = tmp; ldv_memset((void *)(& ldvarg209), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_96 == 1) { trace_event_reg(event_class_i915_page_table_entry_update_group0, ldvarg209, ldvarg208); ldv_state_variable_96 = 1; } else { } goto ldv_54261; case 1: ; if (ldv_state_variable_96 == 1) { trace_event_raw_init(event_class_i915_page_table_entry_update_group0); ldv_state_variable_96 = 1; } else { } goto ldv_54261; case 2: ; if (ldv_state_variable_96 == 1) { trace_event_define_fields_i915_page_table_entry_update(event_class_i915_page_table_entry_update_group0); ldv_state_variable_96 = 1; } else { } goto ldv_54261; default: ldv_stop(); } ldv_54261: ; return; } } void ldv_main_exported_105(void) { int ldvarg532 ; struct trace_iterator *ldvarg533 ; void *tmp ; struct trace_event *ldvarg531 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg533 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg531 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg532), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_105 == 1) { trace_raw_output_switch_mm(ldvarg533, ldvarg532, ldvarg531); ldv_state_variable_105 = 1; } else { } goto ldv_54272; default: ldv_stop(); } ldv_54272: ; return; } } void ldv_main_exported_126(void) { struct trace_iterator *ldvarg210 ; void *tmp ; int ldvarg212 ; struct trace_event *ldvarg211 ; void 
/* LDV harness continued: drivers for trace_raw_output_i915_page_table_entry_update
 * (_126), trace_raw_output_i915_gem_object_pread (_123) and the
 * i915_gem_ring_flush trace-event class ops (_85: reg / raw_init / define_fields).
 * Generated code — comments only. */
*tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg210 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg211 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg212), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_126 == 1) { trace_raw_output_i915_page_table_entry_update(ldvarg210, ldvarg212, ldvarg211); ldv_state_variable_126 = 1; } else { } goto ldv_54281; default: ldv_stop(); } ldv_54281: ; return; } } void ldv_main_exported_123(void) { int ldvarg361 ; struct trace_iterator *ldvarg359 ; void *tmp ; struct trace_event *ldvarg360 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg359 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg360 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg361), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_123 == 1) { trace_raw_output_i915_gem_object_pread(ldvarg359, ldvarg361, ldvarg360); ldv_state_variable_123 = 1; } else { } goto ldv_54290; default: ldv_stop(); } ldv_54290: ; return; } } void ldv_main_exported_85(void) { enum trace_reg ldvarg535 ; void *ldvarg534 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg534 = tmp; ldv_memset((void *)(& ldvarg535), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_85 == 1) { trace_event_reg(event_class_i915_gem_ring_flush_group0, ldvarg535, ldvarg534); ldv_state_variable_85 = 1; } else { } goto ldv_54298; case 1: ; if (ldv_state_variable_85 == 1) { trace_event_raw_init(event_class_i915_gem_ring_flush_group0); ldv_state_variable_85 = 1; } else { } goto ldv_54298; case 2: ; if (ldv_state_variable_85 == 1) { trace_event_define_fields_i915_gem_ring_flush(event_class_i915_gem_ring_flush_group0); ldv_state_variable_85 = 1; } else { } goto ldv_54298; default: ldv_stop(); } ldv_54298: ; return; } } void ldv_main_exported_94(void) { enum trace_reg 
/* LDV harness continued: class-op drivers for i915_gem_object_pwrite (_94) and
 * i915_page_table_entry (_97), then the start of the raw-output driver for
 * i915_gem_request (_114). Generated code — comments only. */
ldvarg544 ; void *ldvarg543 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg543 = tmp; ldv_memset((void *)(& ldvarg544), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_94 == 1) { trace_event_reg(event_class_i915_gem_object_pwrite_group0, ldvarg544, ldvarg543); ldv_state_variable_94 = 1; } else { } goto ldv_54308; case 1: ; if (ldv_state_variable_94 == 1) { trace_event_raw_init(event_class_i915_gem_object_pwrite_group0); ldv_state_variable_94 = 1; } else { } goto ldv_54308; case 2: ; if (ldv_state_variable_94 == 1) { trace_event_define_fields_i915_gem_object_pwrite(event_class_i915_gem_object_pwrite_group0); ldv_state_variable_94 = 1; } else { } goto ldv_54308; default: ldv_stop(); } ldv_54308: ; return; } } void ldv_main_exported_97(void) { void *ldvarg374 ; void *tmp ; enum trace_reg ldvarg375 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg374 = tmp; ldv_memset((void *)(& ldvarg375), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_97 == 1) { trace_event_reg(event_class_i915_page_table_entry_group0, ldvarg375, ldvarg374); ldv_state_variable_97 = 1; } else { } goto ldv_54318; case 1: ; if (ldv_state_variable_97 == 1) { trace_event_raw_init(event_class_i915_page_table_entry_group0); ldv_state_variable_97 = 1; } else { } goto ldv_54318; case 2: ; if (ldv_state_variable_97 == 1) { trace_event_define_fields_i915_page_table_entry(event_class_i915_page_table_entry_group0); ldv_state_variable_97 = 1; } else { } goto ldv_54318; default: ldv_stop(); } ldv_54318: ; return; } } void ldv_main_exported_114(void) { struct trace_event *ldvarg107 ; void *tmp ; struct trace_iterator *ldvarg106 ; void *tmp___0 ; int ldvarg108 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg107 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg106 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg108), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); 
/* LDV harness continued: end of _114 (trace_raw_output_i915_gem_request), then
 * drivers for i915_flip_request raw output (_111), i915_flip_request class ops
 * (_81), and the start of the i915_va class-op driver (_98).
 * Generated code — comments only. */
switch (tmp___1) { case 0: ; if (ldv_state_variable_114 == 1) { trace_raw_output_i915_gem_request(ldvarg106, ldvarg108, ldvarg107); ldv_state_variable_114 = 1; } else { } goto ldv_54329; default: ldv_stop(); } ldv_54329: ; return; } } void ldv_main_exported_111(void) { int ldvarg554 ; struct trace_event *ldvarg553 ; void *tmp ; struct trace_iterator *ldvarg555 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg553 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg555 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg554), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_111 == 1) { trace_raw_output_i915_flip_request(ldvarg555, ldvarg554, ldvarg553); ldv_state_variable_111 = 1; } else { } goto ldv_54338; default: ldv_stop(); } ldv_54338: ; return; } } void ldv_main_exported_81(void) { enum trace_reg ldvarg118 ; void *ldvarg117 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg117 = tmp; ldv_memset((void *)(& ldvarg118), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_81 == 1) { trace_event_reg(event_class_i915_flip_request_group0, ldvarg118, ldvarg117); ldv_state_variable_81 = 1; } else { } goto ldv_54346; case 1: ; if (ldv_state_variable_81 == 1) { trace_event_raw_init(event_class_i915_flip_request_group0); ldv_state_variable_81 = 1; } else { } goto ldv_54346; case 2: ; if (ldv_state_variable_81 == 1) { trace_event_define_fields_i915_flip_request(event_class_i915_flip_request_group0); ldv_state_variable_81 = 1; } else { } goto ldv_54346; default: ldv_stop(); } ldv_54346: ; return; } } void ldv_main_exported_98(void) { enum trace_reg ldvarg231 ; void *ldvarg230 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg230 = tmp; ldv_memset((void *)(& ldvarg231), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_98 == 1) { 
/* LDV harness continued: end of _98 (i915_va class ops), drivers for
 * i915_pipe_update_end raw output (_132) and i915_gem_object_create class ops
 * (_101), then the start of _117. Generated code — comments only. */
trace_event_reg(event_class_i915_va_group0, ldvarg231, ldvarg230); ldv_state_variable_98 = 1; } else { } goto ldv_54356; case 1: ; if (ldv_state_variable_98 == 1) { trace_event_raw_init(event_class_i915_va_group0); ldv_state_variable_98 = 1; } else { } goto ldv_54356; case 2: ; if (ldv_state_variable_98 == 1) { trace_event_define_fields_i915_va(event_class_i915_va_group0); ldv_state_variable_98 = 1; } else { } goto ldv_54356; default: ldv_stop(); } ldv_54356: ; return; } } void ldv_main_exported_132(void) { struct trace_iterator *ldvarg567 ; void *tmp ; struct trace_event *ldvarg565 ; void *tmp___0 ; int ldvarg566 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg567 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg565 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg566), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_132 == 1) { trace_raw_output_i915_pipe_update_end(ldvarg567, ldvarg566, ldvarg565); ldv_state_variable_132 = 1; } else { } goto ldv_54367; default: ldv_stop(); } ldv_54367: ; return; } } void ldv_main_exported_101(void) { void *ldvarg122 ; void *tmp ; enum trace_reg ldvarg123 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg122 = tmp; ldv_memset((void *)(& ldvarg123), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_101 == 1) { trace_event_reg(event_class_i915_gem_object_create_group0, ldvarg123, ldvarg122); ldv_state_variable_101 = 1; } else { } goto ldv_54375; case 1: ; if (ldv_state_variable_101 == 1) { trace_event_raw_init(event_class_i915_gem_object_create_group0); ldv_state_variable_101 = 1; } else { } goto ldv_54375; case 2: ; if (ldv_state_variable_101 == 1) { trace_event_define_fields_i915_gem_object_create(event_class_i915_gem_object_create_group0); ldv_state_variable_101 = 1; } else { } goto ldv_54375; default: ldv_stop(); } ldv_54375: ; return; } } void ldv_main_exported_117(void) { int ldvarg235 ; 
/* LDV harness continued: _117 drives trace_raw_output_i915_gem_ring_sync_to,
 * _86 drives the i915_gem_ring_dispatch class ops, and _76 (started here)
 * drives the i915_context class ops. Generated code — comments only. */
struct trace_iterator *ldvarg233 ; void *tmp ; struct trace_event *ldvarg234 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(8560UL); ldvarg233 = (struct trace_iterator *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg234 = (struct trace_event *)tmp___0; ldv_memset((void *)(& ldvarg235), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_117 == 1) { trace_raw_output_i915_gem_ring_sync_to(ldvarg233, ldvarg235, ldvarg234); ldv_state_variable_117 = 1; } else { } goto ldv_54386; default: ldv_stop(); } ldv_54386: ; return; } } void ldv_main_exported_86(void) { void *ldvarg126 ; void *tmp ; enum trace_reg ldvarg127 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg126 = tmp; ldv_memset((void *)(& ldvarg127), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_86 == 1) { trace_event_reg(event_class_i915_gem_ring_dispatch_group0, ldvarg127, ldvarg126); ldv_state_variable_86 = 1; } else { } goto ldv_54394; case 1: ; if (ldv_state_variable_86 == 1) { trace_event_raw_init(event_class_i915_gem_ring_dispatch_group0); ldv_state_variable_86 = 1; } else { } goto ldv_54394; case 2: ; if (ldv_state_variable_86 == 1) { trace_event_define_fields_i915_gem_ring_dispatch(event_class_i915_gem_ring_dispatch_group0); ldv_state_variable_86 = 1; } else { } goto ldv_54394; default: ldv_stop(); } ldv_54394: ; return; } } void ldv_main_exported_76(void) { void *ldvarg128 ; void *tmp ; enum trace_reg ldvarg129 ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg128 = tmp; ldv_memset((void *)(& ldvarg129), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_76 == 1) { trace_event_reg(event_class_i915_context_group0, ldvarg129, ldvarg128); ldv_state_variable_76 = 1; } else { } goto ldv_54404; case 1: ; if (ldv_state_variable_76 == 1) { trace_event_raw_init(event_class_i915_context_group0); ldv_state_variable_76 = 1; } else { } goto ldv_54404; case 2: ; if 
/* End of ldv_main_exported_76, then LDV workqueue interceptors: each
 * ldv_queue_work_on_NNN / ldv_queue_delayed_work_on_NNN forwards to the real
 * queue_(delayed_)work_on() and registers the work item with the LDV model via
 * activate_work_18(..., 2); ldv_flush_workqueue_404 flushes and then invokes
 * call_and_disable_all_18(2) to model execution of pending works. The tail of
 * this line begins CIL copies of list helpers (list_replace).
 * Generated code — comments only. */
(ldv_state_variable_76 == 1) { trace_event_define_fields_i915_context(event_class_i915_context_group0); ldv_state_variable_76 = 1; } else { } goto ldv_54404; default: ldv_stop(); } ldv_54404: ; return; } } bool ldv_queue_work_on_401(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_402(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_403(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_404(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_405(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void list_replace(struct list_head *old , struct list_head *new ) { { new->next = old->next; (new->next)->prev = new; new->prev = 
/* CIL-lowered copies of kernel inline helpers: list_replace/list_replace_init
 * (splice `new` in place of `old`, then reinit `old`); arch_local_save_flags___10
 * (paravirt save_fl via inline asm, BUG if the pv op is NULL); static_key_false___7
 * (jump-label fast path: true iff static_key_count(key) > 0); and the start of
 * rcu_read_lock_sched_held___7 (lockdep check that RCU-sched is held).
 * Generated code — comments only. */
old->prev; (new->prev)->next = new; return; } } __inline static void list_replace_init(struct list_head *old , struct list_head *new ) { { list_replace(old, new); INIT_LIST_HEAD(old); return; } } __inline static unsigned long arch_local_save_flags___10(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static bool static_key_false___7(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int rcu_read_lock_sched_held___7(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks 
/* End of rcu_read_lock_sched_held___7 (held if lockdep says so, or preemption /
 * IRQs are disabled), forward declarations of further numbered LDV workqueue
 * wrappers (_415.._419), and the start of kref_get___9 — a CIL copy of kref_get()
 * that WARNs once if the refcount was <= 1 before the increment.
 * Generated code — comments only. */
!= 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___10(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } bool ldv_queue_work_on_415(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_417(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_416(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_419(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_418(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void kref_get___9(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
/* End of kref_get___9, then kref_sub___11 (WARNs on NULL release; atomically
 * subtracts `count` and calls release(kref), returning 1, when the refcount hits
 * zero), kref_put___11 (kref_sub with count 1), and the intel_read/write_status_page
 * inlines that access ring->status_page.page_addr[reg] (the read has a compiler
 * barrier before the load). The tail declares i915 ringbuffer helpers.
 * Generated code — comments only. */
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub___11(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___11(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___11(kref, 1U, release); return (tmp); } } __inline static u32 intel_read_status_page(struct intel_engine_cs *ring , int reg ) { { __asm__ volatile ("": : : "memory"); return (*(ring->status_page.page_addr + (unsigned long )reg)); } } __inline static void intel_write_status_page(struct intel_engine_cs *ring , int reg , u32 value ) { { *(ring->status_page.page_addr + (unsigned long )reg) = value; return; } } void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf ) ; int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev , struct intel_ringbuffer *ringbuf ) ; void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf ) ; int intel_alloc_ringbuffer_obj(struct drm_device *dev , struct intel_ringbuffer *ringbuf ) ; int __intel_ring_space(int head , int tail , int size ) ; void intel_ring_update_space(struct intel_ringbuffer *ringbuf ) ; int intel_ring_space(struct intel_ringbuffer *ringbuf ) ; bool intel_ring_stopped(struct intel_engine_cs *ring ) ; void 
/* Pipe-control declarations, then inline helpers: intel_logical_ring_advance
 * (masks ringbuf->tail to size-1, i.e. wraps — size is presumably a power of two),
 * intel_logical_ring_emit (iowrite32 a dword at virtual_start+tail, tail += 4),
 * drm_gem_object_unreference___10 (kref_put with drm_gem_object_free if non-NULL),
 * i915_gem_request_reference___2 (kref_get if non-NULL, returns req), and
 * i915_gem_request_unreference___1 (WARNs unless dev->struct_mutex is held, then
 * kref_put with i915_gem_request_free). The tail starts i915_gem_request_assign___1.
 * Generated code — comments only. */
intel_fini_pipe_control(struct intel_engine_cs *ring ) ; int intel_init_pipe_control(struct intel_engine_cs *ring ) ; int init_workarounds_ring(struct intel_engine_cs *ring ) ; __inline static void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf ) { { ringbuf->tail = ringbuf->tail & (u32 )(ringbuf->size + -1); return; } } __inline static void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf , u32 data ) { { iowrite32(data, ringbuf->virtual_start + (unsigned long )ringbuf->tail); ringbuf->tail = ringbuf->tail + 4U; return; } } __inline static void drm_gem_object_unreference___10(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___11(& obj->refcount, & drm_gem_object_free); } else { } return; } } __inline static struct drm_i915_gem_request *i915_gem_request_reference___2(struct drm_i915_gem_request *req ) { { if ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0)) { kref_get___9(& req->ref); } else { } return (req); } } __inline static void i915_gem_request_unreference___1(struct drm_i915_gem_request *req ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& ((req->ring)->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2216, "WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); kref_put___11(& req->ref, & i915_gem_request_free); return; } } __inline static void i915_gem_request_assign___1(struct drm_i915_gem_request **pdst , struct drm_i915_gem_request *src ) { { if ((unsigned long )src != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_reference___2(src); } else { } if ((unsigned long )*pdst != (unsigned long )((struct drm_i915_gem_request *)0)) { 
/* End of i915_gem_request_assign___1 (ref src, unref old *pdst, store src), then
 * trace_i915_gem_ring_dispatch___0: CIL expansion of the tracepoint fast path —
 * if the static key is enabled, iterate the RCU-protected funcs array and call
 * each probe with (data, req, flags), with lockdep_rcu_suspicious diagnostics on
 * misuse; the trailing rcu_read_lock/unlock_sched_notrace pair repeats the
 * rcu_dereference_check bookkeeping. Generated code — comments only. */
i915_gem_request_unreference___1(*pdst); } else { } *pdst = src; return; } } __inline static void trace_i915_gem_ring_dispatch___0(struct drm_i915_gem_request *req , u32 flags ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_372___0 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_374___0 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___7(& __tracepoint_i915_gem_ring_dispatch.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_ring_dispatch.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___7(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 475, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46139: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct drm_i915_gem_request * , u32 ))it_func))(__data, req, flags); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46139; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_ring_dispatch.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* End of the tracepoint helper, forcewake declarations, and
 * i915_gem_context_reference___2 (kref_get on ctx->ref). Then
 * intel_sanitize_enable_execlists: WARNs if i915.enable_ppgtt is -1 (unresolved),
 * forces execlists on for gen > 8, honours an explicit 0, and otherwise enables
 * them on gen > 7 when PPGTT is on and use_mmio_flip >= 0. Finally
 * intel_execlists_ctx_id returns the context's GGTT offset >> 12 as its ID.
 * Generated code — comments only. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___7(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 475, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) ; void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) ; __inline static void i915_gem_context_reference___2(struct intel_context *ctx ) { { kref_get___9(& ctx->ref); return; } } static int intel_lr_context_pin(struct intel_engine_cs *ring , struct intel_context *ctx ) ; int intel_sanitize_enable_execlists(struct drm_device *dev , int enable_execlists ) { int __ret_warn_on ; long tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __ret_warn_on = i915.enable_ppgtt == -1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 230, "WARN_ON(i915.enable_ppgtt == -1)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { return (1); } else { } if (enable_execlists == 0) { return (0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if (((unsigned int )((unsigned char )__p___0->info.gen) > 7U && i915.enable_ppgtt != 0) && i915.use_mmio_flip >= 0) { return (1); } else { } return (0); } } u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj ) { u32 lrca ; unsigned long tmp ; { tmp = i915_gem_obj_ggtt_offset(ctx_obj); lrca = (u32 )tmp; return (lrca >> 12); } } 
/* execlists_ctx_descriptor: builds the 64-bit ELSP context descriptor — WARNs if
 * the context's GGTT address has bits outside [12,31], sets valid (bit 0) and
 * other mode bits, an extra bit on gen8, ORs in the address, packs the context ID
 * (intel_execlists_ctx_id) into the high dword, and on early gen9 revisions
 * (pdev revision <= 1) sets bit 2 for specific ring IDs — presumably a hardware
 * workaround; confirm against intel_lrc.c upstream. The tail begins
 * execlists_elsp_write. Generated code — comments only. */
static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring , struct drm_i915_gem_object *ctx_obj ) { struct drm_device *dev ; uint64_t desc ; uint64_t lrca ; unsigned long tmp ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p ; u32 tmp___1 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = ring->dev; tmp = i915_gem_obj_ggtt_offset(ctx_obj); lrca = (uint64_t )tmp; __ret_warn_on = (lrca & 0xffffffff00000fffULL) != 0ULL; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 273, "WARN_ON(lrca & 0xFFFFFFFF00000FFFULL)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); desc = 1ULL; desc = desc | 8ULL; __p = to_i915((struct drm_device const *)ctx_obj->base.dev); if ((unsigned int )((unsigned char )__p->info.gen) == 8U) { desc = desc | 32ULL; } else { } desc = desc | 256ULL; desc = desc | lrca; tmp___1 = intel_execlists_ctx_id(ctx_obj); desc = ((unsigned long long )tmp___1 << 32) | desc; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((__p___1->dev)->pdev)->revision <= 1U) { if ((((unsigned int )ring->id == 2U || (unsigned int )ring->id == 1U) || (unsigned int )ring->id == 3U) || (unsigned int )ring->id == 4U) { desc = desc | 4ULL; } else { } } else { } } else { } return (desc); } } static void execlists_elsp_write(struct intel_engine_cs *ring , struct drm_i915_gem_object *ctx_obj0 , struct drm_i915_gem_object *ctx_obj1 ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint64_t temp ; uint32_t desc[4U] ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; temp = 0ULL; if ((unsigned long 
/* End of execlists_elsp_write: builds desc[] for up to two contexts (element 1
 * first, element 0 second per ELSP submit order), takes the uncore spinlock and
 * forcewake, writes the four dwords to the ELSP register (mmio_base + 0x230) high
 * dword before low, then reads mmio_base + 0x234 (presumably a posting read of
 * the EXECLIST STATUS register — confirm against intel_lrc.c) before releasing.
 * Then execlists_update_context: kmap_atomic page 1 of the context object and
 * patch the register state — ring tail at index 7, ring-buffer GGTT address at
 * index 9, and for a 4-level-less PPGTT the four PDP entry addresses (high/low
 * dword pairs at indices 37..51), substituting scratch_pd for unused PDPs per
 * used_pdpes bits; always returns 0. Generated code — comments only. */
)ctx_obj1 != (unsigned long )((struct drm_i915_gem_object *)0)) { temp = execlists_ctx_descriptor(ring, ctx_obj1); } else { temp = 0ULL; } desc[1] = (unsigned int )(temp >> 32); desc[0] = (unsigned int )temp; temp = execlists_ctx_descriptor(ring, ctx_obj0); desc[3] = (unsigned int )(temp >> 32); desc[2] = (unsigned int )temp; spin_lock(& dev_priv->uncore.lock); intel_uncore_forcewake_get__locked(dev_priv, 7); writel(desc[1], (void volatile *)dev_priv->regs + (unsigned long )(ring->mmio_base + 560U)); writel(desc[0], (void volatile *)dev_priv->regs + (unsigned long )(ring->mmio_base + 560U)); writel(desc[3], (void volatile *)dev_priv->regs + (unsigned long )(ring->mmio_base + 560U)); writel(desc[2], (void volatile *)dev_priv->regs + (unsigned long )(ring->mmio_base + 560U)); readl((void const volatile *)dev_priv->regs + (unsigned long )(ring->mmio_base + 564U)); intel_uncore_forcewake_put__locked(dev_priv, 7); spin_unlock(& dev_priv->uncore.lock); return; } } static int execlists_update_context(struct drm_i915_gem_object *ctx_obj , struct drm_i915_gem_object *ring_obj , struct i915_hw_ppgtt *ppgtt , u32 tail ) { struct page *page ; uint32_t *reg_state ; void *tmp ; unsigned long tmp___0 ; u64 _addr ; int tmp___1 ; u64 _addr___0 ; int tmp___2 ; u64 _addr___1 ; int tmp___3 ; u64 _addr___2 ; int tmp___4 ; { page = i915_gem_object_get_page___0(ctx_obj, 1); tmp = kmap_atomic(page); reg_state = (uint32_t *)tmp; *(reg_state + 7UL) = tail; tmp___0 = i915_gem_obj_ggtt_offset(ring_obj); *(reg_state + 9UL) = (uint32_t )tmp___0; if ((unsigned long )ppgtt != (unsigned long )((struct i915_hw_ppgtt *)0)) { tmp___1 = constant_test_bit(3L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr = tmp___1 != 0 ? 
(ppgtt->__annonCompField80.pdp.page_directory[3])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 37UL) = (unsigned int )(_addr >> 32ULL); *(reg_state + 39UL) = (unsigned int )_addr; tmp___2 = constant_test_bit(2L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr___0 = tmp___2 != 0 ? (ppgtt->__annonCompField80.pdp.page_directory[2])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 41UL) = (unsigned int )(_addr___0 >> 32ULL); *(reg_state + 43UL) = (unsigned int )_addr___0; tmp___3 = constant_test_bit(1L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr___1 = tmp___3 != 0 ? (ppgtt->__annonCompField80.pdp.page_directory[1])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 45UL) = (unsigned int )(_addr___1 >> 32ULL); *(reg_state + 47UL) = (unsigned int )_addr___1; tmp___4 = constant_test_bit(0L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr___2 = tmp___4 != 0 ? 
(ppgtt->__annonCompField80.pdp.page_directory[0])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 49UL) = (unsigned int )(_addr___2 >> 32ULL); *(reg_state + 51UL) = (unsigned int )_addr___2; } else { } __kunmap_atomic((void *)reg_state); return (0); } } static void execlists_submit_contexts(struct intel_engine_cs *ring , struct intel_context *to0 , u32 tail0 , struct intel_context *to1 , u32 tail1 ) { struct drm_i915_gem_object *ctx_obj0 ; struct intel_ringbuffer *ringbuf0 ; struct drm_i915_gem_object *ctx_obj1 ; struct intel_ringbuffer *ringbuf1 ; long tmp ; int __ret_warn_on ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; int __ret_warn_on___0 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; long tmp___6 ; int __ret_warn_on___1 ; bool tmp___7 ; int tmp___8 ; long tmp___9 ; int __ret_warn_on___2 ; bool tmp___10 ; int tmp___11 ; long tmp___12 ; { ctx_obj0 = to0->engine[(unsigned int )ring->id].state; ringbuf0 = to0->engine[(unsigned int )ring->id].ringbuf; ctx_obj1 = (struct drm_i915_gem_object *)0; ringbuf1 = (struct intel_ringbuffer *)0; tmp = ldv__builtin_expect((unsigned long )ctx_obj0 == (unsigned long )((struct drm_i915_gem_object *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c"), "i" (371), "i" (12UL)); ldv_48071: ; goto ldv_48071; } else { } tmp___0 = i915_gem_obj_is_pinned(ctx_obj0); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { 
/* End of execlists_submit_contexts: BUG if a context object is NULL, WARN if the
 * context object or its ring buffer is not pinned, update the register state of
 * context 0 (and context 1 when to1 is non-NULL) via execlists_update_context,
 * then submit both via execlists_elsp_write. The tail begins
 * execlists_context_unqueue, which continues beyond this chunk.
 * Generated code — comments only. */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 372, "WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = i915_gem_obj_is_pinned(ringbuf0->obj); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } __ret_warn_on___0 = tmp___4; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 373, "WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0); if ((unsigned long )to1 != (unsigned long )((struct intel_context *)0)) { ringbuf1 = to1->engine[(unsigned int )ring->id].ringbuf; ctx_obj1 = to1->engine[(unsigned int )ring->id].state; tmp___6 = ldv__builtin_expect((unsigned long )ctx_obj1 == (unsigned long )((struct drm_i915_gem_object *)0), 0L); if (tmp___6 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c"), "i" (380), "i" (12UL)); ldv_48076: ; goto ldv_48076; } else { } tmp___7 = i915_gem_obj_is_pinned(ctx_obj1); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } __ret_warn_on___1 = tmp___8; tmp___9 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___9 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 381, "WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1))"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); tmp___10 = i915_gem_obj_is_pinned(ringbuf1->obj); if (tmp___10) { tmp___11 = 0; } else { tmp___11 = 1; } __ret_warn_on___2 = tmp___11; tmp___12 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___12 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 382, "WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj))"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1); } else { } execlists_elsp_write(ring, ctx_obj0, ctx_obj1); return; } } static void execlists_context_unqueue(struct intel_engine_cs *ring ) { struct drm_i915_gem_request *req0 ; struct drm_i915_gem_request *req1 ; struct drm_i915_gem_request *cursor ; struct drm_i915_gem_request *tmp ; int tmp___0 ; long tmp___1 ; int __ret_warn_on ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; int tmp___5 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct intel_ringbuffer *ringbuf ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int __ret_warn_on___0 ; long tmp___6 ; { req0 = (struct drm_i915_gem_request *)0; req1 = (struct drm_i915_gem_request *)0; cursor = (struct drm_i915_gem_request *)0; tmp = (struct drm_i915_gem_request *)0; tmp___0 = queued_spin_is_locked(& ring->execlist_lock.__annonCompField18.rlock.raw_lock); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection 
__bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c"), "i" (395), "i" (12UL)); ldv_48088: ; goto ldv_48088; } else { } tmp___2 = intel_irqs_enabled((struct drm_i915_private *)(ring->dev)->dev_private); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } __ret_warn_on = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 401, "WARN_ON(!intel_irqs_enabled(ring->dev->dev_private))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___5 = list_empty((struct list_head const *)(& ring->execlist_queue)); if (tmp___5 != 0) { return; } else { } __mptr = (struct list_head const *)ring->execlist_queue.next; cursor = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffff88UL; __mptr___0 = (struct list_head const *)cursor->execlist_link.next; tmp = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffff88UL; goto ldv_48099; ldv_48098: ; if ((unsigned long )req0 == (unsigned long )((struct drm_i915_gem_request *)0)) { req0 = cursor; } else if ((unsigned long )req0->ctx == (unsigned long )cursor->ctx) { cursor->elsp_submitted = req0->elsp_submitted; list_del(& req0->execlist_link); list_add_tail(& req0->execlist_link, & ring->execlist_retired_req_list); req0 = cursor; } else { req1 = cursor; goto ldv_48097; } cursor = tmp; __mptr___1 = (struct list_head const *)tmp->execlist_link.next; tmp = (struct drm_i915_gem_request *)__mptr___1 + 0xffffffffffffff88UL; ldv_48099: ; if ((unsigned long )(& cursor->execlist_link) != (unsigned long )(& 
ring->execlist_queue)) { goto ldv_48098; } else { } ldv_48097: __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) == 8U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { _L: /* CIL Label */ if (req0->elsp_submitted != 0) { ringbuf = (req0->ctx)->engine[(unsigned int )ring->id].ringbuf; req0->tail = req0->tail + 8U; req0->tail = req0->tail & (u32 )(ringbuf->size + -1); } else { } } else { } } __ret_warn_on___0 = (unsigned long )req1 != (unsigned long )((struct drm_i915_gem_request *)0) && req1->elsp_submitted != 0; tmp___6 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 445, "WARN_ON(req1 && req1->elsp_submitted)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); execlists_submit_contexts(ring, req0->ctx, req0->tail, (unsigned long )req1 != (unsigned long )((struct drm_i915_gem_request *)0) ? req1->ctx : (struct intel_context *)0, (unsigned long )req1 != (unsigned long )((struct drm_i915_gem_request *)0) ? 
req1->tail : 0U); req0->elsp_submitted = req0->elsp_submitted + 1; if ((unsigned long )req1 != (unsigned long )((struct drm_i915_gem_request *)0)) { req1->elsp_submitted = req1->elsp_submitted + 1; } else { } return; } } static bool execlists_check_remove_request(struct intel_engine_cs *ring , u32 request_id ) { struct drm_i915_gem_request *head_req ; int tmp ; long tmp___0 ; struct list_head const *__mptr ; int tmp___2 ; struct drm_i915_gem_object *ctx_obj ; int __ret_warn_on ; long tmp___3 ; u32 tmp___4 ; { tmp = queued_spin_is_locked(& ring->execlist_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c"), "i" (461), "i" (12UL)); ldv_48120: ; goto ldv_48120; } else { } tmp___2 = list_empty((struct list_head const *)(& ring->execlist_queue)); if (tmp___2 == 0) { __mptr = (struct list_head const *)ring->execlist_queue.next; head_req = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffff88UL; } else { head_req = (struct drm_i915_gem_request *)0; } if ((unsigned long )head_req != (unsigned long )((struct drm_i915_gem_request *)0)) { ctx_obj = (head_req->ctx)->engine[(unsigned int )ring->id].state; tmp___4 = intel_execlists_ctx_id(ctx_obj); if (tmp___4 == request_id) { __ret_warn_on = head_req->elsp_submitted == 0; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 472, "Never submitted head 
request\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); head_req->elsp_submitted = head_req->elsp_submitted - 1; if (head_req->elsp_submitted <= 0) { list_del(& head_req->execlist_link); list_add_tail(& head_req->execlist_link, & ring->execlist_retired_req_list); return (1); } else { } } else { } } else { } return (0); } } void intel_lrc_irq_handler(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; u32 status_pointer ; u8 read_pointer ; u8 write_pointer ; u32 status ; u32 status_id ; u32 submit_contexts ; int __ret_warn_on ; long tmp ; bool tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; bool tmp___2 ; int __ret_warn_on___1 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; submit_contexts = 0U; status_pointer = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 928U), 1); read_pointer = ring->next_context_status_buffer; write_pointer = (unsigned int )((u8 )status_pointer) & 7U; if ((int )read_pointer > (int )write_pointer) { write_pointer = (unsigned int )write_pointer + 6U; } else { } spin_lock(& ring->execlist_lock); goto ldv_48141; ldv_48140: read_pointer = (u8 )((int )read_pointer + 1); status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((ring->mmio_base + (u32 )((int )((unsigned int )read_pointer % 6U) * 8)) + 880U), 1); status_id = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((ring->mmio_base + (u32 )((int )((unsigned int )read_pointer % 6U) * 8)) + 884U), 1); if ((status & 2U) != 0U) { if ((status & 32768U) != 0U) { tmp___0 = execlists_check_remove_request(ring, status_id); if ((int )tmp___0) { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 522, "Lite Restored request 
removed from queue\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } } else { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 524, "Preemption without Lite Restore\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } } else { } if ((status & 8U) != 0U || (status & 4U) != 0U) { tmp___2 = execlists_check_remove_request(ring, status_id); if ((int )tmp___2) { submit_contexts = submit_contexts + 1U; } else { } } else { } ldv_48141: ; if ((int )read_pointer < (int )write_pointer) { goto ldv_48140; } else { } if (submit_contexts != 0U) { execlists_context_unqueue(ring); } else { } spin_unlock(& ring->execlist_lock); __ret_warn_on___1 = submit_contexts > 2U; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 539, "More than two context complete events?\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); ring->next_context_status_buffer = (u8 )((unsigned int )write_pointer % 6U); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 928U), ((unsigned int )ring->next_context_status_buffer & 7U) << 8, 1); return; } } static int execlists_context_queue(struct intel_engine_cs *ring , struct intel_context *to , u32 tail , struct drm_i915_gem_request *request ) { struct drm_i915_gem_request *cursor ; int num_elements ; void *tmp ; int __ret_warn_on ; long tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct drm_i915_gem_request 
*tail_req ; struct list_head const *__mptr___1 ; int __ret_warn_on___0 ; long tmp___1 ; { num_elements = 0; if ((unsigned long )ring->default_context != (unsigned long )to) { intel_lr_context_pin(ring, to); } else { } if ((unsigned long )request == (unsigned long )((struct drm_i915_gem_request *)0)) { tmp = kzalloc(144UL, 208U); request = (struct drm_i915_gem_request *)tmp; if ((unsigned long )request == (unsigned long )((struct drm_i915_gem_request *)0)) { return (-12); } else { } request->ring = ring; request->ctx = to; kref_init(& request->ref); i915_gem_context_reference___2(request->ctx); } else { i915_gem_request_reference___2(request); __ret_warn_on = (unsigned long )request->ctx != (unsigned long )to; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 571, "WARN_ON(to != request->ctx)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } request->tail = tail; spin_lock_irq(& ring->execlist_lock); __mptr = (struct list_head const *)ring->execlist_queue.next; cursor = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffff88UL; goto ldv_48161; ldv_48160: num_elements = num_elements + 1; if (num_elements > 2) { goto ldv_48159; } else { } __mptr___0 = (struct list_head const *)cursor->execlist_link.next; cursor = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffff88UL; ldv_48161: ; if ((unsigned long )(& cursor->execlist_link) != (unsigned long )(& ring->execlist_queue)) { goto ldv_48160; } else { } ldv_48159: ; if (num_elements > 2) { __mptr___1 = (struct list_head const *)ring->execlist_queue.prev; tail_req = (struct drm_i915_gem_request *)__mptr___1 + 0xffffffffffffff88UL; if ((unsigned long )tail_req->ctx == (unsigned long )to) { __ret_warn_on___0 = tail_req->elsp_submitted != 0; tmp___1 
= ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 590, "More than 2 already-submitted reqs queued\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); list_del(& tail_req->execlist_link); list_add_tail(& tail_req->execlist_link, & ring->execlist_retired_req_list); } else { } } else { } list_add_tail(& request->execlist_link, & ring->execlist_queue); if (num_elements == 0) { execlists_context_unqueue(ring); } else { } spin_unlock_irq(& ring->execlist_lock); return (0); } } static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf , struct intel_context *ctx ) { struct intel_engine_cs *ring ; uint32_t flush_domains ; int ret ; { ring = ringbuf->ring; flush_domains = 0U; if ((int )ring->gpu_caches_dirty) { flush_domains = 62U; } else { } ret = (*(ring->emit_flush))(ringbuf, ctx, 62U, flush_domains); if (ret != 0) { return (ret); } else { } ring->gpu_caches_dirty = 0; return (0); } } static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , struct list_head *vmas ) { struct intel_engine_cs *ring ; unsigned int other_rings ; unsigned int tmp ; struct i915_vma *vma ; uint32_t flush_domains ; bool flush_chipset ; int ret ; struct list_head const *__mptr ; struct drm_i915_gem_object *obj ; bool tmp___0 ; struct list_head const *__mptr___0 ; int tmp___1 ; { ring = ringbuf->ring; tmp = intel_ring_flag(ring); other_rings = ~ tmp; flush_domains = 0U; flush_chipset = 0; __mptr = (struct list_head const *)vmas->next; vma = (struct i915_vma *)__mptr + 0xffffffffffffff48UL; goto ldv_48191; ldv_48190: obj = vma->obj; if (((unsigned int )obj->active & other_rings) != 0U) { ret = i915_gem_object_sync(obj, ring); if (ret != 0) { return (ret); } else { } } else { } 
if ((int )obj->base.write_domain & 1) { tmp___0 = i915_gem_clflush_object(obj, 0); flush_chipset = ((int )flush_chipset | (int )tmp___0) != 0; } else { } flush_domains = obj->base.write_domain | flush_domains; __mptr___0 = (struct list_head const *)vma->exec_list.next; vma = (struct i915_vma *)__mptr___0 + 0xffffffffffffff48UL; ldv_48191: ; if ((unsigned long )(& vma->exec_list) != (unsigned long )vmas) { goto ldv_48190; } else { } if ((flush_domains & 64U) != 0U) { __asm__ volatile ("sfence": : : "memory"); } else { } tmp___1 = logical_ring_invalidate_all_caches(ringbuf, ctx); return (tmp___1); } } int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request , struct intel_context *ctx ) { int ret ; { if ((unsigned long )(request->ring)->default_context != (unsigned long )ctx) { ret = intel_lr_context_pin(request->ring, ctx); if (ret != 0) { return (ret); } else { } } else { } request->ringbuf = ctx->engine[(unsigned int )(request->ring)->id].ringbuf; request->ctx = ctx; i915_gem_context_reference___2(request->ctx); return (0); } } static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , int bytes ) { struct intel_engine_cs *ring ; struct drm_i915_gem_request *request ; unsigned int space ; int ret ; int tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; { ring = ringbuf->ring; tmp = intel_ring_space(ringbuf); if (tmp >= bytes) { return (0); } else { } __mptr = (struct list_head const *)ring->request_list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; goto ldv_48214; ldv_48213: ; if ((unsigned long )request->ringbuf != (unsigned long )ringbuf) { goto ldv_48211; } else { } tmp___0 = __intel_ring_space((int )request->postfix, (int )ringbuf->tail, ringbuf->size); space = (unsigned int )tmp___0; if ((unsigned int )bytes <= space) { goto ldv_48212; } else { } ldv_48211: __mptr___0 = 
(struct list_head const *)request->list.next; request = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffffb8UL; ldv_48214: ; if ((unsigned long )(& request->list) != (unsigned long )(& ring->request_list)) { goto ldv_48213; } else { } ldv_48212: __ret_warn_on = (unsigned long )(& request->list) == (unsigned long )(& ring->request_list); tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 707, "WARN_ON(&request->list == &ring->request_list)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (-28); } else { } ret = i915_wait_request(request); if (ret != 0) { return (ret); } else { } ringbuf->space = (int )space; return (0); } } static void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , struct drm_i915_gem_request *request ) { struct intel_engine_cs *ring ; bool tmp ; { ring = ringbuf->ring; intel_logical_ring_advance(ringbuf); tmp = intel_ring_stopped(ring); if ((int )tmp) { return; } else { } execlists_context_queue(ring, ctx, ringbuf->tail, request); return; } } static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf , struct intel_context *ctx ) { uint32_t *virt ; int rem ; int ret ; int tmp ; uint32_t *tmp___0 ; int tmp___1 ; { rem = (int )((u32 )ringbuf->size - ringbuf->tail); if (ringbuf->space < rem) { tmp = logical_ring_wait_for_space(ringbuf, ctx, rem); ret = tmp; if (ret != 0) { return (ret); } else { } } else { } virt = (uint32_t *)ringbuf->virtual_start + (unsigned long )ringbuf->tail; rem = rem / 4; goto ldv_48231; ldv_48230: tmp___0 = virt; virt = virt + 1; iowrite32(0U, (void *)tmp___0); ldv_48231: tmp___1 = rem; rem = rem - 1; if (tmp___1 != 0) { goto ldv_48230; } else { } 
ringbuf->tail = 0U; intel_ring_update_space(ringbuf); return (0); } } static int logical_ring_prepare(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , int bytes ) { int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp___0 = ldv__builtin_expect(ringbuf->tail + (u32 )bytes > (u32 )ringbuf->effective_size, 0L); if (tmp___0 != 0L) { ret = logical_ring_wrap_buffer(ringbuf, ctx); tmp = ldv__builtin_expect(ret != 0, 0L); if (tmp != 0L) { return (ret); } else { } } else { } tmp___2 = ldv__builtin_expect(ringbuf->space < bytes, 0L); if (tmp___2 != 0L) { ret = logical_ring_wait_for_space(ringbuf, ctx, bytes); tmp___1 = ldv__builtin_expect(ret != 0, 0L); if (tmp___1 != 0L) { return (ret); } else { } } else { } return (0); } } static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , int num_dwords ) { struct intel_engine_cs *ring ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; { ring = ringbuf->ring; dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = i915_gem_check_wedge(& dev_priv->gpu_error, (int )dev_priv->mm.interruptible); if (ret != 0) { return (ret); } else { } ret = logical_ring_prepare(ringbuf, ctx, (int )((unsigned int )num_dwords * 4U)); if (ret != 0) { return (ret); } else { } ret = i915_gem_request_alloc(ring, ctx); if (ret != 0) { return (ret); } else { } ringbuf->space = (int )((unsigned int )ringbuf->space - (unsigned int )((unsigned long )num_dwords) * 4U); return (0); } } int intel_execlists_submission(struct drm_device *dev , struct drm_file *file , struct intel_engine_cs *ring , struct intel_context *ctx , struct drm_i915_gem_execbuffer2 *args , struct list_head *vmas , struct drm_i915_gem_object *batch_obj , u64 exec_start , u32 dispatch_flags ) { struct drm_i915_private *dev_priv ; struct intel_ringbuffer *ringbuf ; int instp_mode ; u32 instp_mask ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; 
long tmp___4 ; long tmp___5 ; struct drm_i915_gem_request *tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ringbuf = ctx->engine[(unsigned int )ring->id].ringbuf; instp_mode = (int )args->flags & 192; instp_mask = 192U; switch (instp_mode) { case 0: ; case 64: ; case 128: ; if (instp_mode != 0 && (unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) != (unsigned long )ring) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_execlists_submission", "non-0 rel constants mode on non-RCS\n"); } else { } return (-22); } else { } if (dev_priv->relative_constants_mode != instp_mode) { if (instp_mode == 128) { tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_execlists_submission", "rel surface constants mode invalid on gen5+\n"); } else { } return (-22); } else { } instp_mask = instp_mask & 4294967167U; } else { } goto ldv_48268; default: tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_execlists_submission", "execbuf with unknown constants: %d\n", instp_mode); } else { } return (-22); } ldv_48268: ; if (args->num_cliprects != 0U) { tmp___2 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_execlists_submission", "clip rectangles are only valid on pre-gen5\n"); } else { } return (-22); } else { if (args->DR4 == 4294967295U) { tmp___3 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_execlists_submission", "UXA submitting garbage DR4, fixing up\n"); } else { } args->DR4 = 0U; } else { } if ((args->DR1 != 0U || args->DR4 != 0U) || args->cliprects_ptr != 0ULL) { tmp___4 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_execlists_submission", "0 cliprects but dirt in cliprects fields\n"); } else { } 
return (-22); } else { } } if ((args->flags & 256ULL) != 0ULL) { tmp___5 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_execlists_submission", "sol reset is gen7 only\n"); } else { } return (-22); } else { } ret = execlists_move_to_gpu(ringbuf, ctx, vmas); if (ret != 0) { return (ret); } else { } if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring)) == (unsigned long )ring && dev_priv->relative_constants_mode != instp_mode) { ret = intel_logical_ring_begin(ringbuf, ctx, 4); if (ret != 0) { return (ret); } else { } intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 285212673U); intel_logical_ring_emit(ringbuf, 8384U); intel_logical_ring_emit(ringbuf, (instp_mask << 16) | (u32 )instp_mode); intel_logical_ring_advance(ringbuf); dev_priv->relative_constants_mode = instp_mode; } else { } ret = (*(ring->emit_bb_start))(ringbuf, ctx, exec_start, dispatch_flags); if (ret != 0) { return (ret); } else { } tmp___6 = intel_ring_get_request(ring); trace_i915_gem_ring_dispatch___0(tmp___6, dispatch_flags); i915_gem_execbuffer_move_to_active(vmas, ring); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); return (0); } } void intel_execlists_retire_requests(struct intel_engine_cs *ring ) { struct drm_i915_gem_request *req ; struct drm_i915_gem_request *tmp ; struct list_head retired_list ; int __ret_warn_on ; int tmp___0 ; long tmp___1 ; int tmp___2 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct intel_context *ctx ; struct drm_i915_gem_object *ctx_obj ; struct list_head const *__mptr___1 ; { tmp___0 = mutex_is_locked(& (ring->dev)->struct_mutex); __ret_warn_on = tmp___0 == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 938, "WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = list_empty((struct list_head const *)(& ring->execlist_retired_req_list)); if (tmp___2 != 0) { return; } else { } INIT_LIST_HEAD(& retired_list); spin_lock_irq(& ring->execlist_lock); list_replace_init(& ring->execlist_retired_req_list, & retired_list); spin_unlock_irq(& ring->execlist_lock); __mptr = (struct list_head const *)retired_list.next; req = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffff88UL; __mptr___0 = (struct list_head const *)req->execlist_link.next; tmp = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffff88UL; goto ldv_48287; ldv_48286: ctx = req->ctx; ctx_obj = ctx->engine[(unsigned int )ring->id].state; if ((unsigned long )ctx_obj != (unsigned long )((struct drm_i915_gem_object *)0) && (unsigned long )ring->default_context != (unsigned long )ctx) { intel_lr_context_unpin(ring, ctx); } else { } list_del(& req->execlist_link); i915_gem_request_unreference___1(req); req = tmp; __mptr___1 = (struct list_head const *)tmp->execlist_link.next; tmp = (struct drm_i915_gem_request *)__mptr___1 + 0xffffffffffffff88UL; ldv_48287: ; if ((unsigned long )(& req->execlist_link) != (unsigned long )(& retired_list)) { goto ldv_48286; } else { } return; } } extern void __compiletime_assert_978(void) ; void intel_logical_ring_stop(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; int ret ; bool tmp ; int tmp___0 ; struct drm_i915_private *tmp___1 ; bool tmp___2 ; int tmp___3 ; int _a ; unsigned long timeout__ ; unsigned long tmp___4 ; int ret__ ; uint32_t tmp___5 ; uint32_t tmp___6 ; bool __cond___2 ; bool __cond___3 ; bool __cond___4 ; { dev_priv = (struct drm_i915_private 
*)(ring->dev)->dev_private; tmp = intel_ring_initialized(ring); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } ret = intel_ring_idle(ring); if (ret != 0) { tmp___1 = to_i915((struct drm_device const *)ring->dev); tmp___2 = i915_reset_in_progress(& tmp___1->gpu_error); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { drm_err("failed to quiesce %s whilst cleaning up: %d\n", ring->name, ret); } else { } } else { } _a = 256; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 156U), (uint32_t )((_a << 16) | _a), 1); tmp___4 = msecs_to_jiffies(1000U); timeout__ = (tmp___4 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48319; ldv_48318: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 156U), 1); if ((tmp___5 & 512U) == 0U) { ret__ = -110; } else { } goto ldv_48317; } else { } cpu_relax(); ldv_48319: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 156U), 1); if ((tmp___6 & 512U) == 0U) { goto ldv_48318; } else { } ldv_48317: ; if (ret__ != 0) { drm_err("%s :timed out trying to stop ring\n", ring->name); return; } else { } __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_978(); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_978(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_978(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 156U), (uint32_t )16777216, 1); return; } } int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf , struct intel_context *ctx ) { struct intel_engine_cs *ring ; int ret ; { ring = ringbuf->ring; if (! 
ring->gpu_caches_dirty) { return (0); } else { } ret = (*(ring->emit_flush))(ringbuf, ctx, 0U, 62U); if (ret != 0) { return (ret); } else { } ring->gpu_caches_dirty = 0; return (0); } } static int intel_lr_context_pin(struct intel_engine_cs *ring , struct intel_context *ctx ) { struct drm_i915_gem_object *ctx_obj ; struct intel_ringbuffer *ringbuf ; int ret ; int __ret_warn_on ; int tmp ; long tmp___0 ; int tmp___1 ; { ctx_obj = ctx->engine[(unsigned int )ring->id].state; ringbuf = ctx->engine[(unsigned int )ring->id].ringbuf; ret = 0; tmp = mutex_is_locked(& (ring->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1005, "WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = ctx->engine[(unsigned int )ring->id].pin_count; ctx->engine[(unsigned int )ring->id].pin_count = ctx->engine[(unsigned int )ring->id].pin_count + 1; if (tmp___1 == 0) { ret = i915_gem_obj_ggtt_pin(ctx_obj, 4096U, 0U); if (ret != 0) { goto reset_pin_count; } else { } ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); if (ret != 0) { goto unpin_ctx_obj; } else { } } else { } return (ret); unpin_ctx_obj: i915_gem_object_ggtt_unpin(ctx_obj); reset_pin_count: ctx->engine[(unsigned int )ring->id].pin_count = 0; return (ret); } } void intel_lr_context_unpin(struct intel_engine_cs *ring , struct intel_context *ctx ) { struct drm_i915_gem_object *ctx_obj ; struct intel_ringbuffer *ringbuf ; int __ret_warn_on ; int tmp ; long tmp___0 ; { ctx_obj = ctx->engine[(unsigned int )ring->id].state; ringbuf = ctx->engine[(unsigned int )ring->id].ringbuf; if ((unsigned long )ctx_obj != (unsigned long )((struct drm_i915_gem_object *)0)) { 
tmp = mutex_is_locked(& (ring->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1034, "WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ctx->engine[(unsigned int )ring->id].pin_count = ctx->engine[(unsigned int )ring->id].pin_count - 1; if (ctx->engine[(unsigned int )ring->id].pin_count == 0) { intel_unpin_ringbuffer_obj(ringbuf); i915_gem_object_ggtt_unpin(ctx_obj); } else { } } else { } return; } } static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring , struct intel_context *ctx ) { int ret ; int i ; struct intel_ringbuffer *ringbuf ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct i915_workarounds *w ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ringbuf = ctx->engine[(unsigned int )ring->id].ringbuf; dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; w = & dev_priv->workarounds; __ret_warn_once = w->count == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1051, "WARN_ON_ONCE(w->count == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { return (0); } else { } ring->gpu_caches_dirty = 1; ret = logical_ring_flush_all_caches(ringbuf, ctx); if (ret != 0) { return (ret); } else { } ret = intel_logical_ring_begin(ringbuf, ctx, (int )((w->count + 1U) * 2U)); if (ret != 0) { return (ret); } else { } intel_logical_ring_emit(ringbuf, (w->count * 2U - 1U) | 285212672U); i = 0; goto ldv_48375; ldv_48374: intel_logical_ring_emit(ringbuf, w->reg[i].addr); intel_logical_ring_emit(ringbuf, w->reg[i].value); i = i + 1; ldv_48375: ; if ((u32 )i < w->count) { goto ldv_48374; } else { } intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_advance(ringbuf); ring->gpu_caches_dirty = 1; ret = logical_ring_flush_all_caches(ringbuf, ctx); if (ret != 0) { return (ret); } else { } return (0); } } extern void __compiletime_assert_1096(void) ; static int gen8_init_common_ring(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool __cond ; bool __cond___0 ; bool __cond___1 ; int _a ; long tmp ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ (ring->irq_enable_mask | ring->irq_keep_mask), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 152U), 4294967295U, 1); if ((unsigned long )ring->status_page.obj != (unsigned long )((struct drm_i915_gem_object *)0)) { 
/* (continuation of gen8_init_common_ring) Writes the hardware status page
 * address (mmio_base+128U) with a posting read, evaluates three CIL-lowered
 * compile-time assertions (all statically false), programs the ring MODE
 * register (mmio_base+668U) with masked-bit value 32768, resets the context
 * status buffer pointer, and zeroes the 32-byte hangcheck struct. Returns 0. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 128U), ring->status_page.gfx_addr, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 128U), 0); } else { } __cond = 0; if ((int )__cond) { __compiletime_assert_1096(); } else { } __cond___0 = 0; if ((int )__cond___0) { __compiletime_assert_1096(); } else { } __cond___1 = 0; if ((int )__cond___1) { __compiletime_assert_1096(); } else { } _a = 32768; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 668U), (uint32_t )(134217728 | ((_a << 16) | _a)), 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 668U), 0); ring->next_context_status_buffer = 0U; tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen8_init_common_ring", "Execlists enabled for %s\n", ring->name); } else { } memset((void *)(& ring->hangcheck), 0, 32UL); return (0); } }
/* Gen8 render engine init: common init, then two masked-bit register writes
 * (offsets 8348L with bit 16384 and 8384L with bit 128), then the recorded
 * ring workarounds. Returns 0 or the first failing step's errno. */
static int gen8_init_render_ring(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; int _a ; int _a___0 ; int tmp ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = gen8_init_common_ring(ring); if (ret != 0) { return (ret); } else { } _a = 16384; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8348L, (uint32_t )((_a << 16) | _a), 1); _a___0 = 128; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8384L, (uint32_t )((_a___0 << 16) | _a___0), 1); tmp = init_workarounds_ring(ring); return (tmp); } }
/* Gen9 render engine init: common init plus ring workarounds only (no extra
 * register writes, unlike the gen8 variant). */
static int gen9_init_render_ring(struct intel_engine_cs *ring ) { int ret ; int tmp ; { ret = gen8_init_common_ring(ring); if (ret != 0) { return (ret); } else { } tmp = init_workarounds_ring(ring); return (tmp); } }
/* Emit a 4-dword batch-buffer-start command: bit 0 of dispatch_flags clear
 * selects ppgtt addressing (folded into bit 8 of the 411041793U opcode),
 * followed by the 64-bit batch offset split into low/high dwords and a pad. */
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , u64 offset , unsigned int dispatch_flags ) { bool ppgtt ; int ret ; { ppgtt = (dispatch_flags & 1U) == 0U; ret = intel_logical_ring_begin(ringbuf, ctx, 4); if (ret != 0) { return (ret); } else { } intel_logical_ring_emit(ringbuf, (u32 )(((int )ppgtt << 8) | 411041793)); intel_logical_ring_emit(ringbuf, (unsigned int )offset); intel_logical_ring_emit(ringbuf, (unsigned int )(offset >> 32ULL)); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_advance(ringbuf); return (0); } }
/* Reference-counted IRQ enable for a logical ring: WARN (intel_lrc.c:1167)
 * and return false if driver interrupts are disabled; otherwise, under
 * dev_priv->irq_lock, unmask irq_enable_mask in the ring IMR (mmio_base+168U,
 * with posting read) on the 0 -> 1 refcount transition. Returns true (1). */
static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; raw_spinlock_t *tmp___3 ; unsigned int tmp___4 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1167, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (0); } else { } tmp___3 = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp___3); tmp___4 = ring->irq_refcount; ring->irq_refcount = ring->irq_refcount + 1U; if (tmp___4 == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ (ring->irq_enable_mask | ring->irq_keep_mask), 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 168U), 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (1); } }
/* Reference-counted IRQ disable: under irq_lock, on the 1 -> 0 refcount
 * transition re-mask everything except irq_keep_mask in the ring IMR
 * (with posting read); continues on the next block's lines. */
static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; raw_spinlock_t *tmp ; { dev = ring->dev; dev_priv = (struct drm_i915_private
/* (continuation of gen8_logical_ring_put_irq) */ *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ring->irq_refcount = ring->irq_refcount - 1U; if (ring->irq_refcount == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ ring->irq_keep_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 168U), 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return; } }
/* Emit a 4-dword flush for non-render rings (opcode 318767106U | 2113536U).
 * If any of invalidate domains 62U are requested, set the invalidate-TLB bit
 * (262144U), and additionally bit 128U when this is ring index 1 of
 * dev_priv->ring (the bsd ring, per the init functions below). The `unused`
 * parameter is ignored. Returns 0 or the errno from ring begin. */
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , u32 invalidate_domains , u32 unused ) { struct intel_engine_cs *ring ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t cmd ; int ret ; { ring = ringbuf->ring; dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = intel_logical_ring_begin(ringbuf, ctx, 4); if (ret != 0) { return (ret); } else { } cmd = 318767106U; cmd = cmd | 2113536U; if ((invalidate_domains & 62U) != 0U) { cmd = cmd | 262144U; if ((unsigned long )((struct intel_engine_cs *)(& dev_priv->ring) + 1UL) == (unsigned long )ring) { cmd = cmd | 128U; } else { } } else { } intel_logical_ring_emit(ringbuf, cmd); intel_logical_ring_emit(ringbuf, 260U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_advance(ringbuf); return (0); } }
/* Render-ring pipe-control flush: builds a flags word from the flush and
 * invalidate domains and emits a 6-dword pipe control (header 2046820356U)
 * targeting scratch page offset +128U. On gen > 8 with the VF-cache-
 * invalidate bit (16U) set, a preceding all-zero 6-dword pipe control is
 * emitted as a workaround (vf_flush_wa), making the command 12 dwords.
 * Continues on the next block's lines. */
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf , struct intel_context *ctx , u32 invalidate_domains , u32 flush_domains ) { struct intel_engine_cs *ring ; u32 scratch_addr ; bool vf_flush_wa ; u32 flags ; int ret ; struct drm_i915_private *__p ; { ring = ringbuf->ring; scratch_addr = ring->scratch.gtt_offset + 128U; flags = 0U; flags = flags | 1048576U; if (flush_domains != 0U) { flags = flags | 4096U; flags = flags | 1U; } else { } if (invalidate_domains != 0U) { flags = flags | 262144U; flags = flags | 2048U; flags = flags | 1024U; flags = flags | 16U; flags = flags | 8U; flags = flags | 4U; flags = flags | 16384U; flags = flags | 16777216U; } else { } __p = to_i915((struct drm_device const *)ring->dev); vf_flush_wa = (bool )((unsigned int )((unsigned char )__p->info.gen) > 8U && (flags & 16U) != 0U); ret = intel_logical_ring_begin(ringbuf, ctx, (int )vf_flush_wa ? 12 : 6); if (ret != 0) { return (ret); } else { } if ((int )vf_flush_wa) { intel_logical_ring_emit(ringbuf, 2046820356U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); } else { } intel_logical_ring_emit(ringbuf, 2046820356U); intel_logical_ring_emit(ringbuf, flags); intel_logical_ring_emit(ringbuf, scratch_addr); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_advance(ringbuf); return (0); } }
/* Read the hardware seqno from status-page slot 48; lazy_coherency unused. */
static u32 gen8_get_seqno(struct intel_engine_cs *ring , bool lazy_coherency ) { u32 tmp ; { tmp = intel_read_status_page(ring, 48); return (tmp); } }
/* Write the seqno into status-page slot 48. */
static void gen8_set_seqno(struct intel_engine_cs *ring , u32 seqno ) { { intel_write_status_page(ring, 48, seqno); return; } }
/* Emit a request-completion sequence: an 8-dword allocation carrying a
 * store-dword (cmd 268435458U | 4194304U) of the lazy request's seqno to
 * status page address +192U, a user-interrupt dword (16777216U), then
 * advance-and-submit followed by two NOOP dwords and a second advance
 * (matching the upstream 4.2-rc1 gen8_emit_request). Continues on the
 * next block's lines. */
static int gen8_emit_request(struct intel_ringbuffer *ringbuf , struct drm_i915_gem_request *request ) { struct intel_engine_cs *ring ; u32 cmd ; int ret ; uint32_t tmp ; { ring = ringbuf->ring; ret = intel_logical_ring_begin(ringbuf, request->ctx, 8); if (ret != 0) { return (ret); } else { } cmd = 268435458U; cmd = cmd | 4194304U; intel_logical_ring_emit(ringbuf, cmd); intel_logical_ring_emit(ringbuf, ring->status_page.gfx_addr + 192U); intel_logical_ring_emit(ringbuf, 0U); tmp = i915_gem_request_get_seqno(ring->outstanding_lazy_request); intel_logical_ring_emit(ringbuf, tmp); intel_logical_ring_emit(ringbuf, 16777216U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_emit(ringbuf, 0U); intel_logical_ring_advance(ringbuf);
/* (continuation of gen8_emit_request) */ return (0); } }
/* Prepare the golden render state, dispatch it as a batch via the ring's
 * emit_bb_start hook, move its object to the active list and add a request;
 * render-state teardown runs on all paths via the `out` label. Returns 0
 * when there is no rodata to emit, otherwise the first failing errno. */
static int intel_lr_context_render_state_init(struct intel_engine_cs *ring , struct intel_context *ctx ) { struct intel_ringbuffer *ringbuf ; struct render_state so ; struct drm_i915_file_private *file_priv ; struct drm_file *file ; int ret ; struct i915_vma *tmp ; { ringbuf = ctx->engine[(unsigned int )ring->id].ringbuf; file_priv = ctx->file_priv; file = (unsigned long )file_priv != (unsigned long )((struct drm_i915_file_private *)0) ? file_priv->file : (struct drm_file *)0; ret = i915_gem_render_state_prepare(ring, & so); if (ret != 0) { return (ret); } else { } if ((unsigned long )so.rodata == (unsigned long )((struct intel_renderstate_rodata const *)0)) { return (0); } else { } ret = (*(ring->emit_bb_start))(ringbuf, ctx, so.ggtt_offset, 1U); if (ret != 0) { goto out; } else { } tmp = i915_gem_obj_to_ggtt(so.obj); i915_vma_move_to_active(tmp, ring); ret = __i915_add_request(ring, file, so.obj); out: i915_gem_render_state_fini(& so); return (ret); } }
/* RCS context init: emit the recorded workarounds, then the render state. */
static int gen8_init_rcs_context(struct intel_engine_cs *ring , struct intel_context *ctx ) { int ret ; int tmp ; { ret = intel_logical_ring_workarounds_emit(ring, ctx); if (ret != 0) { return (ret); } else { } tmp = intel_lr_context_render_state_init(ring, ctx); return (tmp); } }
/* Tear down a logical ring: no-op if the ring was never initialized; stop
 * the ring, WARN (intel_lrc.c:1407) if the MODE register (mmio_base+156U)
 * does not report idle (bit 512U), drop the outstanding lazy request, run
 * the per-ring cleanup hook, free the command parser and batch pool, and
 * kunmap/release the status page object. */
void intel_logical_ring_cleanup(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; bool tmp ; int tmp___0 ; int __ret_warn_on ; uint32_t tmp___1 ; long tmp___2 ; struct page *tmp___3 ; { tmp = intel_ring_initialized(ring); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; intel_logical_ring_stop(ring); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 156U), 1); __ret_warn_on = (tmp___1 & 512U) == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1407, "WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); i915_gem_request_assign___1(& ring->outstanding_lazy_request, (struct drm_i915_gem_request *)0); if ((unsigned long )ring->cleanup != (unsigned long )((void (*)(struct intel_engine_cs * ))0)) { (*(ring->cleanup))(ring); } else { } i915_cmd_parser_fini_ring(ring); i915_gem_batch_pool_fini(& ring->batch_pool); if ((unsigned long )ring->status_page.obj != (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___3 = sg_page___2(((ring->status_page.obj)->pages)->sgl); kunmap(tmp___3); ring->status_page.obj = (struct drm_i915_gem_object *)0; } else { } return; } }
/* Shared logical-ring setup: initialize lists, batch pool, wait queue and
 * execlist lock (lockdep keys are the CIL expansion of the init macros),
 * create the command parser, then deferred-create the default context. */
static int logical_ring_init(struct drm_device *dev , struct intel_engine_cs *ring ) { int ret ; struct lock_class_key __key ; struct lock_class_key __key___0 ; { ring->buffer = (struct intel_ringbuffer *)0; ring->dev = dev; INIT_LIST_HEAD(& ring->active_list); INIT_LIST_HEAD(& ring->request_list); i915_gem_batch_pool_init(dev, & ring->batch_pool); __init_waitqueue_head(& ring->irq_queue, "&ring->irq_queue", & __key); INIT_LIST_HEAD(& ring->execlist_queue); INIT_LIST_HEAD(& ring->execlist_retired_req_list); spinlock_check(& ring->execlist_lock); __raw_spin_lock_init(& ring->execlist_lock.__annonCompField18.rlock, "&(&ring->execlist_lock)->rlock", & __key___0); ret = i915_cmd_parser_init_ring(ring); if (ret != 0) { return (ret); } else { } ret = intel_lr_context_deferred_create(ring->default_context, ring); return (ret); } }
/* Populate and register the render ring (dev_priv->ring[0], mmio base 8192U):
 * selects gen9 vs gen8 init_hw by gen > 8, wires the gen8 execlists vfuncs,
 * and allocates the pipe-control scratch object. Continues on the next
 * block's lines. */
static int logical_render_ring_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int tmp
/* (continuation of logical_render_ring_init) */ ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring); ring->name = "render ring"; ring->id = 0; ring->mmio_base = 8192U; ring->irq_enable_mask = 1U; ring->irq_keep_mask = 256U; /* Both feature branches below OR in the same extra keep-mask bit (32U); the duplicated test is the CIL expansion of an IS_GEN8/IS_GEN9-style macro. */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ring->irq_keep_mask = ring->irq_keep_mask | 32U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { ring->irq_keep_mask = ring->irq_keep_mask | 32U; } else { } } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 8U) { ring->init_hw = & gen9_init_render_ring; } else { ring->init_hw = & gen8_init_render_ring; } ring->init_context = & gen8_init_rcs_context; ring->cleanup = & intel_fini_pipe_control; ring->get_seqno = & gen8_get_seqno; ring->set_seqno = & gen8_set_seqno; ring->emit_request = & gen8_emit_request; ring->emit_flush = & gen8_emit_flush_render; ring->irq_get = & gen8_logical_ring_get_irq; ring->irq_put = & gen8_logical_ring_put_irq; ring->emit_bb_start = & gen8_emit_bb_start; ring->dev = dev; ret = logical_ring_init(dev, ring); if (ret != 0) { return (ret); } else { } tmp = intel_init_pipe_control(ring); return (tmp); } }
/* Populate and register the bsd (video) ring: dev_priv->ring[1], mmio base
 * 73728U, gen8 common vfuncs, then shared logical_ring_init. */
static int logical_bsd_ring_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring) + 1UL; ring->name = "bsd ring"; ring->id = 1; ring->mmio_base = 73728U; ring->irq_enable_mask = 1U; ring->irq_keep_mask = 256U; ring->init_hw = & gen8_init_common_ring; ring->get_seqno = & gen8_get_seqno; ring->set_seqno = & gen8_set_seqno; ring->emit_request = & gen8_emit_request; ring->emit_flush = & gen8_emit_flush; ring->irq_get = & gen8_logical_ring_get_irq; ring->irq_put = & gen8_logical_ring_put_irq; ring->emit_bb_start = & gen8_emit_bb_start; tmp = logical_ring_init(dev, ring); return (tmp); } }
/* Populate and register the second bsd ring: dev_priv->ring[4], mmio base
 * 114688U. The "bds2 ring" name string matches the upstream source verbatim
 * (it is a runtime string; do not "fix" it here). */
static int logical_bsd2_ring_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring) + 4UL; ring->name = "bds2 ring"; ring->id = 4; ring->mmio_base = 114688U; ring->irq_enable_mask = 65536U; ring->irq_keep_mask = 16777216U; ring->init_hw = & gen8_init_common_ring; ring->get_seqno = & gen8_get_seqno; ring->set_seqno = & gen8_set_seqno; ring->emit_request = & gen8_emit_request; ring->emit_flush = & gen8_emit_flush; ring->irq_get = & gen8_logical_ring_get_irq; ring->irq_put = & gen8_logical_ring_put_irq; ring->emit_bb_start = & gen8_emit_bb_start; tmp = logical_ring_init(dev, ring); return (tmp); } }
/* Populate and register the blitter ring: dev_priv->ring[2], mmio base 139264U. */
static int logical_blt_ring_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring) + 2UL; ring->name = "blitter ring"; ring->id = 2; ring->mmio_base = 139264U; ring->irq_enable_mask = 65536U; ring->irq_keep_mask = 16777216U; ring->init_hw = & gen8_init_common_ring; ring->get_seqno = & gen8_get_seqno; ring->set_seqno = & gen8_set_seqno; ring->emit_request = & gen8_emit_request; ring->emit_flush = & gen8_emit_flush; ring->irq_get = & gen8_logical_ring_get_irq; ring->irq_put = & gen8_logical_ring_put_irq; ring->emit_bb_start = & gen8_emit_bb_start; tmp = logical_ring_init(dev, ring); return (tmp); } }
/* Populate and register the video-enhancement ring: dev_priv->ring[3],
 * mmio base 106496U. */
static int logical_vebox_ring_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring) + 3UL; ring->name = "video enhancement ring"; ring->id = 3; ring->mmio_base = 106496U; ring->irq_enable_mask = 1U; ring->irq_keep_mask = 256U; ring->init_hw = & gen8_init_common_ring; ring->get_seqno = & gen8_get_seqno; ring->set_seqno = & gen8_set_seqno; ring->emit_request = & gen8_emit_request; ring->emit_flush = & gen8_emit_flush; ring->irq_get = & gen8_logical_ring_get_irq; ring->irq_put = & gen8_logical_ring_put_irq; ring->emit_bb_start = & gen8_emit_bb_start; tmp = logical_ring_init(dev, ring); return (tmp); } }
/* Initialize all execlists rings: render always, then bsd/blt/vebox/bsd2
 * gated by bits 2/4/8/16 of info.ring_mask, then seed the seqno. On failure
 * unwinds already-initialized rings through the cascading cleanup labels
 * (goto-based cleanup, reverse order). Continues on the next block's lines. */
int intel_logical_rings_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = logical_render_ring_init(dev); if (ret != 0) { return (ret); } else { } __p = to_i915((struct drm_device const *)dev); if (((int )__p->info.ring_mask & 2) != 0) { ret = logical_bsd_ring_init(dev); if (ret != 0) { goto cleanup_render_ring; } else { } } else { } __p___0 = to_i915((struct drm_device const *)dev); if (((int )__p___0->info.ring_mask & 4) != 0) { ret = logical_blt_ring_init(dev); if (ret != 0) { goto cleanup_bsd_ring; } else { } } else { } __p___1 = to_i915((struct drm_device const *)dev); if (((int )__p___1->info.ring_mask & 8) != 0) { ret = logical_vebox_ring_init(dev); if (ret != 0) { goto cleanup_blt_ring; } else { } } else { } __p___2 = to_i915((struct drm_device const *)dev); if (((int )__p___2->info.ring_mask & 16) != 0) { ret = logical_bsd2_ring_init(dev); if (ret != 0) { goto cleanup_vebox_ring; } else { } } else { } ret = i915_gem_set_seqno(dev, 4294963199U); if (ret != 0) { goto cleanup_bsd2_ring; } else { } return (0); cleanup_bsd2_ring: intel_logical_ring_cleanup((struct intel_engine_cs *)(& dev_priv->ring) + 4UL); cleanup_vebox_ring: intel_logical_ring_cleanup((struct intel_engine_cs *)(& dev_priv->ring) + 3UL); cleanup_blt_ring: intel_logical_ring_cleanup((struct intel_engine_cs *)(& dev_priv->ring) + 2UL); cleanup_bsd_ring: intel_logical_ring_cleanup((struct
/* (continuation of intel_logical_rings_init error unwind) */ intel_engine_cs *)(& dev_priv->ring) + 1UL); cleanup_render_ring: intel_logical_ring_cleanup((struct intel_engine_cs *)(& dev_priv->ring)); return (ret); } }
/* Build the RPCS (render power clock state) value for the context image:
 * returns 0 on gen <= 8; otherwise packs slice_total (<<15), subslice_per_
 * slice (<<8) and eu_per_subslice (bits 0-3 and <<4) behind per-feature
 * enable bits, each gated by a feature byte at offset 112 of dev_priv
 * (a CIL-lowered has_slice/subslice/eu power-gating flag — confirm field
 * names against the full struct). */
static u32 make_rpcs(struct drm_device *dev ) { u32 rpcs ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { rpcs = 0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { return (0U); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 112UL) != 0U) { rpcs = rpcs | 262144U; __p___0 = to_i915((struct drm_device const *)dev); rpcs = (u32 )((int )__p___0->info.slice_total << 15) | rpcs; rpcs = rpcs | 2147483648U; } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 112UL) != 0U) { rpcs = rpcs | 2048U; __p___2 = to_i915((struct drm_device const *)dev); rpcs = (u32 )((int )__p___2->info.subslice_per_slice << 8) | rpcs; rpcs = rpcs | 2147483648U; } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 112UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); rpcs = (u32 )__p___4->info.eu_per_subslice | rpcs; __p___5 = to_i915((struct drm_device const *)dev); rpcs = (u32 )((int )__p___5->info.eu_per_subslice << 4) | rpcs; rpcs = rpcs | 2147483648U; } else { } return (rpcs); } }
/* Fill in the logical ring context image: move ctx_obj to the CPU domain,
 * pin its pages, kmap page 1 and write the register-state template —
 * MI_LOAD_REGISTER_IMM headers (285212699U render / 285212693U others, and
 * 285212689U for the second block), ring control/head/tail/buffer-size
 * registers, the four PDP entries taken from either the used page directory
 * or ppgtt->scratch_pd per the used_pdpes bitmap, and (render only) the
 * RPCS value from make_rpcs at slots 65-67. Marks the object dirty and
 * unpins pages before returning 0, or returns the errno of a failed
 * domain/page setup step. */
static int populate_lr_context(struct intel_context *ctx , struct drm_i915_gem_object *ctx_obj , struct intel_engine_cs *ring , struct intel_ringbuffer *ringbuf ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct i915_hw_ppgtt *ppgtt ; struct page *page ; uint32_t *reg_state ; int ret ; long tmp ; long tmp___0 ; void *tmp___1 ; int _a ; u64 _addr ; int tmp___2 ; u64 _addr___0 ; int tmp___3 ; u64 _addr___1 ; int tmp___4 ; u64 _addr___2 ; int tmp___5 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ppgtt = ctx->ppgtt; if ((unsigned long )ppgtt == (unsigned long )((struct i915_hw_ppgtt *)0)) { ppgtt = dev_priv->mm.aliasing_ppgtt; } else { } ret = i915_gem_object_set_to_cpu_domain(ctx_obj, 1); if (ret != 0) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("populate_lr_context", "Could not set to CPU domain\n"); } else { } return (ret); } else { } ret = i915_gem_object_get_pages(ctx_obj); if (ret != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("populate_lr_context", "Could not get object pages\n"); } else { } return (ret); } else { } i915_gem_object_pin_pages(ctx_obj); page = i915_gem_object_get_page___0(ctx_obj, 1); tmp___1 = kmap_atomic(page); reg_state = (uint32_t *)tmp___1; if ((unsigned int )ring->id == 0U) { *(reg_state + 1UL) = 285212699U; } else { *(reg_state + 1UL) = 285212693U; } *(reg_state + 1UL) = *(reg_state + 1UL) | 4096U; *(reg_state + 2UL) = ring->mmio_base + 580U; _a = 9; *(reg_state + 3UL) = (uint32_t )((_a << 16) | _a); *(reg_state + 4UL) = ring->mmio_base + 52U; *(reg_state + 5UL) = 0U; *(reg_state + 6UL) = ring->mmio_base + 48U; *(reg_state + 7UL) = 0U; *(reg_state + 8UL) = ring->mmio_base + 56U; *(reg_state + 10UL) = ring->mmio_base + 60U; *(reg_state + 11UL) = (((uint32_t )ringbuf->size - 4096U) & 2093056U) | 1U; *(reg_state + 12UL) = ring->mmio_base + 360U; *(reg_state + 13UL) = 0U; *(reg_state + 14UL) = ring->mmio_base + 320U; *(reg_state + 15UL) = 0U; *(reg_state + 16UL) = ring->mmio_base + 272U; *(reg_state + 17UL) = 32U; *(reg_state + 18UL) = ring->mmio_base + 284U; *(reg_state + 19UL) = 0U; *(reg_state + 20UL) = ring->mmio_base + 276U; *(reg_state + 21UL) = 0U; *(reg_state + 22UL) = ring->mmio_base + 280U; *(reg_state + 23UL) = 0U; if ((unsigned int )ring->id == 0U) { *(reg_state + 24UL) = ring->mmio_base + 448U; *(reg_state + 25UL) = 0U; *(reg_state + 26UL) = ring->mmio_base + 452U; *(reg_state + 27UL) = 0U; *(reg_state + 28UL) = ring->mmio_base + 456U; *(reg_state + 29UL) = 0U; } else { } *(reg_state + 33UL) = 285212689U; *(reg_state + 33UL) = *(reg_state + 33UL) | 4096U; *(reg_state + 34UL) = ring->mmio_base + 936U; *(reg_state + 35UL) = 0U; *(reg_state + 36UL) = ring->mmio_base + 652U; *(reg_state + 38UL) = ring->mmio_base + 648U; *(reg_state + 40UL) = ring->mmio_base + 644U; *(reg_state + 42UL) = ring->mmio_base + 640U; *(reg_state + 44UL) = ring->mmio_base + 636U; *(reg_state + 46UL) = ring->mmio_base + 632U; *(reg_state + 48UL) = ring->mmio_base + 628U; *(reg_state + 50UL) = ring->mmio_base + 624U; tmp___2 = constant_test_bit(3L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr = tmp___2 != 0 ? (ppgtt->__annonCompField80.pdp.page_directory[3])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 37UL) = (unsigned int )(_addr >> 32ULL); *(reg_state + 39UL) = (unsigned int )_addr; tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr___0 = tmp___3 != 0 ? (ppgtt->__annonCompField80.pdp.page_directory[2])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 41UL) = (unsigned int )(_addr___0 >> 32ULL); *(reg_state + 43UL) = (unsigned int )_addr___0; tmp___4 = constant_test_bit(1L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr___1 = tmp___4 != 0 ? (ppgtt->__annonCompField80.pdp.page_directory[1])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 45UL) = (unsigned int )(_addr___1 >> 32ULL); *(reg_state + 47UL) = (unsigned int )_addr___1; tmp___5 = constant_test_bit(0L, (unsigned long const volatile *)(& ppgtt->__annonCompField80.pdp.used_pdpes)); _addr___2 = tmp___5 != 0 ? (ppgtt->__annonCompField80.pdp.page_directory[0])->__annonCompField79.daddr : (ppgtt->scratch_pd)->__annonCompField79.daddr; *(reg_state + 49UL) = (unsigned int )(_addr___2 >> 32ULL); *(reg_state + 51UL) = (unsigned int )_addr___2; if ((unsigned int )ring->id == 0U) { *(reg_state + 65UL) = 285212673U; *(reg_state + 66UL) = 8392U; *(reg_state + 67UL) = make_rpcs(dev); } else { } __kunmap_atomic((void *)reg_state); ctx_obj->dirty = 1U; set_page_dirty(page); i915_gem_object_unpin_pages(ctx_obj); return (0); } }
/* Free a context's per-engine LRC state for engines 0..4: for the default
 * context also unpin ringbuffer + object, WARN (intel_lrc.c:1832) if a
 * pin_count is still nonzero, destroy and kfree the ringbuffer, and drop
 * the GEM reference on the context object. ldv_* labels are the CIL-lowered
 * for-loop. */
void intel_lr_context_free(struct intel_context *ctx ) { int i ; struct drm_i915_gem_object *ctx_obj ; struct intel_ringbuffer *ringbuf ; struct intel_engine_cs *ring ; int __ret_warn_on ; long tmp ; { i = 0; goto ldv_48722; ldv_48721: ctx_obj = ctx->engine[i].state; if ((unsigned long )ctx_obj != (unsigned long )((struct drm_i915_gem_object *)0)) { ringbuf = ctx->engine[i].ringbuf; ring = ringbuf->ring; if ((unsigned long )ring->default_context == (unsigned long )ctx) { intel_unpin_ringbuffer_obj(ringbuf); i915_gem_object_ggtt_unpin(ctx_obj); } else { } __ret_warn_on = ctx->engine[(unsigned int )ring->id].pin_count != 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1832, "WARN_ON(ctx->engine[ring->id].pin_count)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); intel_destroy_ringbuffer_obj(ringbuf); kfree((void const *)ringbuf); drm_gem_object_unreference___10(& ctx_obj->base); } else { } i = i + 1; ldv_48722: ; if (i <= 4) { goto ldv_48721; } else { } return; } }
/* Size in bytes of an engine's LRC image: WARNs (intel_lrc.c:1844) on
 * gen < 8; render ring gets 90112 (gen > 8) or 81920, all other engines
 * 8192. Continues on the next block's lines. */
static uint32_t get_lr_context_size(struct intel_engine_cs *ring ) { int ret ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; struct drm_i915_private *__p___0 ; { ret = 0; __p = to_i915((struct drm_device const *)ring->dev);
/* (continuation of get_lr_context_size) */ __ret_warn_on = (unsigned int )((unsigned char )__p->info.gen) <= 7U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1844, "WARN_ON(INTEL_INFO(ring->dev)->gen < 8)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); switch ((unsigned int )ring->id) { case 0U: __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 8U) { ret = 90112; } else { ret = 81920; } goto ldv_48743; case 1U: ; case 2U: ; case 3U: ; /* cases 1-3 fall through to case 4 — all non-render engines share one size */ case 4U: ret = 8192; goto ldv_48743; } ldv_48743: ; return ((uint32_t )ret); } }
/* Use the default context object as the engine's hardware status page:
 * record its GGTT offset, kmap its first page, and program the HWS register
 * (mmio_base+128U) with a posting read. */
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring , struct drm_i915_gem_object *default_ctx_obj ) { struct drm_i915_private *dev_priv ; unsigned long tmp ; struct page *tmp___0 ; void *tmp___1 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; tmp = i915_gem_obj_ggtt_offset(default_ctx_obj); ring->status_page.gfx_addr = (unsigned int )tmp; tmp___0 = sg_page___2((default_ctx_obj->pages)->sgl); tmp___1 = kmap(tmp___0); ring->status_page.page_addr = (u32 *)tmp___1; ring->status_page.obj = default_ctx_obj; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 128U), ring->status_page.gfx_addr, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 128U), 0); return; } }
/* Lazily create a context's per-engine LRC state: WARNs (intel_lrc.c:1904,
 * 1905) if legacy or LRC state already exists; allocates a page-rounded
 * context object (GGTT-pinned for the default context), allocates and sizes
 * a 128KiB ringbuffer (pinned+mapped for the default context), populates the
 * context image, and for a non-default render context runs init_context once
 * and sets rcs_initialized. Errors unwind through the cascading error labels
 * in reverse acquisition order. Returns 0 or a negative errno. */
int intel_lr_context_deferred_create(struct intel_context *ctx , struct intel_engine_cs *ring ) { bool is_global_default_ctx ; struct drm_device *dev ; struct drm_i915_gem_object *ctx_obj ; uint32_t context_size ; struct intel_ringbuffer *ringbuf ; int ret ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; long tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; long tmp___3 ; void *tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; { is_global_default_ctx = (unsigned long )ring->default_context == (unsigned long )ctx; dev = ring->dev; __ret_warn_on = (unsigned long )ctx->legacy_hw_ctx.rcs_state != (unsigned long )((struct drm_i915_gem_object *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1904, "WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (unsigned long )ctx->engine[(unsigned int )ring->id].state != (unsigned long )((struct drm_i915_gem_object *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 1905, "WARN_ON(ctx->engine[ring->id].state)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___1 = get_lr_context_size(ring); context_size = ((tmp___1 - 1U) | 4095U) + 1U; /* round up to 4KiB page */ ctx_obj = i915_gem_alloc_object(dev, (size_t )context_size); if ((unsigned long )ctx_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_lr_context_deferred_create", "Alloc LRC backing obj failed.\n"); } else { } return (-12); } else { } if ((int )is_global_default_ctx) { ret = i915_gem_obj_ggtt_pin(ctx_obj, 4096U, 0U); if (ret != 0) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_lr_context_deferred_create", "Pin LRC backing obj failed: %d\n", ret); } else { } drm_gem_object_unreference___10(& ctx_obj->base); return (ret); } else { } } else { } tmp___4 = kzalloc(48UL, 208U); ringbuf = (struct intel_ringbuffer *)tmp___4; if ((unsigned long )ringbuf == (unsigned long )((struct intel_ringbuffer *)0)) { tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_lr_context_deferred_create", "Failed to allocate ringbuffer %s\n", ring->name); } else { } ret = -12; goto error_unpin_ctx; } else { } ringbuf->ring = ring; ringbuf->size = 131072; ringbuf->effective_size = ringbuf->size; ringbuf->head = 0U; ringbuf->tail = 0U; ringbuf->last_retired_head = 4294967295U; intel_ring_update_space(ringbuf); if ((unsigned long )ringbuf->obj == (unsigned long )((struct drm_i915_gem_object *)0)) { ret = intel_alloc_ringbuffer_obj(dev, ringbuf); if (ret != 0) { tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_lr_context_deferred_create", "Failed to allocate ringbuffer obj %s: %d\n", ring->name, ret); } else { } goto error_free_rbuf; } else { } if ((int )is_global_default_ctx) { ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); if (ret != 0) { drm_err("Failed to pin and map ringbuffer %s: %d\n", ring->name, ret); goto error_destroy_rbuf; } else { } } else { } } else { } ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); if (ret != 0) { tmp___7 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_lr_context_deferred_create", "Failed to populate LRC: %d\n", ret); } else { } goto error; } else { } ctx->engine[(unsigned int )ring->id].ringbuf = ringbuf; ctx->engine[(unsigned int )ring->id].state = ctx_obj; if ((unsigned long )ring->default_context == (unsigned long )ctx) { lrc_setup_hardware_status_page(ring, ctx_obj); } else if ((unsigned int )ring->id == 0U && ! ctx->rcs_initialized) { if ((unsigned long )ring->init_context != (unsigned long )((int (*)(struct intel_engine_cs * , struct intel_context * ))0)) { ret = (*(ring->init_context))(ring, ctx); if (ret != 0) { drm_err("ring init context: %d\n", ret); ctx->engine[(unsigned int )ring->id].ringbuf = (struct intel_ringbuffer *)0; ctx->engine[(unsigned int )ring->id].state = (struct drm_i915_gem_object *)0; goto error; } else { } } else { } ctx->rcs_initialized = 1; } else { } return (0); error: ; if ((int )is_global_default_ctx) { intel_unpin_ringbuffer_obj(ringbuf); } else { } error_destroy_rbuf: intel_destroy_ringbuffer_obj(ringbuf); error_free_rbuf: kfree((void const *)ringbuf); error_unpin_ctx: ; if ((int )is_global_default_ctx) { i915_gem_object_ggtt_unpin(ctx_obj); } else { } drm_gem_object_unreference___10(& ctx_obj->base); return (ret); } }
/* After a GPU reset, for each initialized engine 0..4 with LRC state: WARN
 * (intel_lrc.c:2023) if its pages cannot be fetched; otherwise kmap page 1
 * of the context image, zero the saved ring head (slot 5) and tail (slot 7),
 * and reset the software ringbuffer head/tail. Continues on the next
 * block's line. */
void intel_lr_context_reset(struct drm_device *dev , struct intel_context *ctx ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int i ; struct drm_i915_gem_object *ctx_obj ; struct intel_ringbuffer *ringbuf ; uint32_t *reg_state ; struct page *page ; int __ret_warn_on ; long tmp ; int tmp___0 ; void *tmp___1 ; bool tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_48787; ldv_48786: ring = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___2 = intel_ring_initialized(ring); if ((int )tmp___2) { ctx_obj = ctx->engine[(unsigned int )ring->id].state; ringbuf = ctx->engine[(unsigned int )ring->id].ringbuf; if ((unsigned long )ctx_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_48783; } else { } tmp___0 = i915_gem_object_get_pages(ctx_obj); if (tmp___0 != 0) { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_lrc.c", 2023, "Failed get_pages for context obj\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto ldv_48783; } else { } page = i915_gem_object_get_page___0(ctx_obj, 1); tmp___1 = kmap_atomic(page); reg_state = (uint32_t *)tmp___1; *(reg_state + 5UL) = 0U; *(reg_state + 7UL) = 0U; __kunmap_atomic((void *)reg_state); ringbuf->head = 0U; ringbuf->tail = 0U; } else { } ldv_48783: i = i + 1; ldv_48787: ; if (i <= 4) { goto ldv_48786; } else { } return; } } bool ldv_queue_work_on_415(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_416(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_417(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_418(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_419(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work 
*ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/* popcount of the low 8 bits, delegated to __arch_hweight32(). */
__inline static unsigned int __arch_hweight8(unsigned int w ) { unsigned int tmp ; { tmp = __arch_hweight32(w & 255U); return (tmp); } }
/* paravirt local_save_flags: if pv_irq_ops.save_fl.func is NULL, trap via the ud2
 * BUG sequence (infinite goto for the verifier); otherwise call the op through the
 * patched-call inline asm and return EFLAGS in __ret. */
__inline static unsigned long arch_local_save_flags___11(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } }
/* static-key branch helper: true iff the key's enable count is > 0. */
__inline static bool static_key_false___8(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } }
/* lockdep helper: report whether an RCU-sched read-side section may be assumed held —
 * true when lockdep says rcu_sched_lock_map is held, or preemption/irqs are disabled;
 * short-circuits to 1 when lockdep is off and to 0 when RCU is not watching this CPU. */
__inline static int rcu_read_lock_sched_held___8(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return 
(1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___11(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } }
/* forward declarations of further numbered LDV workqueue-model instances. */
bool ldv_queue_work_on_429(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_431(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_430(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_433(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_432(struct workqueue_struct *ldv_func_arg1 ) ;
/* kref_get with WARN_ON_ONCE: warns when the post-increment refcount is <= 1,
 * i.e. the caller took a reference on an object whose count had already dropped. */
__inline static void kref_get___10(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } }
/* kref_sub: WARN on NULL release callback, atomically subtract `count`; when the
 * refcount hits zero, invoke release(kref) and return 1, otherwise return 0. */
__inline static int kref_sub___12(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } }
/* kref_put: drop exactly one reference via kref_sub___12. */
__inline static int kref_put___12(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___12(kref, 1U, release); return (tmp); } }
/* drm_can_sleep: returns 0 (must not sleep) when preemption is disabled, when this
 * CPU is the active kgdb CPU (cpu_number read via the expanded per-cpu asm below),
 * or when local irqs are disabled; returns 1 otherwise. The nested switch(4UL) is
 * CIL's expansion of the this_cpu_read() size dispatch — only the 4-byte cases run. */
__inline static bool drm_can_sleep___3(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; default: __bad_percpu_size(); } ldv_39629: pscr_ret__ = pfo_ret__; goto ldv_39635; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 2UL: __asm__ ("movw 
%%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; default: __bad_percpu_size(); } ldv_39639: pscr_ret__ = pfo_ret_____0; goto ldv_39635; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; default: __bad_percpu_size(); } ldv_39648: pscr_ret__ = pfo_ret_____1; goto ldv_39635; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; default: __bad_percpu_size(); } ldv_39657: pscr_ret__ = pfo_ret_____2; goto ldv_39635; default: __bad_size_call_parameter(); goto ldv_39635; } ldv_39635: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___11(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } }
int intel_ring_cacheline_align(struct intel_engine_cs *ring ) ; void __intel_ring_advance(struct intel_engine_cs *ring ) ;
/* GEM object unreference: kref_put with drm_gem_object_free as release (continues). */
__inline static void drm_gem_object_unreference___11(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___12(& obj->refcount, & drm_gem_object_free); } 
else { } return; } }
/* take a reference on a GEM request (NULL-safe); returns req for chaining. */
__inline static struct drm_i915_gem_request *i915_gem_request_reference___3(struct drm_i915_gem_request *req ) { { if ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0)) { kref_get___10(& req->ref); } else { } return (req); } }
/* drop a request reference; WARNs if dev->struct_mutex is not held by the caller. */
__inline static void i915_gem_request_unreference___2(struct drm_i915_gem_request *req ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& ((req->ring)->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2216, "WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); kref_put___12(& req->ref, & i915_gem_request_free); return; } }
/* assign *pdst = src with reference bookkeeping: ref the new value first, then
 * unref the old one (order makes self-assignment safe). */
__inline static void i915_gem_request_assign___2(struct drm_i915_gem_request **pdst , struct drm_i915_gem_request *src ) { { if ((unsigned long )src != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_reference___3(src); } else { } if ((unsigned long )*pdst != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_unreference___2(*pdst); } else { } *pdst = src; return; } }
/* expanded tracepoint: when the i915_gem_ring_flush static key is enabled, walk the
 * registered probe array under rcu_read_lock_sched and invoke each probe with
 * (ring, invalidate, flush); includes lockdep RCU-usage checking. */
__inline static void trace_i915_gem_ring_flush(struct intel_engine_cs *ring , u32 invalidate , u32 flush ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_376 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_378 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___8(& __tracepoint_i915_gem_ring_flush.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& 
__tracepoint_i915_gem_ring_flush.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___8(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 498, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46197: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_engine_cs * , u32 , u32 ))it_func))(__data, ring, invalidate, flush); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46197; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_gem_ring_flush.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___8(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 498, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } }
/* true iff the engine has a usable ringbuffer object; in execlists mode checks the
 * default context's per-engine ringbuffer, otherwise ring->buffer->obj. */
bool intel_ring_initialized(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct intel_context *dctx ; struct intel_ringbuffer *ringbuf ; { dev = ring->dev; if ((unsigned long )dev == (unsigned long )((struct drm_device *)0)) { return (0); } else { } if (i915.enable_execlists != 0) { dctx = ring->default_context; ringbuf = dctx->engine[(unsigned int )ring->id].ringbuf; return ((unsigned long )ringbuf->obj != (unsigned long )((struct drm_i915_gem_object *)0)); } else { return ((bool )((unsigned long )ring->buffer != (unsigned long )((struct intel_ringbuffer *)0) && (unsigned long )(ring->buffer)->obj != (unsigned long )((struct drm_i915_gem_object *)0))); } } }
/* free space between head and tail in a circular buffer of `size`, minus a
 * 64-byte reserve so head and tail never collide. */
int __intel_ring_space(int head , int tail , int size ) { int space ; { space = head - tail; if (space <= 0) { space = space + size; } else { } return (space + -64); } }
/* consume last_retired_head (sentinel 0xffffffff == none) into head, then
 * recompute ringbuf->space; head is masked to the HEAD address bits (0x1ffffc). */
void intel_ring_update_space(struct intel_ringbuffer *ringbuf ) { { if (ringbuf->last_retired_head != 4294967295U) { ringbuf->head = ringbuf->last_retired_head; ringbuf->last_retired_head = 4294967295U; } else { } ringbuf->space = __intel_ring_space((int )ringbuf->head & 2097148, (int )ringbuf->tail, ringbuf->size); return; } }
/* refresh and return the current free space. */
int intel_ring_space(struct intel_ringbuffer *ringbuf ) { { intel_ring_update_space(ringbuf); return (ringbuf->space); } }
/* true when this engine is flagged in gpu_error.stop_rings (test infrastructure). */
bool intel_ring_stopped(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; unsigned int tmp ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; tmp = intel_ring_flag(ring); return ((dev_priv->gpu_error.stop_rings & tmp) != 0U); } }
/* wrap tail to the buffer size and submit it via ring->write_tail unless the ring
 * is stopped (continues below). */
void __intel_ring_advance(struct intel_engine_cs *ring ) { struct 
intel_ringbuffer *ringbuf ; bool tmp ; { ringbuf = ring->buffer; ringbuf->tail = ringbuf->tail & (u32 )(ringbuf->size + -1); tmp = intel_ring_stopped(ring); if ((int )tmp) { return; } else { } (*(ring->write_tail))(ring, ringbuf->tail); return; } }
/* gen2 MI_FLUSH: build the command word from the invalidate/flush domain masks and
 * emit a 2-dword packet. */
static int gen2_render_ring_flush(struct intel_engine_cs *ring , u32 invalidate_domains , u32 flush_domains ) { u32 cmd ; int ret ; { cmd = 33554432U; if (((invalidate_domains | flush_domains) & 2U) == 0U) { cmd = cmd | 4U; } else { } if ((invalidate_domains & 4U) != 0U) { cmd = cmd | 1U; } else { } ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, cmd); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* gen4/5 MI_FLUSH: as gen2 but with extra command bits; the is-965g/gen5 probes
 * select the instruction-cache-invalidate bit. */
static int gen4_render_ring_flush(struct intel_engine_cs *ring , u32 invalidate_domains , u32 flush_domains ) { struct drm_device *dev ; u32 cmd ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = ring->dev; cmd = 33554436U; if (((invalidate_domains | flush_domains) & 2U) != 0U) { cmd = cmd & 4294967291U; } else { } if ((invalidate_domains & 16U) != 0U) { cmd = cmd | 2U; } else { } if ((invalidate_domains & 8U) != 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { cmd = cmd | 32U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 5U) { cmd = cmd | 32U; } else { } } } else { } ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, cmd); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* gen6 workaround: two PIPE_CONTROL packets (CS-stall, then QW-write to the
 * scratch address) required before the real flush. */
static int intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring ) { u32 scratch_addr ; int ret ; { scratch_addr = ring->scratch.gtt_offset + 128U; ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2046820355U); intel_ring_emit(ring, 1048578U); intel_ring_emit(ring, scratch_addr | 4U); 
intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2046820355U); intel_ring_emit(ring, 16384U); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* gen6 PIPE_CONTROL flush: post-sync workaround first, then a 4-dword
 * PIPE_CONTROL whose flag bits are chosen from flush/invalidate domains. */
static int gen6_render_ring_flush(struct intel_engine_cs *ring , u32 invalidate_domains , u32 flush_domains ) { u32 flags ; u32 scratch_addr ; int ret ; { flags = 0U; scratch_addr = ring->scratch.gtt_offset + 128U; ret = intel_emit_post_sync_nonzero_flush(ring); if (ret != 0) { return (ret); } else { } if (flush_domains != 0U) { flags = flags | 4096U; flags = flags | 1U; flags = flags | 1048576U; } else { } if (invalidate_domains != 0U) { flags = flags | 262144U; flags = flags | 2048U; flags = flags | 1024U; flags = flags | 16U; flags = flags | 8U; flags = flags | 4U; flags = flags | 1064960U; } else { } ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2046820354U); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* gen7 workaround: lone CS-stall PIPE_CONTROL emitted before invalidates. */
static int gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring ) { int ret ; { ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2046820354U); intel_ring_emit(ring, 1048578U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* gen7 PIPE_CONTROL flush: always CS-stall; invalidate path adds TLB/cache bits
 * and runs the cs_stall workaround first. */
static int gen7_render_ring_flush(struct intel_engine_cs *ring , u32 invalidate_domains , u32 flush_domains ) { u32 flags ; u32 scratch_addr ; int ret ; { flags = 0U; scratch_addr = ring->scratch.gtt_offset + 128U; flags = flags | 1048576U; if (flush_domains != 0U) { flags = flags | 4096U; flags = flags | 1U; } else { } if (invalidate_domains != 0U) { flags = flags | 262144U; flags = flags | 
2048U; flags = flags | 1024U; flags = flags | 16U; flags = flags | 8U; flags = flags | 4U; flags = flags | 65536U; flags = flags | 16384U; flags = flags | 16777216U; flags = flags | 2U; gen7_render_ring_cs_stall_wa(ring); } else { } ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2046820354U); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* gen8 helper: emit one 6-dword PIPE_CONTROL with the given flags/address. */
static int gen8_emit_pipe_control(struct intel_engine_cs *ring , u32 flags , u32 scratch_addr ) { int ret ; { ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2046820356U); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } }
/* gen8 flush: optional CS-stall pre-packet on invalidate, then the flush packet. */
static int gen8_render_ring_flush(struct intel_engine_cs *ring , u32 invalidate_domains , u32 flush_domains ) { u32 flags ; u32 scratch_addr ; int ret ; int tmp ; { flags = 0U; scratch_addr = ring->scratch.gtt_offset + 128U; flags = flags | 1048576U; if (flush_domains != 0U) { flags = flags | 4096U; flags = flags | 1U; } else { } if (invalidate_domains != 0U) { flags = flags | 262144U; flags = flags | 2048U; flags = flags | 1024U; flags = flags | 16U; flags = flags | 8U; flags = flags | 4U; flags = flags | 16384U; flags = flags | 16777216U; ret = gen8_emit_pipe_control(ring, 1048578U, 0U); if (ret != 0) { return (ret); } else { } } else { } tmp = gen8_emit_pipe_control(ring, flags, scratch_addr); return (tmp); } }
/* default write_tail: MMIO write of `value` to the ring TAIL register
 * (mmio_base + 0x30). */
static void ring_write_tail(struct intel_engine_cs *ring , u32 value ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 48U), value, 1); return; } }
/* read the 64-bit active head (ACTHD): on gen8+ read upper/lower halves with an
 * upper-dword re-read to detect wraparound (continues below). */
u64 intel_ring_get_active_head(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; u64 
acthd ; u32 upper ; uint32_t tmp ; u32 lower ; uint32_t tmp___0 ; u32 tmp___1 ; uint32_t tmp___2 ; int __ret_warn_on ; uint32_t tmp___3 ; long tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 92U), 1); upper = tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 116U), 1); lower = tmp___0; tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 92U), 1); tmp___1 = tmp___2; if (upper != tmp___1) { upper = tmp___1; lower = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 116U), 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 92U), 1); __ret_warn_on = tmp___3 != upper; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 452, "WARN_ON(I915_READ(((ring->mmio_base)+0x5c)) != upper)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } acthd = ((unsigned long long )upper << 32) | (unsigned long long )lower; } else { __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 116U), 1); acthd = (u64 )tmp___5; } else { tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8392L, 1); acthd = (u64 )tmp___6; } } return (acthd); } }
/* pre-gen4-style status page: program HWS_PGA (0x2080) with the DMA bus address
 * of the status page (high bits folded in on gen4+). */
static void ring_setup_phys_status_page(struct intel_engine_cs *ring ) 
{ struct drm_i915_private *dev_priv ; u32 addr ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; addr = (u32 )(dev_priv->status_page_dmah)->busaddr; __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { addr = ((u32 )((dev_priv->status_page_dmah)->busaddr >> 28) & 240U) | addr; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8320L, addr, 1); return; } }
/* program the per-ring hardware status page register (gen7 uses fixed per-engine
 * offsets, gen6 mmio_base+0x2080, older mmio_base+0x80), then on gen6/7 request a
 * TLB sync-flush via INSTPM and poll up to 1s for completion. */
static void intel_ring_setup_status_page(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 mmio ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; u32 reg ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; int _a ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; mmio = 0U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 7U) { switch ((unsigned int )ring->id) { case 0U: mmio = 16512U; goto ldv_48121; case 2U: mmio = 17024U; goto ldv_48121; case 4U: ; case 1U: mmio = 16768U; goto ldv_48121; case 3U: mmio = 17280U; goto ldv_48121; } ldv_48121: ; } else { __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { mmio = ring->mmio_base + 8320U; } else { mmio = ring->mmio_base + 128U; } } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )mmio, ring->status_page.gfx_addr, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )mmio, 0); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 5U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 7U) { reg = ring->mmio_base + 192U; tmp = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 156U), 1); __ret_warn_on = (tmp & 512U) == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 522, "WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); _a = 544; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, (uint32_t )((_a << 16) | _a), 1); tmp___1 = msecs_to_jiffies(1000U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48172; ldv_48171: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___2 & 32U) != 0U) { ret__ = -110; } else { } goto ldv_48170; } else { } tmp___3 = drm_can_sleep___3(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48172: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___4 & 32U) != 0U) { goto ldv_48171; } else { } ldv_48170: ; if (ret__ != 0) { drm_err("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", ring->name); } else { } } else { } } else { } return; } } extern void __compiletime_assert_557(void) ;
/* stop_ring(): on gen != 2 set MI_MODE stop-ring bit and poll up to 1s for MODE_IDLE;
 * then zero CTL/HEAD/TAIL and, on gen != 2, clear the stop bit again. Returns true
 * when HEAD reads back as zero (address bits masked), i.e. the ring really stopped. */
static bool stop_ring(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; int _a ; uint32_t tmp___0 ; uint32_t tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; struct drm_i915_private *__p ; bool __cond___3 ; bool __cond___4 ; bool __cond___5 ; struct drm_i915_private *__p___0 ; uint32_t tmp___6 ; { tmp = to_i915((struct drm_device const *)ring->dev); dev_priv = tmp; __p = to_i915((struct drm_device const 
*)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { _a = 256; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 156U), (uint32_t )((_a << 16) | _a), 1); tmp___2 = msecs_to_jiffies(1000U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48209; ldv_48208: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 156U), 1); if ((tmp___3 & 512U) == 0U) { ret__ = -110; } else { } goto ldv_48207; } else { } tmp___4 = drm_can_sleep___3(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48209: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 156U), 1); if ((tmp___5 & 512U) == 0U) { goto ldv_48208; } else { } ldv_48207: ; if (ret__ != 0) { drm_err("%s : timed out trying to stop ring\n", ring->name); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 48U), 1); if (tmp___0 != tmp___1) { return (0); } else { } } else { } } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 60U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 52U), 0U, 1); (*(ring->write_tail))(ring, 0U); __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 2U) { (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_557(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_557(); } else { } __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_557(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 156U), (uint32_t )16777216, 1); } else { } tmp___6 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); return ((tmp___6 & 2097148U) == 0U); } }
/* init_ring_common(): under forcewake, stop the ring (retrying once with a debug
 * dump), program the status page, write START from the object's GGTT offset, clear
 * HEAD, enable CTL with the buffer length, then poll up to 50ms until CTL-valid,
 * START matches and HEAD is zeroed; on success seed the software ringbuffer
 * head/tail from the registers and clear hangcheck state. Returns 0 or -EIO. */
static int init_ring_common(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_ringbuffer *ringbuf ; struct drm_i915_gem_object *obj ; int ret ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; long tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; uint32_t tmp___7 ; bool tmp___8 ; int tmp___9 ; bool tmp___10 ; int tmp___11 ; struct drm_i915_private *__p ; unsigned long tmp___12 ; uint32_t tmp___13 ; long tmp___14 ; uint32_t tmp___15 ; unsigned long tmp___16 ; uint32_t tmp___17 ; uint32_t tmp___18 ; uint32_t tmp___19 ; uint32_t tmp___20 ; uint32_t tmp___21 ; unsigned long timeout__ ; unsigned long tmp___22 ; int ret__ ; uint32_t tmp___23 ; uint32_t tmp___24 ; unsigned long tmp___25 ; uint32_t tmp___26 ; bool tmp___27 ; uint32_t tmp___28 ; uint32_t tmp___29 ; unsigned long tmp___30 ; uint32_t tmp___31 ; uint32_t tmp___32 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ringbuf = ring->buffer; obj = ringbuf->obj; ret = 0; intel_uncore_forcewake_get(dev_priv, 7); tmp___10 = stop_ring(ring); if (tmp___10) { tmp___11 = 0; } else { tmp___11 = 1; } if (tmp___11) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 56U), 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 48U), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); drm_ut_debug_printk("init_ring_common", "%s head not reset to zero ctl %08x head %08x tail %08x start %08x\n", ring->name, tmp___2, tmp___1, tmp___0, tmp); } else { } tmp___8 = stop_ring(ring); if (tmp___8) { 
tmp___9 = 0; } else { tmp___9 = 1; } if (tmp___9) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 56U), 1); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 48U), 1); tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); drm_err("failed to set %s head to zero ctl %08x head %08x tail %08x start %08x\n", ring->name, tmp___7, tmp___6, tmp___5, tmp___4); ret = -5; goto out; } else { } } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { intel_ring_setup_status_page(ring); } else { ring_setup_phys_status_page(ring); } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); tmp___12 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 56U), (uint32_t )tmp___12, 1); tmp___15 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); if (tmp___15 != 0U) { tmp___14 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___14 != 0L) { tmp___13 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); drm_ut_debug_printk("init_ring_common", "%s initialization failed [head=%08x], fudging\n", ring->name, tmp___13); } else { } } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 52U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 60U), (((uint32_t )ringbuf->size - 4096U) & 2093056U) | 1U, 1); tmp___22 = msecs_to_jiffies(50U); timeout__ = (tmp___22 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48256; ldv_48255: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___23 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); if ((tmp___23 & 1U) == 0U) { ret__ = -110; } else { tmp___24 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 56U), 1); tmp___25 = i915_gem_obj_ggtt_offset(obj); if ((unsigned long )tmp___24 != tmp___25) { ret__ = -110; } else { tmp___26 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); if ((tmp___26 & 2097148U) != 0U) { ret__ = -110; } else { } } } goto ldv_48254; } else { } tmp___27 = drm_can_sleep___3(); if ((int )tmp___27) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48256: tmp___28 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); if ((tmp___28 & 1U) == 0U) { goto ldv_48255; } else { tmp___29 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 56U), 1); tmp___30 = i915_gem_obj_ggtt_offset(obj); if ((unsigned long )tmp___29 != tmp___30) { goto ldv_48255; } else { tmp___31 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); if ((tmp___31 & 2097148U) != 0U) { goto ldv_48255; } else { goto ldv_48254; } } } ldv_48254: ; if (ret__ != 0) { tmp___16 = i915_gem_obj_ggtt_offset(obj); tmp___17 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 56U), 1); tmp___18 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 48U), 1); tmp___19 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); tmp___20 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); tmp___21 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 60U), 1); drm_err("%s initialization failed ctl %08x (valid? 
%d) head %08x tail %08x start %08x [expected %08lx]\n", ring->name, tmp___21, tmp___20 & 1U, tmp___19, tmp___18, tmp___17, tmp___16); ret = -5; goto out; } else { } ringbuf->last_retired_head = 4294967295U; ringbuf->head = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 52U), 1); tmp___32 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 48U), 1); ringbuf->tail = tmp___32 & 2097144U; intel_ring_update_space(ringbuf); memset((void *)(& ring->hangcheck), 0, 32UL); out: intel_uncore_forcewake_put(dev_priv, 7); return (ret); } }
/* tear down the pipe-control scratch object: on gen5+ kunmap the CPU mapping and
 * unpin from GGTT, then drop the GEM reference and NULL the pointer. */
void intel_fini_pipe_control(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct page *tmp ; struct drm_i915_private *__p ; { dev = ring->dev; if ((unsigned long )ring->scratch.obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { tmp = sg_page___2(((ring->scratch.obj)->pages)->sgl); kunmap(tmp); i915_gem_object_ggtt_unpin(ring->scratch.obj); } else { } drm_gem_object_unreference___11(& (ring->scratch.obj)->base); ring->scratch.obj = (struct drm_i915_gem_object *)0; return; } }
/* allocate the 4KiB pipe-control scratch object (WARNs if already present);
 * definition continues beyond this chunk of the file. */
int intel_init_pipe_control(struct intel_engine_cs *ring ) { int ret ; int __ret_warn_on ; long tmp ; unsigned long tmp___0 ; struct page *tmp___1 ; void *tmp___2 ; long tmp___3 ; { __ret_warn_on = (unsigned long )ring->scratch.obj != (unsigned long )((struct drm_i915_gem_object *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 670, "WARN_ON(ring->scratch.obj)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096UL); if ((unsigned long )ring->scratch.obj 
== (unsigned long )((struct drm_i915_gem_object *)0)) {
/* Continuation of intel_init_pipe_control(): allocation failed -> -ENOMEM. */
drm_err("Failed to allocate seqno page\n");
ret = -12;
goto err;
} else {
}
ret = i915_gem_object_set_cache_level(ring->scratch.obj, 1);
if (ret != 0) {
goto err_unref;
} else {
}
ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096U, 0U);
if (ret != 0) {
goto err_unref;
} else {
}
tmp___0 = i915_gem_obj_ggtt_offset(ring->scratch.obj);
ring->scratch.gtt_offset = (u32 )tmp___0;
/* CPU-map the first (only) page of the scratch object. */
tmp___1 = sg_page___2(((ring->scratch.obj)->pages)->sgl);
tmp___2 = kmap(tmp___1);
ring->scratch.cpu_page = (u32 volatile *)tmp___2;
if ((unsigned long )ring->scratch.cpu_page == (unsigned long )((u32 volatile *)0U)) {
ret = -12;
goto err_unpin;
} else {
}
tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L);
if (tmp___3 != 0L) {
drm_ut_debug_printk("intel_init_pipe_control", "%s pipe control offset: 0x%08x\n", ring->name, ring->scratch.gtt_offset);
} else {
}
return (0);
err_unpin:
i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
drm_gem_object_unreference___11(& (ring->scratch.obj)->base);
err: ;
return (ret);
}
}

/*
 * intel_ring_workarounds_emit() - replay the recorded workaround register
 * writes into the ring.  WARN_ON_ONCE (here lowered by CIL into the
 * __warned/__ret_warn_once dance) and return 0 if the table is empty.
 * Otherwise: flush caches, reserve (count+1)*2 dwords, emit a single
 * load-register-immediate command (0x11000000 | (2*count - 1) -- presumably
 * MI_LOAD_REGISTER_IMM, TODO confirm opcode) followed by addr/value pairs,
 * a terminating 0 dword, then flush caches again.
 */
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring , struct intel_context *ctx )
{
int ret ;
int i ;
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
struct i915_workarounds *w ;
bool __warned ;
int __ret_warn_once ;
int __ret_warn_on ;
long tmp ;
long tmp___0 ;
long tmp___1 ;
long tmp___2 ;
long tmp___3 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
w = & dev_priv->workarounds;
__ret_warn_once = w->count == 0U;
tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L);
if (tmp___1 != 0L) {
__ret_warn_on = ! __warned;
tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 714, "WARN_ON_ONCE(w->count == 0)");
} else {
}
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
__warned = 1;
} else {
}
} else {
}
tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L);
if (tmp___2 != 0L) {
return (0);
} else {
}
ring->gpu_caches_dirty = 1;
ret = intel_ring_flush_all_caches(ring);
if (ret != 0) {
return (ret);
} else {
}
ret = intel_ring_begin(ring, (int )((w->count + 1U) * 2U));
if (ret != 0) {
return (ret);
} else {
}
intel_ring_emit(ring, (w->count * 2U - 1U) | 285212672U);
/* Emit each recorded (addr, value) pair; CIL lowered the for-loop to gotos. */
i = 0;
goto ldv_48293;
ldv_48292:
intel_ring_emit(ring, w->reg[i].addr);
intel_ring_emit(ring, w->reg[i].value);
i = i + 1;
ldv_48293: ;
if ((u32 )i < w->count) {
goto ldv_48292;
} else {
}
intel_ring_emit(ring, 0U);
intel_ring_advance(ring);
ring->gpu_caches_dirty = 1;
ret = intel_ring_flush_all_caches(ring);
if (ret != 0) {
return (ret);
} else {
}
tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L);
if (tmp___3 != 0L) {
drm_ut_debug_printk("intel_ring_workarounds_emit", "Number of Workarounds emitted: %d\n", w->count);
} else {
}
return (0);
}
}

/*
 * intel_rcs_ctx_init() - render-context init: emit the workaround list,
 * then initialise the golden render state (failure there is only logged,
 * and its code is returned).
 */
static int intel_rcs_ctx_init(struct intel_engine_cs *ring , struct intel_context *ctx )
{
int ret ;
{
ret = intel_ring_workarounds_emit(ring, ctx);
if (ret != 0) {
return (ret);
} else {
}
ret = i915_gem_render_state_init(ring);
if (ret != 0) {
drm_err("init render state: %d\n", ret);
} else {
}
return (ret);
}
}

/*
 * wa_add() - append one {addr, mask, value} entry to the per-device
 * workaround table.  WARNs and returns -28 (-ENOSPC) when the table is
 * full (idx > 15, i.e. table size presumably I915_MAX_WA_REGS == 16).
 * (Continues in the next chunk segment.)
 */
static int wa_add(struct drm_i915_private *dev_priv , u32 const addr , u32 const mask , u32 const val )
{
u32 idx ;
int __ret_warn_on ;
long tmp ;
long tmp___0 ;
{
idx = dev_priv->workarounds.count;
__ret_warn_on = idx > 15U;
tmp = ldv__builtin_expect(__ret_warn_on != 0,
0L);
/* Continuation of wa_add(): warn on overflow, then store the entry. */
if (tmp != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 766, "WARN_ON(idx >= I915_MAX_WA_REGS)");
} else {
}
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
return (-28);
} else {
}
dev_priv->workarounds.reg[idx].addr = addr;
dev_priv->workarounds.reg[idx].value = val;
dev_priv->workarounds.reg[idx].mask = mask;
dev_priv->workarounds.count = dev_priv->workarounds.count + 1U;
return (0);
}
}
/* Residue of compile-time assertions (BUILD_BUG_ON lowered by CIL); the
 * guarding __cond___* flags below are constant 0, so these never fire. */
extern void __compiletime_assert_838(void) ;
extern void __compiletime_assert_854(void) ;

/*
 * bdw_init_workarounds() - record Broadwell workaround registers.
 * Each wa_add() value of the form ((_a << 16) | _a) is the i915
 * masked-register idiom: the upper 16 bits select which bits the write
 * affects.  The duplicated device-id checks (tmp___2 vs tmp___3) are CIL's
 * expansion of one conditional mask/value expression evaluated twice.
 */
static int bdw_init_workarounds(struct intel_engine_cs *ring )
{
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
int r ;
int _a ;
int tmp ;
int r___0 ;
int _a___0 ;
int tmp___0 ;
int r___1 ;
int _a___1 ;
int tmp___1 ;
int r___2 ;
int _a___2 ;
int tmp___2 ;
struct drm_i915_private *__p___2 ;
struct drm_i915_private *__p___3 ;
struct drm_i915_private *__p___4 ;
unsigned int tmp___3 ;
struct drm_i915_private *__p___8 ;
struct drm_i915_private *__p___9 ;
struct drm_i915_private *__p___10 ;
int tmp___4 ;
int r___3 ;
bool __cond___23 ;
bool __cond___24 ;
bool __cond___25 ;
int tmp___5 ;
int r___4 ;
int _a___3 ;
int tmp___6 ;
int r___5 ;
bool __cond___29 ;
bool __cond___30 ;
bool __cond___31 ;
int tmp___7 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
_a = 288;
tmp = wa_add(dev_priv, 58608U, 288U, (u32 const )((_a << 16) | _a));
r = tmp;
if (r != 0) {
return (r);
} else {
}
_a___0 = 1;
tmp___0 = wa_add(dev_priv, 58612U, 1U, (u32 const )((_a___0 << 16) | _a___0));
r___0 = tmp___0;
if (r___0 != 0) {
return (r___0);
} else {
}
_a___1 = 2;
tmp___1 = wa_add(dev_priv, 57732U, 2U, (u32 const )((_a___1 << 16) | _a___1));
r___1 = tmp___1;
if (r___1 != 0) {
return (r___1);
} else {
}
/* Pick value 18480 vs 2096 depending on platform byte (+45) / gen 8 /
 * device-id nibble (0x20) -- presumably a GT-variant check, TODO confirm. */
__p___2 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) {
__p___3 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) {
__p___4 = to_i915((struct drm_device const *)dev);
if (((int )__p___4->info.device_id & 240) == 32) {
tmp___2 = 18480;
} else {
tmp___2 = 2096;
}
} else {
tmp___2 = 2096;
}
} else {
tmp___2 = 2096;
}
_a___2 = tmp___2;
/* Same selection again, this time for the mask argument. */
__p___8 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___8 + 45UL) == 0U) {
__p___9 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___9->info.gen) == 8U) {
__p___10 = to_i915((struct drm_device const *)dev);
if (((int )__p___10->info.device_id & 240) == 32) {
tmp___3 = 18480U;
} else {
tmp___3 = 2096U;
}
} else {
tmp___3 = 2096U;
}
} else {
tmp___3 = 2096U;
}
tmp___4 = wa_add(dev_priv, 29440U, tmp___3, (u32 const )((_a___2 << 16) | _a___2));
r___2 = tmp___4;
if (r___2 != 0) {
return (r___2);
} else {
}
__cond___23 = 0;
if ((int )__cond___23) {
__compiletime_assert_838();
} else {
}
__cond___24 = 0;
if ((int )__cond___24) {
__compiletime_assert_838();
} else {
}
__cond___25 = 0;
if ((int )__cond___25) {
__compiletime_assert_838();
} else {
}
tmp___5 = wa_add(dev_priv, 28672U, 4U, (u32 const )262144);
r___3 = tmp___5;
if (r___3 != 0) {
return (r___3);
} else {
}
_a___3 = 64;
tmp___6 = wa_add(dev_priv, 28676U, 64U, (u32 const )((_a___3 << 16) | _a___3));
r___4 = tmp___6;
if (r___4 != 0) {
return (r___4);
} else {
}
__cond___29 = 0;
if ((int )__cond___29) {
__compiletime_assert_854();
} else {
}
__cond___30 = 0;
if ((int )__cond___30) {
__compiletime_assert_854();
} else {
}
__cond___31 = 0;
if ((int )__cond___31) {
__compiletime_assert_854();
} else {
}
tmp___7 = wa_add(dev_priv, 28680U, 640U, (u32 const )41943552);
r___5 = tmp___7;
if (r___5 != 0) {
return (r___5);
} else {
}
return (0);
}
}
extern void __compiletime_assert_883(void) ;
extern void __compiletime_assert_902(void) ;

/*
 * chv_init_workarounds() - record Cherryview workaround registers; same
 * masked-write idiom and CIL compile-time-assert residue as bdw above.
 */
static int chv_init_workarounds(struct intel_engine_cs *ring )
{
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
int r ;
int _a ;
int tmp ;
int r___0 ;
int _a___0 ;
int tmp___0 ;
int r___1 ;
bool __cond___5 ;
bool __cond___6 ;
bool __cond___7 ;
int tmp___1 ;
int r___2 ;
int _a___1 ;
int tmp___2 ;
int r___3 ;
int _a___2 ;
int tmp___3 ;
int r___4 ;
bool __cond___14 ;
bool __cond___15 ;
bool __cond___16 ;
int tmp___4 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
_a = 288;
tmp = wa_add(dev_priv, 58608U, 288U, (u32 const )((_a << 16) | _a));
r = tmp;
if (r != 0) {
return (r);
} else {
}
_a___0 = 2064;
tmp___0 = wa_add(dev_priv, 29440U, 2064U, (u32 const )((_a___0 << 16) | _a___0));
r___0 = tmp___0;
if (r___0 != 0) {
return (r___0);
} else {
}
__cond___5 = 0;
if ((int )__cond___5) {
__compiletime_assert_883();
} else {
}
__cond___6 = 0;
if ((int )__cond___6) {
__compiletime_assert_883();
} else {
}
__cond___7 = 0;
if ((int )__cond___7) {
__compiletime_assert_883();
} else {
}
tmp___1 = wa_add(dev_priv, 28672U, 4U, (u32 const )262144);
r___1 = tmp___1;
if (r___1 != 0) {
return (r___1);
} else {
}
_a___1 = 64;
tmp___2 = wa_add(dev_priv, 28676U, 64U, (u32 const )((_a___1 << 16) | _a___1));
r___2 = tmp___2;
if (r___2 != 0) {
return (r___2);
} else {
}
_a___2 = 32768;
tmp___3 = wa_add(dev_priv, 28696U, 32768U, (u32 const )((_a___2 << 16) | _a___2));
r___3 = tmp___3;
if (r___3 != 0) {
return (r___3);
} else {
}
__cond___14 = 0;
if ((int )__cond___14) {
__compiletime_assert_902();
} else {
}
__cond___15 = 0;
if ((int )__cond___15) {
__compiletime_assert_902();
} else {
}
__cond___16 = 0;
if ((int )__cond___16) {
__compiletime_assert_902();
} else {
}
tmp___4 = wa_add(dev_priv, 28680U, 640U, (u32 const )41943552);
r___4 = tmp___4;
if (r___4 != 0) {
return (r___4);
} else {
}
return (0);
}
}
extern void __compiletime_assert_926(void) ;
extern void __compiletime_assert_953(void) ;
/* gen9_init_workarounds(): Gen9-common workaround table; body continues in
 * the next chunk segment.  Revision gating below compares the PCI device
 * revision (pdev->revision) against early-stepping cutoffs. */
static int gen9_init_workarounds(struct intel_engine_cs *ring )
{
/* Continuation of gen9_init_workarounds(): declarations and body.
 * The repeated to_i915() probes (__p, __p___N) are CIL's expansion of
 * platform/stepping macros: byte at dev_priv+45 is a platform flag
 * (presumably IS_BROXTON vs IS_SKYLAKE -- TODO confirm), info.gen is the
 * hardware generation, pdev->revision the stepping. */
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
uint32_t tmp ;
int r ;
int _a ;
int tmp___0 ;
int r___0 ;
int _a___0 ;
int tmp___1 ;
int r___1 ;
bool __cond___5 ;
bool __cond___6 ;
bool __cond___7 ;
int tmp___2 ;
struct drm_i915_private *__p ;
struct drm_i915_private *__p___0 ;
struct drm_i915_private *__p___1 ;
struct drm_i915_private *__p___2 ;
struct drm_i915_private *__p___3 ;
struct drm_i915_private *__p___4 ;
int r___2 ;
int _a___1 ;
int tmp___3 ;
int r___3 ;
int _a___2 ;
int tmp___4 ;
struct drm_i915_private *__p___5 ;
struct drm_i915_private *__p___6 ;
struct drm_i915_private *__p___7 ;
struct drm_i915_private *__p___8 ;
struct drm_i915_private *__p___9 ;
int r___4 ;
int _a___3 ;
int tmp___5 ;
struct drm_i915_private *__p___10 ;
struct drm_i915_private *__p___11 ;
struct drm_i915_private *__p___12 ;
struct drm_i915_private *__p___13 ;
int r___5 ;
int _a___4 ;
int tmp___6 ;
int r___6 ;
int _a___5 ;
int tmp___7 ;
int r___7 ;
bool __cond___38 ;
bool __cond___39 ;
bool __cond___40 ;
int tmp___8 ;
int r___8 ;
int _a___6 ;
int tmp___9 ;
struct drm_i915_private *__p___14 ;
struct drm_i915_private *__p___15 ;
struct drm_i915_private *__p___16 ;
struct drm_i915_private *__p___17 ;
struct drm_i915_private *__p___18 ;
struct drm_i915_private *__p___19 ;
struct drm_i915_private *__p___20 ;
struct drm_i915_private *__p___21 ;
struct drm_i915_private *__p___22 ;
struct drm_i915_private *__p___23 ;
int r___9 ;
uint32_t _a___7 ;
int tmp___10 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
_a = 256;
tmp___0 = wa_add(dev_priv, 58608U, 256U, (u32 const )((_a << 16) | _a));
r = tmp___0;
if (r != 0) {
return (r);
} else {
}
_a___0 = 32;
tmp___1 = wa_add(dev_priv, 57732U, 32U, (u32 const )((_a___0 << 16) | _a___0));
r___0 = tmp___1;
if (r___0 != 0) {
return (r___0);
} else {
}
/* Early-stepping only (platform-flag rev 0/1, or gen 9 rev <= 2). */
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) {
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___0->dev)->pdev)->revision == 0U) {
goto _L;
} else {
__p___1 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___1->dev)->pdev)->revision == 1U) {
goto _L;
} else {
goto _L___0;
}
}
} else {
_L___0: /* CIL Label */
__p___2 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) {
__p___3 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) {
__p___4 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___4->dev)->pdev)->revision <= 2U) {
_L: /* CIL Label */
__cond___5 = 0;
if ((int )__cond___5) {
__compiletime_assert_926();
} else {
}
__cond___6 = 0;
if ((int )__cond___6) {
__compiletime_assert_926();
} else {
}
__cond___7 = 0;
if ((int )__cond___7) {
__compiletime_assert_926();
} else {
}
tmp___2 = wa_add(dev_priv, 57736U, 32U, (u32 const )2097152);
r___1 = tmp___2;
if (r___1 != 0) {
return (r___1);
} else {
}
} else {
}
} else {
}
} else {
}
}
/* Early-stepping pair of masked enables at 28688/29448. */
__p___5 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) {
__p___6 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___6->dev)->pdev)->revision <= 1U) {
goto _L___1;
} else {
goto _L___2;
}
} else {
_L___2: /* CIL Label */
__p___7 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___7 + 45UL) == 0U) {
__p___8 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___8->info.gen) == 9U) {
__p___9 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___9->dev)->pdev)->revision <= 2U) {
_L___1: /* CIL Label */
_a___1 = 16384;
tmp___3 = wa_add(dev_priv, 28688U, 16384U, (u32 const )((_a___1 << 16) | _a___1));
r___2 = tmp___3;
if (r___2 != 0) {
return (r___2);
} else {
}
_a___2 = 16384;
tmp___4 = wa_add(dev_priv, 29448U, 16384U, (u32 const )((_a___2 << 16) | _a___2));
r___3 = tmp___4;
if (r___3 != 0) {
return (r___3);
} else {
}
}
else {
}
} else {
}
} else {
}
}
/* Later steppings (platform-flag rev > 1, or any gen 9): enable bit 16 at 57748. */
__p___10 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___10 + 45UL) != 0U) {
__p___11 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___11->dev)->pdev)->revision > 1U) {
goto _L___3;
} else {
goto _L___4;
}
} else {
_L___4: /* CIL Label */
__p___12 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___12 + 45UL) == 0U) {
__p___13 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___13->info.gen) == 9U) {
_L___3: /* CIL Label */
_a___3 = 16;
tmp___5 = wa_add(dev_priv, 57748U, 16U, (u32 const )((_a___3 << 16) | _a___3));
r___4 = tmp___5;
if (r___4 != 0) {
return (r___4);
} else {
}
} else {
}
} else {
}
}
_a___4 = 64;
tmp___6 = wa_add(dev_priv, 28676U, 64U, (u32 const )((_a___4 << 16) | _a___4));
r___5 = tmp___6;
if (r___5 != 0) {
return (r___5);
} else {
}
_a___5 = 2;
tmp___7 = wa_add(dev_priv, 28676U, 2U, (u32 const )((_a___5 << 16) | _a___5));
r___6 = tmp___7;
if (r___6 != 0) {
return (r___6);
} else {
}
__cond___38 = 0;
if ((int )__cond___38) {
__compiletime_assert_953();
} else {
}
__cond___39 = 0;
if ((int )__cond___39) {
__compiletime_assert_953();
} else {
}
__cond___40 = 0;
if ((int )__cond___40) {
__compiletime_assert_953();
} else {
}
tmp___8 = wa_add(dev_priv, 57736U, 8U, (u32 const )524288);
r___7 = tmp___8;
if (r___7 != 0) {
return (r___7);
} else {
}
__p___14 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___14 + 45UL) != 0U) {
__p___15 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___15->dev)->pdev)->revision == 2U) {
goto _L___5;
} else {
goto _L___6;
}
} else {
_L___6: /* CIL Label */
__p___16 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___16 + 45UL) == 0U) {
__p___17 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___17->info.gen) == 9U) {
__p___18 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___18->dev)->pdev)->revision <= 2U) {
_L___5: /* CIL Label */
_a___6 = 16384;
tmp___9 = wa_add(dev_priv, 29448U, 16384U, (u32 const )((_a___6 << 16) | _a___6));
r___8 = tmp___9;
if (r___8 != 0) {
return (r___8);
} else {
}
} else {
}
} else {
}
} else {
}
}
/* Final entry: base mask 0x20; OR in 0x8000 on the indicated steppings. */
tmp = 32U;
__p___19 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___19 + 45UL) != 0U) {
__p___20 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___20->dev)->pdev)->revision == 5U) {
tmp = tmp | 32768U;
} else {
goto _L___7;
}
} else {
_L___7: /* CIL Label */
__p___21 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___21 + 45UL) == 0U) {
__p___22 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___22->info.gen) == 9U) {
__p___23 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___23->dev)->pdev)->revision > 2U) {
tmp = tmp | 32768U;
} else {
}
} else {
}
} else {
}
}
_a___7 = tmp;
tmp___10 = wa_add(dev_priv, 29440U, tmp, (_a___7 << 16) | _a___7);
r___9 = tmp___10;
if (r___9 != 0) {
return (r___9);
} else {
}
return (0);
}
}
extern void __compiletime_assert_1008(void) ;

/*
 * skl_tune_iz_hashing() - Skylake IZ hashing tuning.  For each of the 3
 * slices whose 7-EU subslice mask has exactly one bit set (hweight8 == 1),
 * record 3 - (ffs(mask) - 1); the "+ 255U" on u8 is the CIL lowering of
 * the "- 1".  If no slice qualifies, return without adding an entry.
 * (Continues in the next chunk segment.)
 */
static int skl_tune_iz_hashing(struct intel_engine_cs *ring )
{
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
u8 vals[3U] ;
unsigned int i ;
u8 ss ;
unsigned int tmp ;
int tmp___0 ;
int r ;
bool __cond ;
int tmp___1 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
vals[0] = 0U;
vals[1] = 0U;
vals[2] = 0U;
i = 0U;
goto ldv_48912;
ldv_48911:
tmp = __arch_hweight8((unsigned int )dev_priv->info.subslice_7eu[i]);
if (tmp != 1U) {
goto ldv_48910;
} else {
}
tmp___0 = ffs((int )dev_priv->info.subslice_7eu[i]);
ss = (unsigned int )((u8 )tmp___0) + 255U;
vals[i] = 3U - (unsigned int )ss;
ldv_48910:
i = i + 1U;
ldv_48912: ;
if (i <= 2U) {
goto ldv_48911;
} else {
}
if (((unsigned int )vals[0] == 0U &&
(unsigned int )vals[1] == 0U) && (unsigned int )vals[2] == 0U) {
/* Continuation of skl_tune_iz_hashing(): all-zero tuning -> nothing to do. */
return (0);
} else {
}
__cond = 0;
if ((int )__cond) {
__compiletime_assert_1008();
} else {
}
/* Pack the three 2-bit values into one masked write (mask 0x3F,
 * 0x3F0000 selected by the upper half of 4128768 = 0x3F0000). */
tmp___1 = wa_add(dev_priv, 28680U, 63U, (u32 const )(((((int )vals[2] << 4) | ((int )vals[1] << 2)) | (int )vals[0]) | 4128768));
r = tmp___1;
if (r != 0) {
return (r);
} else {
}
return (0);
}
}

/*
 * skl_init_workarounds() - Skylake: gen9 common workarounds plus
 * PCI-revision (stepping) gated extras, then IZ hashing tuning.
 */
static int skl_init_workarounds(struct intel_engine_cs *ring )
{
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
int r ;
int _a ;
int tmp ;
struct drm_i915_private *__p ;
int r___0 ;
int _a___0 ;
int tmp___0 ;
struct drm_i915_private *__p___0 ;
struct drm_i915_private *__p___1 ;
int r___1 ;
int _a___1 ;
int tmp___1 ;
struct drm_i915_private *__p___2 ;
int tmp___2 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
gen9_init_workarounds(ring);
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p->dev)->pdev)->revision == 1U) {
_a = 8;
tmp = wa_add(dev_priv, 28696U, 8U, (u32 const )((_a << 16) | _a));
r = tmp;
if (r != 0) {
return (r);
} else {
}
} else {
}
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___0->dev)->pdev)->revision == 2U) {
goto _L;
} else {
__p___1 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___1->dev)->pdev)->revision == 3U) {
_L: /* CIL Label */
_a___0 = 17408;
tmp___0 = wa_add(dev_priv, 29440U, 17408U, (u32 const )((_a___0 << 16) | _a___0));
r___0 = tmp___0;
if (r___0 != 0) {
return (r___0);
} else {
}
} else {
}
}
__p___2 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p___2->dev)->pdev)->revision <= 3U) {
_a___1 = 16;
tmp___1 = wa_add(dev_priv, 29440U, 16U, (u32 const )((_a___1 << 16) | _a___1));
r___1 = tmp___1;
if (r___1 != 0) {
return (r___1);
} else {
}
} else {
}
tmp___2 = skl_tune_iz_hashing(ring);
return (tmp___2);
}
}

/*
 * bxt_init_workarounds() - Broxton: gen9 common workarounds plus one
 * unconditional entry and one entry gated on early steppings (rev <= 3).
 */
static int bxt_init_workarounds(struct intel_engine_cs *ring )
{
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
int r ;
int _a ;
int tmp ;
int r___0 ;
int _a___0 ;
int tmp___0 ;
struct drm_i915_private *__p ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
gen9_init_workarounds(ring);
_a = 32;
tmp = wa_add(dev_priv, 58608U, 32U, (u32 const )((_a << 16) | _a));
r = tmp;
if (r != 0) {
return (r);
} else {
}
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )((__p->dev)->pdev)->revision <= 3U) {
_a___0 = 16;
tmp___0 = wa_add(dev_priv, 57600U, 16U, (u32 const )((_a___0 << 16) | _a___0));
r___0 = tmp___0;
if (r___0 != 0) {
return (r___0);
} else {
}
} else {
}
return (0);
}
}

/*
 * init_workarounds_ring() - reset the workaround table and dispatch to the
 * per-platform recorder.  WARNs if called on a ring other than the render
 * ring (ring->id != 0, i.e. RCS).  Dispatch keys on the platform byte at
 * dev_priv+45 (presumably a platform flag -- TODO confirm) and info.gen:
 * gen 8 non-flagged -> bdw, gen 8 flagged -> chv, flagged (gen 9) -> skl,
 * non-flagged gen 9 -> bxt, else nothing.
 */
int init_workarounds_ring(struct intel_engine_cs *ring )
{
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
int __ret_warn_on ;
long tmp ;
int tmp___0 ;
struct drm_i915_private *__p ;
struct drm_i915_private *__p___0 ;
int tmp___1 ;
struct drm_i915_private *__p___1 ;
struct drm_i915_private *__p___2 ;
int tmp___2 ;
struct drm_i915_private *__p___3 ;
int tmp___3 ;
struct drm_i915_private *__p___4 ;
struct drm_i915_private *__p___5 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
__ret_warn_on = (unsigned int )ring->id != 0U;
tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 1073, "WARN_ON(ring->id != RCS)");
} else {
}
ldv__builtin_expect(__ret_warn_on != 0, 0L);
dev_priv->workarounds.count = 0U;
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) {
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) {
tmp___0 = bdw_init_workarounds(ring);
return (tmp___0);
} else {
}
} else {
}
__p___1 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) {
__p___2 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) {
tmp___1 = chv_init_workarounds(ring);
return (tmp___1);
} else {
}
} else {
}
__p___3 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) {
tmp___2 = skl_init_workarounds(ring);
return (tmp___2);
} else {
}
__p___4 = to_i915((struct drm_device const *)dev);
if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) {
__p___5 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___5->info.gen) == 9U) {
tmp___3 = bxt_init_workarounds(ring);
return (tmp___3);
} else {
}
} else {
}
return (0);
}
}
extern void __compiletime_assert_1132(void) ;

/*
 * init_render_ring() - render-ring bring-up: common ring init, then a
 * series of generation-gated masked chicken-register writes (registers
 * 8348/9504/8860/8480/8384 and mmio_base+168).  Body continues in the
 * next chunk segment.
 */
static int init_render_ring(struct intel_engine_cs *ring )
{
struct drm_device *dev ;
struct drm_i915_private *dev_priv ;
int ret ;
int tmp ;
int _a ;
struct drm_i915_private *__p ;
struct drm_i915_private *__p___0 ;
int _a___0 ;
struct drm_i915_private *__p___1 ;
struct drm_i915_private *__p___2 ;
int _a___1 ;
struct drm_i915_private *__p___3 ;
int _a___2 ;
int _a___3 ;
struct drm_i915_private *__p___4 ;
bool __cond___20 ;
bool __cond___21 ;
bool __cond___22 ;
struct drm_i915_private *__p___5 ;
int _a___4 ;
struct drm_i915_private *__p___6 ;
struct drm_i915_private *__p___7 ;
struct drm_i915_private *__p___8 ;
struct drm_i915_private *__p___9 ;
int tmp___0 ;
{
dev = ring->dev;
dev_priv = (struct drm_i915_private *)dev->dev_private;
tmp = init_ring_common(ring);
ret = tmp;
if (ret != 0) {
return (ret);
} else {
}
/* Gens 4..6: masked enable of bit 6 in register 8348. */
__p = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p->info.gen) > 3U) {
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned char )__p___0->info.gen) <= 6U) {
_a = 64;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8348L, (uint32_t )((_a << 16) | _a), 1);
} else {
}
} else {
}
__p___1 = to_i915((struct drm_device const *)dev);
if ((unsigned int )((unsigned
char )__p___1->info.gen) > 5U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 8U) { _a___0 = 16384; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8348L, (uint32_t )((_a___0 << 16) | _a___0), 1); } else { } } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 6U) { _a___1 = 8192; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 9504L, (uint32_t )((_a___1 << 16) | _a___1), 1); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 7U) { _a___2 = 8192; _a___3 = 2048; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8860L, (uint32_t )(((_a___2 << 16) | _a___2) | ((_a___3 << 16) | _a___3)), 1); } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 6U) { /* the three __cond checks are CIL-expanded BUILD_BUG_ON()s; all statically false here */ __cond___20 = 0; if ((int )__cond___20) { __compiletime_assert_1132(); } else { } __cond___21 = 0; if ((int )__cond___21) { __compiletime_assert_1132(); } else { } __cond___22 = 0; if ((int )__cond___22) { __compiletime_assert_1132(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8480L, (uint32_t )2097152, 1); } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 5U) { _a___4 = 128; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8384L, (uint32_t )((_a___4 << 16) | _a___4), 1); } else { } /* unmask the ring interrupt register (mmio_base + 168 == per-ring IMR) with a platform-dependent mask */ __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { __p___7 = to_i915((struct drm_device const *)dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), (unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U ? 
4294965215U : 4294967263U, 1); } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) != 0U) { __p___7 = to_i915((struct drm_device const *)dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), (unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U ? 4294965215U : 4294967263U, 1); } else { } } tmp___0 = init_workarounds_ring(ring); return (tmp___0); } } /* render_ring_cleanup: release the global semaphore object (GGTT unpin + unreference) if present, then tear down the pipe-control scratch state. */ static void render_ring_cleanup(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->semaphore_obj != (unsigned long )((struct drm_i915_gem_object *)0)) { i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); drm_gem_object_unreference___11(& (dev_priv->semaphore_obj)->base); dev_priv->semaphore_obj = (struct drm_i915_gem_object *)0; } else { } intel_fini_pipe_control(ring); return; } } /* gen8_rcs_signal: emit semaphore-signal commands from the render ring to every other initialized ring (8 dwords per peer, pre-added to num_dwords). A signal_ggtt slot equal to 196608ULL marks an invalid/unused peer and is skipped. */ static int gen8_rcs_signal(struct intel_engine_cs *signaller , unsigned int num_dwords ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *waiter ; int i ; int ret ; int num_rings ; struct drm_i915_private *__p ; unsigned int tmp ; u32 seqno ; u64 gtt_offset ; bool tmp___0 ; { dev = signaller->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); tmp = __arch_hweight32((unsigned int )__p->info.ring_mask); num_rings = (int )tmp; num_dwords = (unsigned int )((num_rings + -1) * 8) + num_dwords; ret = intel_ring_begin(signaller, (int )num_dwords); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_49495; ldv_49494: waiter = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(waiter); if ((int )tmp___0) { gtt_offset = signaller->semaphore.__annonCompField77.signal_ggtt[i]; if (gtt_offset == 196608ULL) { goto ldv_49493; } else { } seqno = 
i915_gem_request_get_seqno(signaller->outstanding_lazy_request); intel_ring_emit(signaller, 2046820356U); intel_ring_emit(signaller, 16793728U); intel_ring_emit(signaller, (unsigned int )gtt_offset); intel_ring_emit(signaller, (unsigned int )(gtt_offset >> 32ULL)); intel_ring_emit(signaller, seqno); intel_ring_emit(signaller, 0U); intel_ring_emit(signaller, ((unsigned int )waiter->id << 15) | 226492416U); intel_ring_emit(signaller, 0U); } else { } ldv_49493: i = i + 1; ldv_49495: ; if (i <= 4) { goto ldv_49494; } else { } return (0); } } /* gen8_xcs_signal: like gen8_rcs_signal but for the non-render rings; uses a 6-dword flush-dw based encoding per peer instead of the 8-dword pipe-control form. */ static int gen8_xcs_signal(struct intel_engine_cs *signaller , unsigned int num_dwords ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *waiter ; int i ; int ret ; int num_rings ; struct drm_i915_private *__p ; unsigned int tmp ; u32 seqno ; u64 gtt_offset ; bool tmp___0 ; { dev = signaller->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); tmp = __arch_hweight32((unsigned int )__p->info.ring_mask); num_rings = (int )tmp; num_dwords = (unsigned int )((num_rings + -1) * 6) + num_dwords; ret = intel_ring_begin(signaller, (int )num_dwords); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_49715; ldv_49714: waiter = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___0 = intel_ring_initialized(waiter); if ((int )tmp___0) { gtt_offset = signaller->semaphore.__annonCompField77.signal_ggtt[i]; if (gtt_offset == 196608ULL) { goto ldv_49713; } else { } seqno = i915_gem_request_get_seqno(signaller->outstanding_lazy_request); intel_ring_emit(signaller, 318783490U); intel_ring_emit(signaller, (unsigned int )gtt_offset | 4U); intel_ring_emit(signaller, (unsigned int )(gtt_offset >> 32ULL)); intel_ring_emit(signaller, seqno); intel_ring_emit(signaller, ((unsigned int )waiter->id << 15) | 226492416U); intel_ring_emit(signaller, 0U); } else { } ldv_49713: i = i + 1; ldv_49715: ; if (i <= 4) { goto ldv_49714; } else { } 
return (0); } } /* gen6_signal: legacy (gen6/7) mailbox-register signalling. Emits 3 dwords (LOAD_REGISTER_IMM of the seqno into the peer's mbox register) for every initialized peer with a valid mbox; the odd/even arithmetic on num_dwords plus the trailing conditional NOOP keeps the ring emission dword-pair aligned. */ static int gen6_signal(struct intel_engine_cs *signaller , unsigned int num_dwords ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *useless ; int i ; int ret ; int num_rings ; struct drm_i915_private *__p ; unsigned int tmp ; u32 mbox_reg ; u32 seqno ; uint32_t tmp___0 ; bool tmp___1 ; { dev = signaller->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); tmp = __arch_hweight32((unsigned int )__p->info.ring_mask); num_rings = (int )tmp; num_dwords = ((unsigned int )(((num_rings + -1) * 3 + -1) | 1) + num_dwords) + 1U; ret = intel_ring_begin(signaller, (int )num_dwords); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_49934; ldv_49933: useless = (struct intel_engine_cs *)(& dev_priv->ring) + (unsigned long )i; tmp___1 = intel_ring_initialized(useless); if ((int )tmp___1) { mbox_reg = signaller->semaphore.__annonCompField77.mbox.signal[i]; if (mbox_reg != 0U) { tmp___0 = i915_gem_request_get_seqno(signaller->outstanding_lazy_request); seqno = tmp___0; intel_ring_emit(signaller, 285212673U); intel_ring_emit(signaller, mbox_reg); intel_ring_emit(signaller, seqno); } else { } } else { } i = i + 1; ldv_49934: ; if (i <= 4) { goto ldv_49933; } else { } if (((unsigned int )num_rings & 1U) == 0U) { intel_ring_emit(signaller, 0U); } else { } return (0); } } /* gen6_add_request: signal the peers (if a signal hook is installed) or just reserve 4 dwords, then store the new seqno into status-page slot and emit a user-interrupt dword. */ static int gen6_add_request(struct intel_engine_cs *ring ) { int ret ; uint32_t tmp ; { if ((unsigned long )ring->semaphore.signal != (unsigned long )((int (*)(struct intel_engine_cs * , unsigned int ))0)) { ret = (*(ring->semaphore.signal))(ring, 4U); } else { ret = intel_ring_begin(ring, 4); } if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 276824065U); intel_ring_emit(ring, 192U); tmp = i915_gem_request_get_seqno(ring->outstanding_lazy_request); intel_ring_emit(ring, tmp); intel_ring_emit(ring, 16777216U); __intel_ring_advance(ring); return (0); } } /* i915_gem_has_seqno_wrapped: true when the requested seqno is ahead of the last recorded one, i.e. the 32-bit seqno space has wrapped since. */ __inline static bool 
i915_gem_has_seqno_wrapped(struct drm_device *dev , u32 seqno ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; return (dev_priv->last_seqno < seqno); } } /* gen8_ring_sync: emit a MI_SEMAPHORE_WAIT polling the per-(waiter,signaller) 8-byte slot inside the global semaphore object (index = waiter->id + signaller->id * 5). */ static int gen8_ring_sync(struct intel_engine_cs *waiter , struct intel_engine_cs *signaller , u32 seqno ) { struct drm_i915_private *dev_priv ; int ret ; unsigned long tmp ; unsigned long tmp___0 ; { dev_priv = (struct drm_i915_private *)(waiter->dev)->dev_private; ret = intel_ring_begin(waiter, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(waiter, 239112194U); intel_ring_emit(waiter, seqno); tmp = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); intel_ring_emit(waiter, (unsigned int )tmp + ((unsigned int )waiter->id + (unsigned int )signaller->id * 5U) * 8U); tmp___0 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); intel_ring_emit(waiter, (unsigned int )((tmp___0 + ((unsigned long )waiter->id + (unsigned long )((unsigned int )signaller->id * 5U)) * 8UL) >> 32UL)); intel_ring_advance(waiter); return (0); } } /* gen6_ring_sync: wait on the signaller's mailbox register for (seqno - 1); WARNs if the mailbox slot is the invalid sentinel (196608U). When the seqno space has wrapped, emits 4 NOOPs instead of a wait that could never complete. */ static int gen6_ring_sync(struct intel_engine_cs *waiter , struct intel_engine_cs *signaller , u32 seqno ) { u32 dw1 ; u32 wait_mbox ; int ret ; int __ret_warn_on ; long tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; { dw1 = 185860097U; wait_mbox = signaller->semaphore.__annonCompField77.mbox.wait[(unsigned int )waiter->id]; seqno = seqno - 1U; __ret_warn_on = wait_mbox == 196608U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 1362, "WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = intel_ring_begin(waiter, 4); if (ret != 0) { return (ret); } else { } tmp___0 = i915_gem_has_seqno_wrapped(waiter->dev, seqno); if 
(tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } tmp___2 = ldv__builtin_expect((long )tmp___1, 1L); if (tmp___2 != 0L) { intel_ring_emit(waiter, dw1 | wait_mbox); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0U); intel_ring_emit(waiter, 0U); } else { intel_ring_emit(waiter, 0U); intel_ring_emit(waiter, 0U); intel_ring_emit(waiter, 0U); intel_ring_emit(waiter, 0U); } intel_ring_advance(waiter); return (0); } } /* pc_render_add_request: gen4/5 pipe-control add-request. Interleaves six dummy PIPE_CONTROL flushes to scratch addresses spaced 128 bytes apart (hardware workaround) before the final PIPE_CONTROL that writes the seqno and raises the notify interrupt. */ static int pc_render_add_request(struct intel_engine_cs *ring ) { u32 scratch_addr ; int ret ; uint32_t tmp ; uint32_t tmp___0 ; { scratch_addr = ring->scratch.gtt_offset + 128U; ret = intel_ring_begin(ring, 32); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 2046841858U); intel_ring_emit(ring, ring->scratch.gtt_offset | 4U); tmp = i915_gem_request_get_seqno(ring->outstanding_lazy_request); intel_ring_emit(ring, tmp); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 2046844930U); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); scratch_addr = scratch_addr + 128U; intel_ring_emit(ring, 2046844930U); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); scratch_addr = scratch_addr + 128U; intel_ring_emit(ring, 2046844930U); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); scratch_addr = scratch_addr + 128U; intel_ring_emit(ring, 2046844930U); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); scratch_addr = scratch_addr + 128U; intel_ring_emit(ring, 2046844930U); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); scratch_addr = scratch_addr + 128U; intel_ring_emit(ring, 2046844930U); intel_ring_emit(ring, scratch_addr | 4U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 2046842114U); intel_ring_emit(ring, ring->scratch.gtt_offset | 4U); tmp___0 = 
i915_gem_request_get_seqno(ring->outstanding_lazy_request); intel_ring_emit(ring, tmp___0); intel_ring_emit(ring, 0U); __intel_ring_advance(ring); return (0); } } /* gen6_ring_get_seqno: when lazy_coherency is false, issue a dummy MMIO read (mmio_base + 116, presumably RING_ACTHD) to flush pending writes before reading the seqno from status-page dword 48. */ static u32 gen6_ring_get_seqno(struct intel_engine_cs *ring , bool lazy_coherency ) { struct drm_i915_private *dev_priv ; u32 tmp ; { if (! lazy_coherency) { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 116U), 0); } else { } tmp = intel_read_status_page(ring, 48); return (tmp); } } /* ring_get_seqno: plain status-page read (slot 48); lazy_coherency is ignored. */ static u32 ring_get_seqno(struct intel_engine_cs *ring , bool lazy_coherency ) { u32 tmp ; { tmp = intel_read_status_page(ring, 48); return (tmp); } } /* ring_set_seqno: write seqno into status-page slot 48. */ static void ring_set_seqno(struct intel_engine_cs *ring , u32 seqno ) { { intel_write_status_page(ring, 48, seqno); return; } } /* pc_render_get_seqno / pc_render_set_seqno: gen4/5 variants that keep the seqno in the pipe-control scratch CPU page instead of the status page. */ static u32 pc_render_get_seqno(struct intel_engine_cs *ring , bool lazy_coherency ) { { return ((u32 )*(ring->scratch.cpu_page)); } } static void pc_render_set_seqno(struct intel_engine_cs *ring , u32 seqno ) { { *(ring->scratch.cpu_page) = seqno; return; } } /* gen5_ring_get_irq: refcounted IRQ enable — under dev_priv->irq_lock, enable the GT interrupt on the 0 -> 1 refcount transition; WARNs (and bails) if interrupts are globally disabled. */ static bool gen5_ring_get_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; raw_spinlock_t *tmp___3 ; unsigned int tmp___4 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 1489, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (0); } 
else { } tmp___3 = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp___3); tmp___4 = ring->irq_refcount; ring->irq_refcount = ring->irq_refcount + 1U; if (tmp___4 == 0U) { gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (1); } } /* gen5_ring_put_irq: refcounted IRQ disable — disable the GT interrupt on the 1 -> 0 transition, under irq_lock. */ static void gen5_ring_put_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; raw_spinlock_t *tmp ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ring->irq_refcount = ring->irq_refcount - 1U; if (ring->irq_refcount == 0U) { gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return; } } /* i9xx_ring_get_irq: pre-gen5 refcounted enable — clear the ring's bit in the cached IMR (offset 8360) with a 32-bit write and a posting read; silently returns false if interrupts are disabled. */ static bool i9xx_ring_get_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; bool tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; unsigned int tmp___2 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___2 = ring->irq_refcount; ring->irq_refcount = ring->irq_refcount + 1U; if (tmp___2 == 0U) { dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask & ~ ring->irq_enable_mask; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8360L, 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (1); } } /* i9xx_ring_put_irq: inverse of i9xx_ring_get_irq — re-set the mask bit on the last put. */ static void i9xx_ring_put_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; raw_spinlock_t *tmp ; { dev = ring->dev; 
dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ring->irq_refcount = ring->irq_refcount - 1U; if (ring->irq_refcount == 0U) { dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask | ring->irq_enable_mask; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8360L, dev_priv->__annonCompField82.irq_mask, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8360L, 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return; } } /* i8xx_ring_get_irq: gen2 variant of i9xx_ring_get_irq — identical logic but IMR is accessed with 16-bit MMIO (mmio_writew/mmio_readw). */ static bool i8xx_ring_get_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; bool tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; unsigned int tmp___2 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___2 = ring->irq_refcount; ring->irq_refcount = ring->irq_refcount + 1U; if (tmp___2 == 0U) { dev_priv->__annonCompField82.irq_mask = dev_priv->__annonCompField82.irq_mask & ~ ring->irq_enable_mask; (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8360L, (int )((uint16_t )dev_priv->__annonCompField82.irq_mask), 1); (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8360L, 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (1); } } /* i8xx_ring_put_irq: gen2 16-bit counterpart of i9xx_ring_put_irq. */ static void i8xx_ring_put_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; raw_spinlock_t *tmp ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ring->irq_refcount = ring->irq_refcount - 1U; if (ring->irq_refcount == 0U) { dev_priv->__annonCompField82.irq_mask = 
dev_priv->__annonCompField82.irq_mask | ring->irq_enable_mask; (*(dev_priv->uncore.funcs.mmio_writew))(dev_priv, 8360L, (int )((uint16_t )dev_priv->__annonCompField82.irq_mask), 1); (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, 8360L, 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return; } } /* bsd_ring_flush: minimal flush for the BSD ring — one MI_FLUSH dword plus a NOOP; the invalidate/flush domain arguments are ignored. */ static int bsd_ring_flush(struct intel_engine_cs *ring , u32 invalidate_domains , u32 flush_domains ) { int ret ; { ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 33554432U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } } /* i9xx_add_request: store the new seqno into status-page slot and emit a user interrupt (same 4-dword sequence as gen6_add_request, without peer signalling). */ static int i9xx_add_request(struct intel_engine_cs *ring ) { int ret ; uint32_t tmp ; { ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 276824065U); intel_ring_emit(ring, 192U); tmp = i915_gem_request_get_seqno(ring->outstanding_lazy_request); intel_ring_emit(ring, tmp); intel_ring_emit(ring, 16777216U); __intel_ring_advance(ring); return (0); } } /* gen6_ring_get_irq: refcounted enable via the per-ring IMR (mmio_base + 168). On certain platforms (the __p+45UL flag checks) the render ring additionally keeps error-interrupt bits (2080U or 32U) masked; finally enables the GT-level interrupt. */ static bool gen6_ring_get_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; raw_spinlock_t *tmp___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; unsigned int tmp___4 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 1630, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if 
(tmp___2 != 0L) { return (0); } else { } tmp___3 = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp___3); tmp___4 = ring->irq_refcount; ring->irq_refcount = ring->irq_refcount + 1U; if (tmp___4 == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { _L: /* CIL Label */ if ((unsigned int )ring->id == 0U) { __p = to_i915((struct drm_device const *)dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ (ring->irq_enable_mask | ((unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 2080U : 32U)), 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ ring->irq_enable_mask, 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ ring->irq_enable_mask, 1); } } gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (1); } } /* gen6_ring_put_irq: inverse of gen6_ring_get_irq — on the last put, re-mask the per-ring IMR (keeping the platform-specific error bits masked on the render ring) and disable the GT-level interrupt. */ static void gen6_ring_put_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; raw_spinlock_t *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ring->irq_refcount = ring->irq_refcount - 1U; if (ring->irq_refcount == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { _L: /* CIL Label */ if ((unsigned int )ring->id == 0U) { __p = to_i915((struct drm_device const *)dev); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), (unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 4294965215U : 4294967263U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), 4294967295U, 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), 4294967295U, 1); } } gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return; } } /* hsw_vebox_get_irq: Haswell VEBOX interrupts are routed through the PM interrupt path (gen6_enable_pm_irq) instead of the GT path; same refcount-under-irq_lock discipline. */ static bool hsw_vebox_get_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; raw_spinlock_t *tmp___3 ; unsigned int tmp___4 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 1673, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (0); } else { } tmp___3 = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp___3); tmp___4 = ring->irq_refcount; ring->irq_refcount = ring->irq_refcount + 1U; if (tmp___4 == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ ring->irq_enable_mask, 1); gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (1); } } /* hsw_vebox_put_irq: inverse — remask the IMR and disable the PM interrupt on the last put. */ static void hsw_vebox_put_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private 
*dev_priv ; unsigned long flags ; raw_spinlock_t *tmp ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ring->irq_refcount = ring->irq_refcount - 1U; if (ring->irq_refcount == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), 4294967295U, 1); gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return; } } /* gen8_ring_get_irq: gen8 refcounted enable via the per-ring IMR with a posting read; on the flagged platforms (__p+45UL) the render ring keeps bit 5 (32U) masked as well. */ static bool gen8_ring_get_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; raw_spinlock_t *tmp___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; unsigned int tmp___4 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_irqs_enabled(dev_priv); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 1708, "WARN_ON(!intel_irqs_enabled(dev_priv))"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (0); } else { } tmp___3 = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp___3); tmp___4 = ring->irq_refcount; ring->irq_refcount = ring->irq_refcount + 1U; if (tmp___4 == 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /* CIL Label */ if ((unsigned int )ring->id == 0U) { 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ (ring->irq_enable_mask | 32U), 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ ring->irq_enable_mask, 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), ~ ring->irq_enable_mask, 1); } } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 168U), 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (1); } } /* gen8_ring_put_irq: inverse — on the last put, write the all-masked (or all-but-bit-5) IMR value and issue a posting read. */ static void gen8_ring_put_irq(struct intel_engine_cs *ring ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long flags ; raw_spinlock_t *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ring->irq_refcount = ring->irq_refcount - 1U; if (ring->irq_refcount == 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /* CIL Label */ if ((unsigned int )ring->id == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), 4294967263U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), 4294967295U, 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 168U), 4294967295U, 1); } } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 168U), 0); } else { } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return; } } /* i965_dispatch_execbuffer: 2-dword batch start; bit in dispatch_flags selects the secure vs. non-secure MI_BATCH_BUFFER_START encoding. */ static int i965_dispatch_execbuffer(struct intel_engine_cs *ring , u64 offset , u32 length , unsigned int dispatch_flags ) { int ret ; { ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { 
} intel_ring_emit(ring, (int )dispatch_flags & 1 ? 411041920U : 411042176U); intel_ring_emit(ring, (u32 )offset); intel_ring_advance(ring); return (0); } } /* i830_dispatch_execbuffer: i830 workaround — unless flag bit 1 is set, the batch is first blitted into the scratch area (cs_offset) and dispatched from there; batches over 256 KiB (262144U) are rejected with -28 (-ENOSPC). */ static int i830_dispatch_execbuffer(struct intel_engine_cs *ring , u64 offset , u32 len , unsigned int dispatch_flags ) { u32 cs_offset ; int ret ; { cs_offset = ring->scratch.gtt_offset; ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 1345323011U); intel_ring_emit(ring, 66064384U); intel_ring_emit(ring, 131076U); intel_ring_emit(ring, cs_offset); intel_ring_emit(ring, 3735928559U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); if ((dispatch_flags & 2U) == 0U) { if (len > 262144U) { return (-28); } else { } ret = intel_ring_begin(ring, 8); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 1357905924U); intel_ring_emit(ring, 63705088U); intel_ring_emit(ring, ((len + 4095U) / 4096U << 16) | 4096U); intel_ring_emit(ring, cs_offset); intel_ring_emit(ring, 4096U); intel_ring_emit(ring, (u32 )offset); intel_ring_emit(ring, 33554432U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); offset = (u64 )cs_offset; } else { } ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 402653185U); intel_ring_emit(ring, (unsigned int )((dispatch_flags & 1U) == 0U) | (u32 )offset); intel_ring_emit(ring, ((u32 )offset + len) - 8U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } } /* i915_dispatch_execbuffer: 2-dword dispatch; the low bit of the address encodes non-secure when dispatch_flags bit 0 is clear. */ static int i915_dispatch_execbuffer(struct intel_engine_cs *ring , u64 offset , u32 len , unsigned int dispatch_flags ) { int ret ; { ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 411041920U); intel_ring_emit(ring, (unsigned int )((dispatch_flags & 1U) == 0U) | (u32 )offset); intel_ring_advance(ring); return (0); } } /* cleanup_status_page: kunmap, unpin, and drop the status-page GEM object if one was allocated. */ static void cleanup_status_page(struct intel_engine_cs *ring ) { struct drm_i915_gem_object *obj ; struct page *tmp ; { obj = ring->status_page.obj; if ((unsigned 
long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return; } else { } tmp = sg_page___2((obj->pages)->sgl); kunmap(tmp); i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference___11(& obj->base); ring->status_page.obj = (struct drm_i915_gem_object *)0; return; } } /* init_status_page: lazily allocate a 4 KiB GEM object for the hardware status page, set its cache level, pin it into the GGTT (forcing a mappable pin on the flagged platforms), kmap it, and zero it. Uses the goto err_unref cleanup pattern on pin failure. */ static int init_status_page(struct intel_engine_cs *ring ) { struct drm_i915_gem_object *obj ; unsigned int flags ; int ret ; struct drm_i915_private *__p ; unsigned long tmp ; struct page *tmp___0 ; void *tmp___1 ; long tmp___2 ; { obj = ring->status_page.obj; if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { obj = i915_gem_alloc_object(ring->dev, 4096UL); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { drm_err("Failed to allocate status page\n"); return (-12); } else { } ret = i915_gem_object_set_cache_level(obj, 1); if (ret != 0) { goto err_unref; } else { } flags = 0U; __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { flags = flags | 1U; } else { } ret = i915_gem_obj_ggtt_pin(obj, 4096U, flags); if (ret != 0) { err_unref: drm_gem_object_unreference___11(& obj->base); return (ret); } else { } ring->status_page.obj = obj; } else { } tmp = i915_gem_obj_ggtt_offset(obj); ring->status_page.gfx_addr = (unsigned int )tmp; tmp___0 = sg_page___2((obj->pages)->sgl); tmp___1 = kmap(tmp___0); ring->status_page.page_addr = (u32 *)tmp___1; memset((void *)ring->status_page.page_addr, 0, 4096UL); tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("init_status_page", "%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); } else { } return (0); } } /* init_phys_status_page: pre-gen4 path — allocate a coherent DMA page (drm_pci_alloc) for the status page instead of a GEM object, then zero it. */ static int init_phys_status_page(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; if ((unsigned long )dev_priv->status_page_dmah == (unsigned long )((struct drm_dma_handle *)0)) { 
dev_priv->status_page_dmah = drm_pci_alloc(ring->dev, 4096UL, 4096UL); if ((unsigned long )dev_priv->status_page_dmah == (unsigned long )((struct drm_dma_handle *)0)) { return (-12); } else { } } else { } ring->status_page.page_addr = (u32 *)(dev_priv->status_page_dmah)->vaddr; memset((void *)ring->status_page.page_addr, 0, 4096UL); return (0); } } /* intel_unpin_ringbuffer_obj: iounmap the WC mapping and unpin the ring buffer's backing object. */ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf ) { { iounmap((void volatile *)ringbuf->virtual_start); ringbuf->virtual_start = (void *)0; i915_gem_object_ggtt_unpin(ringbuf->obj); return; } } /* intel_pin_and_map_ringbuffer_obj: pin the backing object into the GGTT, move it to the GTT write domain, and ioremap_wc() it through the mappable aperture; unpins on every failure path. */ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev , struct intel_ringbuffer *ringbuf ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct drm_i915_gem_object *obj ; int ret ; unsigned long tmp___0 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; obj = ringbuf->obj; ret = i915_gem_obj_ggtt_pin(obj, 4096U, 1U); if (ret != 0) { return (ret); } else { } ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret != 0) { i915_gem_object_ggtt_unpin(obj); return (ret); } else { } tmp___0 = i915_gem_obj_ggtt_offset(obj); ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + (unsigned long long )tmp___0, (unsigned long )ringbuf->size); if ((unsigned long )ringbuf->virtual_start == (unsigned long )((void *)0)) { i915_gem_object_ggtt_unpin(obj); return (-22); } else { } return (0); } } /* intel_destroy_ringbuffer_obj: drop the reference to the ring buffer's backing GEM object. */ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf ) { { drm_gem_object_unreference___11(& (ringbuf->obj)->base); ringbuf->obj = (struct drm_i915_gem_object *)0; return; } } /* intel_alloc_ringbuffer_obj: prefer stolen memory on the flagged platforms, falling back to a regular shmem-backed object; marks the buffer GPU read-only (gt_ro). */ int intel_alloc_ringbuffer_obj(struct drm_device *dev , struct intel_ringbuffer *ringbuf ) { struct drm_i915_gem_object *obj ; struct drm_i915_private *__p ; { obj = (struct drm_i915_gem_object *)0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { obj = i915_gem_object_create_stolen(dev, (u32 )ringbuf->size); } else { } if ((unsigned long 
)obj == (unsigned long )((struct drm_i915_gem_object *)0)) { obj = i915_gem_alloc_object(dev, (size_t )ringbuf->size); } else { } if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return (-12); } else { } obj->gt_ro = 1U; ringbuf->obj = obj; return (0); } } /* intel_init_ring_buffer: allocate and wire up a 128 KiB legacy ring buffer — status page (GEM or phys, BUG on non-render phys rings), backing object, WC mapping, effective size trimmed by 128 bytes on two specific device IDs (CIL-expanded IS_I830/IS_845G quirk — confirm against the original source), then the command-parser tables. kfree + NULL on every error path. */ static int intel_init_ring_buffer(struct drm_device *dev , struct intel_engine_cs *ring ) { struct intel_ringbuffer *ringbuf ; int ret ; int __ret_warn_on ; long tmp ; void *tmp___0 ; struct lock_class_key __key ; long tmp___1 ; struct drm_i915_private *__p ; int __ret_warn_on___0 ; long tmp___2 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { __ret_warn_on = (unsigned long )ring->buffer != (unsigned long )((struct intel_ringbuffer *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 2003, "WARN_ON(ring->buffer)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = kzalloc(48UL, 208U); ringbuf = (struct intel_ringbuffer *)tmp___0; if ((unsigned long )ringbuf == (unsigned long )((struct intel_ringbuffer *)0)) { return (-12); } else { } ring->buffer = ringbuf; ring->dev = dev; INIT_LIST_HEAD(& ring->active_list); INIT_LIST_HEAD(& ring->request_list); INIT_LIST_HEAD(& ring->execlist_queue); i915_gem_batch_pool_init(dev, & ring->batch_pool); ringbuf->size = 131072; ringbuf->ring = ring; memset((void *)(& ring->semaphore.sync_seqno), 0, 16UL); __init_waitqueue_head(& ring->irq_queue, "&ring->irq_queue", & __key); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { ret = init_status_page(ring); if (ret != 0) { goto error; } else { } } else { tmp___1 = ldv__builtin_expect((unsigned int )ring->id != 0U, 0L); if (tmp___1 != 0L) { __asm__ 
volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c"), "i" (2026), "i" (12UL)); ldv_50258: ; goto ldv_50258; } else { } ret = init_phys_status_page(ring); if (ret != 0) { goto error; } else { } } __ret_warn_on___0 = (unsigned long )ringbuf->obj != (unsigned long )((struct drm_i915_gem_object *)0); tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 2032, "WARN_ON(ringbuf->obj)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); ret = intel_alloc_ringbuffer_obj(dev, ringbuf); if (ret != 0) { drm_err("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); goto error; } else { } ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); if (ret != 0) { drm_err("Failed to pin and map ringbuffer %s: %d\n", ring->name, ret); intel_destroy_ringbuffer_obj(ringbuf); goto error; } else { } ringbuf->effective_size = ringbuf->size; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 13687U) { ringbuf->effective_size = ringbuf->effective_size + -128; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 9570U) { ringbuf->effective_size = ringbuf->effective_size + -128; } else { } } ret = i915_cmd_parser_init_ring(ring); if (ret != 0) { goto error; } else { } return (0); error: kfree((void const *)ringbuf); ring->buffer = (struct intel_ringbuffer *)0; return (ret); } } 
/* intel_cleanup_ring_buffer() - full teardown of one engine: stop the ring,
 * WARN (expanded warn_slowpath_fmt) if a non-GEN2 engine is not idle
 * (ring MODE register at mmio_base+0x9C, MODE_IDLE == bit 9 / 512U), then
 * unpin+unmap and destroy the ring buffer object, release the outstanding
 * lazy request, run the engine's optional ->cleanup hook, free the status
 * page, the command-parser state and the batch pool, and finally kfree the
 * intel_ringbuffer itself.  No-op if the ring was never initialized. */
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; struct intel_ringbuffer *ringbuf ; bool tmp ; int tmp___0 ; int __ret_warn_on ; struct drm_i915_private *__p ; uint32_t tmp___1 ; int tmp___2 ; long tmp___3 ; { tmp = intel_ring_initialized(ring); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } dev_priv = to_i915((struct drm_device const *)ring->dev); ringbuf = ring->buffer; intel_stop_ring_buffer(ring); __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 156U), 1); if ((tmp___1 & 512U) == 0U) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } __ret_warn_on = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 2081, "WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); intel_unpin_ringbuffer_obj(ringbuf); intel_destroy_ringbuffer_obj(ringbuf); i915_gem_request_assign___2(& ring->outstanding_lazy_request, (struct drm_i915_gem_request *)0); if ((unsigned long )ring->cleanup != (unsigned long )((void (*)(struct intel_engine_cs * ))0)) { (*(ring->cleanup))(ring); } else { } cleanup_status_page(ring); i915_cmd_parser_fini_ring(ring); i915_gem_batch_pool_fini(& ring->batch_pool); kfree((void const *)ringbuf); ring->buffer = (struct intel_ringbuffer *)0; return; } }
/* ring_wait_for_space() - make at least n bytes available in the ring.
 * Fast path: return 0 if intel_ring_space() already reports >= n.  Otherwise
 * walk ring->request_list (the ldv_5029x labels/gotos are the CIL expansion of
 * list_for_each_entry; 0xffffffffffffffb8UL == -0x48 is the container_of
 * offset of the list member inside drm_i915_gem_request) looking for the first
 * request whose retirement would free >= n bytes, then block in
 * i915_wait_request() on it and publish the freed space.  Returns 0 on
 * success, -28 (-ENOSPC, after a WARN) if no request can ever free enough,
 * or the wait error. */
static int ring_wait_for_space(struct intel_engine_cs *ring , int n ) { struct intel_ringbuffer *ringbuf ; struct drm_i915_gem_request *request ; unsigned int space ; int ret ; int tmp ; struct list_head const *__mptr ; int
tmp___0 ; struct list_head const *__mptr___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; { ringbuf = ring->buffer; tmp = intel_ring_space(ringbuf); if (tmp >= n) { return (0); } else { } __mptr = (struct list_head const *)ring->request_list.next; request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; goto ldv_50300; ldv_50299: tmp___0 = __intel_ring_space((int )request->postfix, (int )ringbuf->tail, ringbuf->size); space = (unsigned int )tmp___0; if ((unsigned int )n <= space) { goto ldv_50298; } else { } __mptr___0 = (struct list_head const *)request->list.next; request = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffffb8UL; ldv_50300: ; if ((unsigned long )(& request->list) != (unsigned long )(& ring->request_list)) { goto ldv_50299; } else { } ldv_50298: __ret_warn_on = (unsigned long )(& request->list) == (unsigned long )(& ring->request_list); tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 2116, "WARN_ON(&request->list == &ring->request_list)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (-28); } else { } ret = i915_wait_request(request); if (ret != 0) { return (ret); } else { } ringbuf->space = (int )space; return (0); } }
/* intel_wrap_ring_buffer() - wrap the ring tail back to 0: wait until the
 * remaining bytes up to the end of the ring (rem) are free, then (body
 * continues on the next source line) zero-fill them via iowrite32.
 * The ldv_5031x labels are the CIL expansion of the fill loop. */
static int intel_wrap_ring_buffer(struct intel_engine_cs *ring ) { uint32_t *virt ; struct intel_ringbuffer *ringbuf ; int rem ; int ret ; int tmp ; uint32_t *tmp___0 ; int tmp___1 ; { ringbuf = ring->buffer; rem = (int )((u32 )ringbuf->size - ringbuf->tail); if (ringbuf->space < rem) { tmp = ring_wait_for_space(ring, rem); ret = tmp; if (ret != 0) { return (ret); } else { } } else { } virt = (uint32_t *)ringbuf->virtual_start + (unsigned long )ringbuf->tail; rem = rem / 4; goto ldv_50311;
ldv_50310: tmp___0 = virt; virt = virt + 1; iowrite32(0U, (void *)tmp___0); ldv_50311: tmp___1 = rem; rem = rem - 1; if (tmp___1 != 0) { goto ldv_50310; } else { } ringbuf->tail = 0U; intel_ring_update_space(ringbuf); return (0); } }
/* ^ tail of intel_wrap_ring_buffer() (started on the previous source line):
 * zero-fill the remaining dwords up to the end of the ring with iowrite32(0),
 * reset tail to 0 and recompute the available space. */
/* intel_ring_idle() - wait for the engine to go idle: first flush any
 * outstanding lazy request via __i915_add_request(); if the request list is
 * then empty there is nothing to wait for; otherwise wait (interruptibility
 * taken from dev_priv->mm.interruptible, current reset counter sampled
 * atomically) on the most recently submitted request (request_list.prev;
 * -0x48 is the container_of offset of the list member). */
int intel_ring_idle(struct intel_engine_cs *ring ) { struct drm_i915_gem_request *req ; int ret ; int tmp ; struct list_head const *__mptr ; struct drm_i915_private *tmp___0 ; struct drm_i915_private *tmp___1 ; int tmp___2 ; int tmp___3 ; { if ((unsigned long )ring->outstanding_lazy_request != (unsigned long )((struct drm_i915_gem_request *)0)) { ret = __i915_add_request(ring, (struct drm_file *)0, (struct drm_i915_gem_object *)0); if (ret != 0) { return (ret); } else { } } else { } tmp = list_empty((struct list_head const *)(& ring->request_list)); if (tmp != 0) { return (0); } else { } __mptr = (struct list_head const *)ring->request_list.prev; req = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffb8UL; tmp___0 = to_i915((struct drm_device const *)ring->dev); tmp___1 = to_i915((struct drm_device const *)ring->dev); tmp___2 = atomic_read((atomic_t const *)(& tmp___1->gpu_error.reset_counter)); tmp___3 = __i915_wait_request(req, (unsigned int )tmp___2, (int )tmp___0->mm.interruptible, (s64 *)0LL, (struct intel_rps_client *)0); return (tmp___3); } }
/* intel_ring_alloc_request_extras() - legacy-submission hook: a new request
 * simply uses its engine's single ring buffer.  Always succeeds. */
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request ) { { request->ringbuf = (request->ring)->buffer; return (0); } }
/* __intel_ring_prepare() - guarantee `bytes` contiguous bytes at the ring
 * tail: wrap first if tail+bytes would overrun effective_size, then wait for
 * space if still short.  Both conditions are marked unlikely via
 * ldv__builtin_expect (the CIL form of unlikely()). */
static int __intel_ring_prepare(struct intel_engine_cs *ring , int bytes ) { struct intel_ringbuffer *ringbuf ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ringbuf = ring->buffer; tmp___0 = ldv__builtin_expect(ringbuf->tail + (u32 )bytes > (u32 )ringbuf->effective_size, 0L); if (tmp___0 != 0L) { ret = intel_wrap_ring_buffer(ring); tmp = ldv__builtin_expect(ret != 0, 0L); if (tmp != 0L) { return (ret); } else { } } else { } tmp___2 = ldv__builtin_expect(ringbuf->space < bytes, 0L); if (tmp___2 != 0L) { ret =
ring_wait_for_space(ring, bytes); tmp___1 = ldv__builtin_expect(ret != 0, 0L); if (tmp___1 != 0L) { return (ret); } else { } } else { } return (0); } }
/* intel_ring_begin() - reserve num_dwords dwords of ring space for emission:
 * bail out if the GPU is wedged, prepare num_dwords*4 bytes, allocate a
 * request against the engine's default context, then deduct the reserved
 * bytes from the cached space counter. */
int intel_ring_begin(struct intel_engine_cs *ring , int num_dwords ) { struct drm_i915_private *dev_priv ; int ret ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; ret = i915_gem_check_wedge(& dev_priv->gpu_error, (int )dev_priv->mm.interruptible); if (ret != 0) { return (ret); } else { } ret = __intel_ring_prepare(ring, (int )((unsigned int )num_dwords * 4U)); if (ret != 0) { return (ret); } else { } ret = i915_gem_request_alloc(ring, ring->default_context); if (ret != 0) { return (ret); } else { } (ring->buffer)->space = (int )((unsigned int )(ring->buffer)->space - (unsigned int )((unsigned long )num_dwords) * 4U); return (0); } }
/* intel_ring_cacheline_align() - pad the ring tail out to a 64-byte cacheline
 * boundary by emitting zero dwords (a cacheline is 16 dwords; tail & 63 gives
 * the misalignment in bytes).  The ldv_5034x labels/gotos are the CIL
 * expansion of the `while (num_dwords--)` emit loop. */
int intel_ring_cacheline_align(struct intel_engine_cs *ring ) { int num_dwords ; int ret ; int tmp ; { num_dwords = (int )(((unsigned long )(ring->buffer)->tail & 63UL) / 4UL); if (num_dwords == 0) { return (0); } else { } num_dwords = (int )(16U - (unsigned int )num_dwords); ret = intel_ring_begin(ring, num_dwords); if (ret != 0) { return (ret); } else { } goto ldv_50341; ldv_50340: intel_ring_emit(ring, 0U); ldv_50341: tmp = num_dwords; num_dwords = num_dwords - 1; if (tmp != 0) { goto ldv_50340; } else { } intel_ring_advance(ring); return (0); } }
/* intel_ring_init_seqno() - (head; body continues on the next source line)
 * reprogram the engine's seqno after e.g. a reset.  The inline __asm__ ud2
 * block is the expansion of BUG_ON(ring->outstanding_lazy_request). */
void intel_ring_init_seqno(struct intel_engine_cs *ring , u32 seqno ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; long tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = ring->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((unsigned long )ring->outstanding_lazy_request != (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c"), "i" (2255), "i" (12UL)); ldv_50349: ; goto ldv_50349; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 7U) { _L: /* CIL Label */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 64U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 68U), 0U, 1); __p = to_i915((struct drm_device const *)dev); if (((int )__p->info.ring_mask & 8) != 0) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 72U), 0U, 1); } else { } } else { } } (*(ring->set_seqno))(ring, seqno); ring->hangcheck.seqno = seqno; return; } } extern void __compiletime_assert_2298(void) ; static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring , u32 value ) { struct drm_i915_private *dev_priv ; int _a ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; bool __cond___2 ; bool __cond___3 ; bool __cond___4 ; { dev_priv = (struct drm_i915_private *)(ring->dev)->dev_private; _a = 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 73808L, (uint32_t )((_a << 16) | _a), 1); (*(dev_priv->uncore.funcs.mmio_writeq))(dev_priv, 74136L, 0ULL, 1); tmp = msecs_to_jiffies(50U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50398; ldv_50397: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 73808L, 1); if ((tmp___0 & 8U) != 0U) { ret__ = -110; } else { } goto ldv_50396; } else { } tmp___1 = drm_can_sleep___3(); if ((int )tmp___1) { usleep_range(1000UL, 
2000UL); } else { cpu_relax(); } ldv_50398: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 73808L, 1); if ((tmp___2 & 8U) != 0U) { goto ldv_50397; } else { } ldv_50396: ; if (ret__ != 0) { drm_err("timed out waiting for the BSD ring to wake up\n"); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ring->mmio_base + 48U), value, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ring->mmio_base + 48U), 0); __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_2298(); } else { } __cond___3 = 0; if ((int )__cond___3) { __compiletime_assert_2298(); } else { } __cond___4 = 0; if ((int )__cond___4) { __compiletime_assert_2298(); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 73808L, (uint32_t )65536, 1); return; } } static int gen6_bsd_ring_flush(struct intel_engine_cs *ring , u32 invalidate , u32 flush ) { uint32_t cmd ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } cmd = 318767105U; __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { cmd = cmd + 1U; } else { } cmd = cmd | 2113536U; if ((invalidate & 62U) != 0U) { cmd = cmd | 262272U; } else { } intel_ring_emit(ring, cmd); intel_ring_emit(ring, 260U); __p___0 = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); } else { intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); } intel_ring_advance(ring); return (0); } } static int gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring , u64 offset , u32 len , unsigned int dispatch_flags ) { bool ppgtt ; int ret ; { ppgtt = (bool )(i915.enable_ppgtt != 0 && (dispatch_flags & 1U) == 0U); ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, (u32 )(((int )ppgtt << 8) | 411041793)); 
intel_ring_emit(ring, (unsigned int )offset); intel_ring_emit(ring, (unsigned int )(offset >> 32ULL)); intel_ring_emit(ring, 0U); intel_ring_advance(ring); return (0); } } static int hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring , u64 offset , u32 len , unsigned int dispatch_flags ) { int ret ; { ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, (int )dispatch_flags & 1 ? 411041792U : 411050240U); intel_ring_emit(ring, (u32 )offset); intel_ring_advance(ring); return (0); } } static int gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring , u64 offset , u32 len , unsigned int dispatch_flags ) { int ret ; { ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, (int )dispatch_flags & 1 ? 411041792U : 411042048U); intel_ring_emit(ring, (u32 )offset); intel_ring_advance(ring); return (0); } } static int gen6_ring_flush(struct intel_engine_cs *ring , u32 invalidate , u32 flush ) { struct drm_device *dev ; uint32_t cmd ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = ring->dev; ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } cmd = 318767105U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { cmd = cmd + 1U; } else { } cmd = cmd | 2113536U; if ((invalidate & 2U) != 0U) { cmd = cmd | 262144U; } else { } intel_ring_emit(ring, cmd); intel_ring_emit(ring, 260U); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); } else { intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); } intel_ring_advance(ring); return (0); } } int intel_init_render_ring_buffer(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct drm_i915_gem_object *obj ; int ret ; bool tmp ; int __ret_warn_on ; long tmp___0 ; unsigned 
long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; unsigned long tmp___4 ; unsigned long tmp___5 ; bool tmp___6 ; struct drm_i915_private *__p ; bool tmp___7 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; int _max1 ; int _max2 ; unsigned long tmp___8 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; struct drm_i915_private *__p___13 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring); ring->name = "render ring"; ring->id = 0; ring->mmio_base = 8192U; __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 7U) { tmp = i915_semaphore_is_enabled(dev); if ((int )tmp) { obj = i915_gem_alloc_object(dev, 4096UL); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { drm_err("Failed to allocate semaphore bo. Disabling semaphores\n"); i915.semaphores = 0; } else { i915_gem_object_set_cache_level(obj, 1); ret = i915_gem_obj_ggtt_pin(obj, 0U, 2U); if (ret != 0) { drm_gem_object_unreference___11(& obj->base); drm_err("Failed to pin semaphore bo. 
Disabling semaphores\n"); i915.semaphores = 0; } else { dev_priv->semaphore_obj = obj; } } } else { } ring->init_context = & intel_rcs_ctx_init; ring->add_request = & gen6_add_request; ring->flush = & gen8_render_ring_flush; ring->irq_get = & gen8_ring_get_irq; ring->irq_put = & gen8_ring_put_irq; ring->irq_enable_mask = 1U; ring->get_seqno = & gen6_ring_get_seqno; ring->set_seqno = & ring_set_seqno; tmp___6 = i915_semaphore_is_enabled(dev); if ((int )tmp___6) { __ret_warn_on = (unsigned long )dev_priv->semaphore_obj == (unsigned long )((struct drm_i915_gem_object *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ringbuffer.c", 2495, "WARN_ON(!dev_priv->semaphore_obj)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ring->semaphore.sync_to = & gen8_ring_sync; ring->semaphore.signal = & gen8_rcs_signal; if ((unsigned long )dev_priv->semaphore_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_50489; } else { } tmp___1 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[0] = (u64 )(tmp___1 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL); tmp___2 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[1] = (u64 )((tmp___2 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 8UL); tmp___3 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[2] = (u64 )((tmp___3 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 16UL); tmp___4 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[3] = (u64 )((tmp___4 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 24UL); tmp___5 = 
i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[4] = (u64 )((tmp___5 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 32UL); ring->semaphore.__annonCompField77.signal_ggtt[(unsigned int )ring->id] = 196608ULL; ldv_50489: ; } else { } } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 5U) { ring->add_request = & gen6_add_request; ring->flush = & gen7_render_ring_flush; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { ring->flush = & gen6_render_ring_flush; } else { } ring->irq_get = & gen6_ring_get_irq; ring->irq_put = & gen6_ring_put_irq; ring->irq_enable_mask = 1U; ring->get_seqno = & gen6_ring_get_seqno; ring->set_seqno = & ring_set_seqno; tmp___7 = i915_semaphore_is_enabled(dev); if ((int )tmp___7) { ring->semaphore.sync_to = & gen6_ring_sync; ring->semaphore.signal = & gen6_signal; ring->semaphore.__annonCompField77.mbox.wait[0] = 196608U; ring->semaphore.__annonCompField77.mbox.wait[1] = 131072U; ring->semaphore.__annonCompField77.mbox.wait[2] = 0U; ring->semaphore.__annonCompField77.mbox.wait[3] = 131072U; ring->semaphore.__annonCompField77.mbox.wait[4] = 196608U; ring->semaphore.__annonCompField77.mbox.signal[0] = 0U; ring->semaphore.__annonCompField77.mbox.signal[1] = 73796U; ring->semaphore.__annonCompField77.mbox.signal[2] = 139328U; ring->semaphore.__annonCompField77.mbox.signal[3] = 106564U; ring->semaphore.__annonCompField77.mbox.signal[4] = 0U; } else { } } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 5U) { ring->add_request = & pc_render_add_request; ring->flush = & gen4_render_ring_flush; ring->get_seqno = & pc_render_get_seqno; ring->set_seqno = & pc_render_set_seqno; ring->irq_get = & gen5_ring_get_irq; ring->irq_put = & gen5_ring_put_irq; ring->irq_enable_mask = 17U; } else { ring->add_request = & 
i9xx_add_request; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 3U) { ring->flush = & gen2_render_ring_flush; } else { ring->flush = & gen4_render_ring_flush; } ring->get_seqno = & ring_get_seqno; ring->set_seqno = & ring_set_seqno; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 2U) { ring->irq_get = & i8xx_ring_get_irq; ring->irq_put = & i8xx_ring_put_irq; } else { ring->irq_get = & i9xx_ring_get_irq; ring->irq_put = & i9xx_ring_put_irq; } ring->irq_enable_mask = 2U; } } } ring->write_tail = & ring_write_tail; __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) != 0U) { ring->dispatch_execbuffer = & hsw_ring_dispatch_execbuffer; } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___9->info.gen) == 8U) { ring->dispatch_execbuffer = & gen8_ring_dispatch_execbuffer; } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) > 5U) { ring->dispatch_execbuffer = & gen6_ring_dispatch_execbuffer; } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) > 3U) { ring->dispatch_execbuffer = & i965_dispatch_execbuffer; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___5->info.device_id) == 13687U) { ring->dispatch_execbuffer = & i830_dispatch_execbuffer; } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___6->info.device_id) == 9570U) { ring->dispatch_execbuffer = & i830_dispatch_execbuffer; } else { ring->dispatch_execbuffer = & i915_dispatch_execbuffer; } } } } } } ring->init_hw = & init_render_ring; ring->cleanup = & render_ring_cleanup; __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short 
)__p___11->info.device_id) == 13687U) { goto _L; } else { __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___12->info.device_id) == 9570U) { _L: /* CIL Label */ _max1 = 8192; _max2 = 262144; obj = i915_gem_alloc_object(dev, (size_t )(_max1 > _max2 ? _max1 : _max2)); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { drm_err("Failed to allocate batch bo\n"); return (-12); } else { } ret = i915_gem_obj_ggtt_pin(obj, 0U, 0U); if (ret != 0) { drm_gem_object_unreference___11(& obj->base); drm_err("Failed to ping batch bo\n"); return (ret); } else { } ring->scratch.obj = obj; tmp___8 = i915_gem_obj_ggtt_offset(obj); ring->scratch.gtt_offset = (u32 )tmp___8; } else { } } ret = intel_init_ring_buffer(dev, ring); if (ret != 0) { return (ret); } else { } __p___13 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___13->info.gen) > 4U) { ret = intel_init_pipe_control(ring); if (ret != 0) { return (ret); } else { } } else { } return (0); } } int intel_init_bsd_ring_buffer(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; struct drm_i915_private *__p ; unsigned long tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; bool tmp___4 ; bool tmp___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring) + 1UL; ring->name = "bsd ring"; ring->id = 1; ring->write_tail = & ring_write_tail; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 5U) { ring->mmio_base = 73728U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { ring->write_tail = & gen6_bsd_ring_write_tail; } else { } ring->flush = & 
gen6_bsd_ring_flush; ring->add_request = & gen6_add_request; ring->get_seqno = & gen6_ring_get_seqno; ring->set_seqno = & ring_set_seqno; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { ring->irq_enable_mask = 1U; ring->irq_get = & gen8_ring_get_irq; ring->irq_put = & gen8_ring_put_irq; ring->dispatch_execbuffer = & gen8_ring_dispatch_execbuffer; tmp___4 = i915_semaphore_is_enabled(dev); if ((int )tmp___4) { ring->semaphore.sync_to = & gen8_ring_sync; ring->semaphore.signal = & gen8_xcs_signal; if ((unsigned long )dev_priv->semaphore_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_50600; } else { } tmp = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[0] = (u64 )(tmp + (unsigned long )((unsigned int )ring->id * 5U) * 8UL); tmp___0 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[1] = (u64 )((tmp___0 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 8UL); tmp___1 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[2] = (u64 )((tmp___1 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 16UL); tmp___2 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[3] = (u64 )((tmp___2 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 24UL); tmp___3 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[4] = (u64 )((tmp___3 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 32UL); ring->semaphore.__annonCompField77.signal_ggtt[(unsigned int )ring->id] = 196608ULL; ldv_50600: ; } else { } } else { ring->irq_enable_mask = 4096U; ring->irq_get = & gen6_ring_get_irq; ring->irq_put = & gen6_ring_put_irq; ring->dispatch_execbuffer = & gen6_ring_dispatch_execbuffer; tmp___5 = i915_semaphore_is_enabled(dev); if ((int )tmp___5) { 
ring->semaphore.sync_to = & gen6_ring_sync; ring->semaphore.signal = & gen6_signal; ring->semaphore.__annonCompField77.mbox.wait[0] = 0U; ring->semaphore.__annonCompField77.mbox.wait[1] = 196608U; ring->semaphore.__annonCompField77.mbox.wait[2] = 131072U; ring->semaphore.__annonCompField77.mbox.wait[3] = 65536U; ring->semaphore.__annonCompField77.mbox.wait[4] = 196608U; ring->semaphore.__annonCompField77.mbox.signal[0] = 8256U; ring->semaphore.__annonCompField77.mbox.signal[1] = 0U; ring->semaphore.__annonCompField77.mbox.signal[2] = 139332U; ring->semaphore.__annonCompField77.mbox.signal[3] = 106568U; ring->semaphore.__annonCompField77.mbox.signal[4] = 0U; } else { } } } else { ring->mmio_base = 16384U; ring->flush = & bsd_ring_flush; ring->add_request = & i9xx_add_request; ring->get_seqno = & ring_get_seqno; ring->set_seqno = & ring_set_seqno; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 5U) { ring->irq_enable_mask = 32U; ring->irq_get = & gen5_ring_get_irq; ring->irq_put = & gen5_ring_put_irq; } else { ring->irq_enable_mask = 33554432U; ring->irq_get = & i9xx_ring_get_irq; ring->irq_put = & i9xx_ring_put_irq; } ring->dispatch_execbuffer = & i965_dispatch_execbuffer; } ring->init_hw = & init_ring_common; tmp___6 = intel_init_ring_buffer(dev, ring); return (tmp___6); } } int intel_init_bsd2_ring_buffer(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; unsigned long tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; bool tmp___4 ; int tmp___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring) + 4UL; ring->name = "bsd2 ring"; ring->id = 4; ring->write_tail = & ring_write_tail; ring->mmio_base = 114688U; ring->flush = & gen6_bsd_ring_flush; ring->add_request = & gen6_add_request; ring->get_seqno = & gen6_ring_get_seqno; ring->set_seqno = & 
/* NOTE(review): CIL-generated verification source. This span begins inside the
 * preceding ring-init function (its start is outside this chunk); the gen8_*
 * callbacks and the per-ring GGTT semaphore signal table (5 slots of 8 bytes,
 * self-slot poisoned with MI_SEMAPHORE_SYNC_INVALID = 196608ULL) are set up
 * here before falling through to intel_init_ring_buffer(). */
ring_set_seqno; ring->irq_enable_mask = 65536U; ring->irq_get = & gen8_ring_get_irq; ring->irq_put = & gen8_ring_put_irq; ring->dispatch_execbuffer = & gen8_ring_dispatch_execbuffer; tmp___4 = i915_semaphore_is_enabled(dev); if ((int )tmp___4) { ring->semaphore.sync_to = & gen8_ring_sync; ring->semaphore.signal = & gen8_xcs_signal; if ((unsigned long )dev_priv->semaphore_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_50612; } else { } tmp = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[0] = (u64 )(tmp + (unsigned long )((unsigned int )ring->id * 5U) * 8UL); tmp___0 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[1] = (u64 )((tmp___0 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 8UL); tmp___1 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[2] = (u64 )((tmp___1 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 16UL); tmp___2 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[3] = (u64 )((tmp___2 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 24UL); tmp___3 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[4] = (u64 )((tmp___3 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 32UL); ring->semaphore.__annonCompField77.signal_ggtt[(unsigned int )ring->id] = 196608ULL; ldv_50612: ; } else { } ring->init_hw = & init_ring_common; tmp___5 = intel_init_ring_buffer(dev, ring); return (tmp___5); } }
/* intel_init_blt_ring_buffer - initialise the blitter engine (ring slot 2,
 * name "blitter ring", mmio_base 139264U). gen > 7 takes the gen8 irq /
 * dispatch / GGTT-semaphore path; older gens use gen6 irq handlers and the
 * mbox wait/signal register tables. Returns intel_init_ring_buffer()'s result. */
int intel_init_blt_ring_buffer(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; unsigned long tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; bool tmp___4 ; bool tmp___5 ; struct drm_i915_private *__p ; int tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private;
ring = (struct intel_engine_cs *)(& dev_priv->ring) + 2UL; ring->name = "blitter ring"; ring->id = 2; ring->mmio_base = 139264U; ring->write_tail = & ring_write_tail; ring->flush = & gen6_ring_flush; ring->add_request = & gen6_add_request; ring->get_seqno = & gen6_ring_get_seqno; ring->set_seqno = & ring_set_seqno; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { ring->irq_enable_mask = 65536U; ring->irq_get = & gen8_ring_get_irq; ring->irq_put = & gen8_ring_put_irq; ring->dispatch_execbuffer = & gen8_ring_dispatch_execbuffer; tmp___4 = i915_semaphore_is_enabled(dev); if ((int )tmp___4) { ring->semaphore.sync_to = & gen8_ring_sync; ring->semaphore.signal = & gen8_xcs_signal; if ((unsigned long )dev_priv->semaphore_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_50624; } else { } tmp = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[0] = (u64 )(tmp + (unsigned long )((unsigned int )ring->id * 5U) * 8UL); tmp___0 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[1] = (u64 )((tmp___0 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 8UL); tmp___1 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[2] = (u64 )((tmp___1 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 16UL); tmp___2 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[3] = (u64 )((tmp___2 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 24UL); tmp___3 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[4] = (u64 )((tmp___3 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 32UL); ring->semaphore.__annonCompField77.signal_ggtt[(unsigned int )ring->id] = 196608ULL; ldv_50624: ; } else { } } else { ring->irq_enable_mask = 4194304U; ring->irq_get = &
gen6_ring_get_irq; ring->irq_put = & gen6_ring_put_irq; ring->dispatch_execbuffer = & gen6_ring_dispatch_execbuffer; tmp___5 = i915_semaphore_is_enabled(dev); if ((int )tmp___5) { ring->semaphore.signal = & gen6_signal; ring->semaphore.sync_to = & gen6_ring_sync; ring->semaphore.__annonCompField77.mbox.wait[0] = 131072U; ring->semaphore.__annonCompField77.mbox.wait[1] = 0U; ring->semaphore.__annonCompField77.mbox.wait[2] = 196608U; ring->semaphore.__annonCompField77.mbox.wait[3] = 0U; ring->semaphore.__annonCompField77.mbox.wait[4] = 196608U; ring->semaphore.__annonCompField77.mbox.signal[0] = 8260U; ring->semaphore.__annonCompField77.mbox.signal[1] = 73792U; ring->semaphore.__annonCompField77.mbox.signal[2] = 0U; ring->semaphore.__annonCompField77.mbox.signal[3] = 106560U; ring->semaphore.__annonCompField77.mbox.signal[4] = 0U; } else { } } ring->init_hw = & init_ring_common; tmp___6 = intel_init_ring_buffer(dev, ring); return (tmp___6); } }
/* intel_init_vebox_ring_buffer - initialise the video-enhancement engine
 * (ring slot 3, mmio_base 106496U); gen > 7 uses gen8 irq/dispatch, older
 * gens use the HSW vebox irq handlers (continues on the next chunk line). */
int intel_init_vebox_ring_buffer(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; unsigned long tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; bool tmp___4 ; bool tmp___5 ; struct drm_i915_private *__p ; int tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring) + 3UL; ring->name = "video enhancement ring"; ring->id = 3; ring->mmio_base = 106496U; ring->write_tail = & ring_write_tail; ring->flush = & gen6_ring_flush; ring->add_request = & gen6_add_request; ring->get_seqno = & gen6_ring_get_seqno; ring->set_seqno = & ring_set_seqno; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 7U) { ring->irq_enable_mask = 1U; ring->irq_get = & gen8_ring_get_irq; ring->irq_put = & gen8_ring_put_irq; ring->dispatch_execbuffer = & gen8_ring_dispatch_execbuffer; tmp___4 = i915_semaphore_is_enabled(dev); if ((int )tmp___4) {
/* Continuation of intel_init_vebox_ring_buffer: gen8 GGTT semaphore table,
 * then the pre-gen8 HSW vebox irq + gen6 mbox semaphore setup. */
ring->semaphore.sync_to = & gen8_ring_sync; ring->semaphore.signal = & gen8_xcs_signal; if ((unsigned long )dev_priv->semaphore_obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_50636; } else { } tmp = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[0] = (u64 )(tmp + (unsigned long )((unsigned int )ring->id * 5U) * 8UL); tmp___0 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[1] = (u64 )((tmp___0 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 8UL); tmp___1 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[2] = (u64 )((tmp___1 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 16UL); tmp___2 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[3] = (u64 )((tmp___2 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 24UL); tmp___3 = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); ring->semaphore.__annonCompField77.signal_ggtt[4] = (u64 )((tmp___3 + (unsigned long )((unsigned int )ring->id * 5U) * 8UL) + 32UL); ring->semaphore.__annonCompField77.signal_ggtt[(unsigned int )ring->id] = 196608ULL; ldv_50636: ; } else { } } else { ring->irq_enable_mask = 1024U; ring->irq_get = & hsw_vebox_get_irq; ring->irq_put = & hsw_vebox_put_irq; ring->dispatch_execbuffer = & gen6_ring_dispatch_execbuffer; tmp___5 = i915_semaphore_is_enabled(dev); if ((int )tmp___5) { ring->semaphore.sync_to = & gen6_ring_sync; ring->semaphore.signal = & gen6_signal; ring->semaphore.__annonCompField77.mbox.wait[0] = 65536U; ring->semaphore.__annonCompField77.mbox.wait[1] = 65536U; ring->semaphore.__annonCompField77.mbox.wait[2] = 65536U; ring->semaphore.__annonCompField77.mbox.wait[3] = 196608U; ring->semaphore.__annonCompField77.mbox.wait[4] = 196608U; ring->semaphore.__annonCompField77.mbox.signal[0] = 8264U; ring->semaphore.__annonCompField77.mbox.signal[1]
= 73800U; ring->semaphore.__annonCompField77.mbox.signal[2] = 139336U; ring->semaphore.__annonCompField77.mbox.signal[3] = 0U; ring->semaphore.__annonCompField77.mbox.signal[4] = 0U; } else { } } ring->init_hw = & init_ring_common; tmp___6 = intel_init_ring_buffer(dev, ring); return (tmp___6); } }
/* intel_ring_flush_all_caches - if the ring has dirty GPU caches, emit a
 * flush (domains 62U) via ring->flush, trace it, and clear the dirty flag.
 * Returns 0 on success or the flush callback's error code. */
int intel_ring_flush_all_caches(struct intel_engine_cs *ring ) { int ret ; { if (! ring->gpu_caches_dirty) { return (0); } else { } ret = (*(ring->flush))(ring, 0U, 62U); if (ret != 0) { return (ret); } else { } trace_i915_gem_ring_flush(ring, 0U, 62U); ring->gpu_caches_dirty = 0; return (0); } }
/* intel_ring_invalidate_all_caches - invalidate domains 62U, additionally
 * flushing them first when the dirty flag is set; clears the flag on success. */
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring ) { uint32_t flush_domains ; int ret ; { flush_domains = 0U; if ((int )ring->gpu_caches_dirty) { flush_domains = 62U; } else { } ret = (*(ring->flush))(ring, 62U, flush_domains); if (ret != 0) { return (ret); } else { } trace_i915_gem_ring_flush(ring, 62U, flush_domains); ring->gpu_caches_dirty = 0; return (0); } }
/* intel_stop_ring_buffer - quiesce an initialised ring before teardown;
 * logs an error if idling fails while no GPU reset is in progress, then
 * calls stop_ring(). No-op for uninitialised rings. */
void intel_stop_ring_buffer(struct intel_engine_cs *ring ) { int ret ; bool tmp ; int tmp___0 ; struct drm_i915_private *tmp___1 ; bool tmp___2 ; int tmp___3 ; { tmp = intel_ring_initialized(ring); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } ret = intel_ring_idle(ring); if (ret != 0) { tmp___1 = to_i915((struct drm_device const *)ring->dev); tmp___2 = i915_reset_in_progress(& tmp___1->gpu_error); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { drm_err("failed to quiesce %s whilst cleaning up: %d\n", ring->name, ret); } else { } } else { } stop_ring(ring); return; } }
/* LDV model wrappers: forward to the real workqueue API and record the work
 * item in the verifier's state machine (activate_work_18 / state 2). */
bool ldv_queue_work_on_429(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_430(int ldv_func_arg1 , struct
workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_431(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_432(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_433(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern void ___might_sleep(char const * , int , int ) ;
/* arch_local_save_flags___12 - CIL expansion of the x86 paravirt
 * save_fl hook; the BUG trap fires if pv_irq_ops.save_fl.func is NULL
 * (the asm string continues on the following chunk line). */
__inline static unsigned long arch_local_save_flags___12(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile
("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static bool static_key_false___9(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int rcu_read_lock_sched_held___9(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___12(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } __inline static int timer_pending(struct timer_list const *timer ) { { return ((unsigned long )timer->entry.pprev != (unsigned long )((struct hlist_node **/* const */)0)); } } extern int mod_timer_pinned(struct timer_list * , unsigned long ) ; int ldv_mod_timer_pinned_448(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_del_timer_sync_449(struct timer_list *ldv_func_arg1 ) ; bool 
ldv_queue_work_on_443(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_445(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_444(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_447(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_446(struct workqueue_struct *ldv_func_arg1 ) ; __inline static unsigned char readb(void const volatile *addr ) { unsigned char ret ; { __asm__ volatile ("movb %1,%0": "=q" (ret): "m" (*((unsigned char volatile *)addr)): "memory"); return (ret); } } __inline static unsigned short readw(void const volatile *addr ) { unsigned short ret ; { __asm__ volatile ("movw %1,%0": "=r" (ret): "m" (*((unsigned short volatile *)addr)): "memory"); return (ret); } } __inline static void writeb(unsigned char val , void volatile *addr ) { { __asm__ volatile ("movb %0,%1": : "q" (val), "m" (*((unsigned char volatile *)addr)): "memory"); return; } } __inline static void writew(unsigned short val , void volatile *addr ) { { __asm__ volatile ("movw %0,%1": : "r" (val), "m" (*((unsigned short volatile *)addr)): "memory"); return; } } extern int _cond_resched(void) ; void activate_suitable_timer_21(struct timer_list *timer , unsigned long data ) ; void choose_timer_21(void) ; void ldv_timer_21(int state , struct timer_list *timer ) ; extern int pci_bus_read_config_byte(struct pci_bus * , unsigned int , int , u8 * ) ; extern int pci_bus_write_config_byte(struct pci_bus * , unsigned int , int , u8 ) ; __inline static int pci_read_config_byte(struct pci_dev const *dev , int where , u8 *val ) { int tmp ; { tmp = pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); return (tmp); } } __inline 
static int pci_write_config_byte(struct pci_dev const *dev , int where , u8 val ) { int tmp ; { tmp = pci_bus_write_config_byte(dev->bus, dev->devfn, where, (int )val); return (tmp); } } __inline static bool drm_can_sleep___4(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39975; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39975; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39975; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39975; default: __bad_percpu_size(); } ldv_39975: pscr_ret__ = pfo_ret__; goto ldv_39981; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39985; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39985; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39985; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39985; default: __bad_percpu_size(); } ldv_39985: pscr_ret__ = pfo_ret_____0; goto ldv_39981; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39994; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39994; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39994; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39994; default: __bad_percpu_size(); } ldv_39994: pscr_ret__ = pfo_ret_____1; goto ldv_39981; case 8UL: ; 
switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40003; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40003; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40003; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40003; default: __bad_percpu_size(); } ldv_40003: pscr_ret__ = pfo_ret_____2; goto ldv_39981; default: __bad_size_call_parameter(); goto ldv_39981; } ldv_39981: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___12(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } __inline static void trace_i915_reg_rw___0(bool write , u32 reg , u64 val , int len , bool trace ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_412 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_414 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___9(& __tracepoint_i915_reg_rw.key); if ((int )tmp___1) { if (! trace) { return; } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_reg_rw.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___9(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 656, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_46690: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , bool , u32 , u64 , int , bool ))it_func))(__data, (int )write, reg, val, len, (int )trace); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_46690; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } if ((int )trace) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_reg_rw.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
/* Tail of the trace_i915_reg_rw tracepoint inline (its start is outside this
 * chunk): lockdep-checked rcu_dereference of the tracepoint funcs list. */
__warned___0) { tmp___3 = rcu_read_lock_sched_held___9(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 656, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } return; } }
/* intel_uncore.c section: public uncore prototypes. */
void intel_uncore_init(struct drm_device *dev ) ; void intel_uncore_fini(struct drm_device *dev ) ; int i915_reg_read_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; int i915_get_reset_stats_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) ; void i915_check_vgpu(struct drm_device *dev ) ;
/* Human-readable names for the three forcewake domain ids (0..2). */
static char const * const forcewake_domain_names[3U] = { "render", "blitter", "media"};
/* intel_uncore_forcewake_domain_to_str - map a domain id to its name;
 * WARNs and returns "unknown" for ids outside 0..2. */
char const *intel_uncore_forcewake_domain_to_str(enum forcewake_domain_id const id ) { int __ret_warn_on ; long tmp ; { if ((unsigned int )id <= 2U) { return ((char const *)forcewake_domain_names[(unsigned int )id]); } else { } __ret_warn_on = (unsigned int )id != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 61, "WARN_ON(id)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return ("unknown"); } }
/* assert_device_not_suspended - WARN (once) if register access happens while
 * runtime-suspended on a platform that needs forcewake. The chained __p___N
 * to_i915() tests are the CIL expansion of one platform-check macro; the
 * "+ 45UL" byte reads are packed feature-flag accesses (presumably
 * IS_VALLEYVIEW/IS_CHERRYVIEW-style checks -- exact flags not visible here). */
static void assert_device_not_suspended(struct drm_i915_private *dev_priv ) { bool __warned ; int __ret_warn_once ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { goto _L; } else { __p___0
= to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { _L: /* CIL Label */ if ((int )dev_priv->pm.suspended) { tmp = 1; } else { tmp = 0; } } else { tmp = 0; } } } } } __ret_warn_once = tmp; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 70, "Device suspended\n"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } }
/* fw_domain_reset - write the domain's reset value to its set-register;
 * WARNs if the register offset was never initialised (continues on the
 * next chunk line). */
__inline static void fw_domain_reset(struct intel_uncore_forcewake_domain const *d ) { int __ret_warn_on ; long tmp ; { __ret_warn_on = (unsigned int )d->reg_set == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 76, "WARN_ON(d->reg_set == 0)"); } else { }
ldv__builtin_expect(__ret_warn_on != 0, 0L); writel(d->val_reset, (void volatile *)(d->i915)->regs + (unsigned long )d->reg_set); return; } }
/* fw_domain_arm_timer - re-arm the domain's release timer for the next jiffy
 * (ldv wrapper around mod_timer_pinned). */
__inline static void fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d ) { { ldv_mod_timer_pinned_448(& d->timer, (unsigned long )jiffies + 1UL); return; } }
/* fw_domain_wait_ack_clear - poll reg_ack until bit 0 clears, up to a 2 ms
 * jiffies deadline; logs an error on timeout (-110 == -ETIMEDOUT). */
__inline static void fw_domain_wait_ack_clear(struct intel_uncore_forcewake_domain const *d ) { char const *tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; unsigned int tmp___1 ; unsigned int tmp___2 ; { tmp___0 = msecs_to_jiffies(2U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48241; ldv_48240: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = readl((void const volatile *)(d->i915)->regs + (unsigned long )d->reg_ack); if ((int )tmp___1 & 1) { ret__ = -110; } else { } goto ldv_48239; } else { } cpu_relax(); ldv_48241: tmp___2 = readl((void const volatile *)(d->i915)->regs + (unsigned long )d->reg_ack); if ((int )tmp___2 & 1) { goto ldv_48240; } else { } ldv_48239: ; if (ret__ != 0) { tmp = intel_uncore_forcewake_domain_to_str(d->id); drm_err("%s: timed out waiting for forcewake ack to clear.\n", tmp); } else { } return; } }
/* fw_domain_get - request forcewake by writing the domain's set value. */
__inline static void fw_domain_get(struct intel_uncore_forcewake_domain const *d ) { { writel(d->val_set, (void volatile *)(d->i915)->regs + (unsigned long )d->reg_set); return; } }
/* fw_domain_wait_ack - poll reg_ack until bit 0 is set (ack asserted),
 * same 2 ms deadline and error report as the clear variant. */
__inline static void fw_domain_wait_ack(struct intel_uncore_forcewake_domain const *d ) { char const *tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; unsigned int tmp___1 ; unsigned int tmp___2 ; { tmp___0 = msecs_to_jiffies(2U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48259; ldv_48258: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = readl((void const volatile *)(d->i915)->regs + (unsigned long )d->reg_ack); if ((tmp___1 & 1U) == 0U) { ret__ = -110; } else { } goto ldv_48257; } else { } cpu_relax(); ldv_48259: tmp___2
= readl((void const volatile *)(d->i915)->regs + (unsigned long )d->reg_ack); if ((tmp___2 & 1U) == 0U) { goto ldv_48258; } else { } ldv_48257: ; if (ret__ != 0) { tmp = intel_uncore_forcewake_domain_to_str(d->id); drm_err("%s: timed out waiting for forcewake ack request.\n", tmp); } else { } return; } }
/* fw_domain_put - release forcewake by writing the domain's clear value. */
__inline static void fw_domain_put(struct intel_uncore_forcewake_domain const *d ) { { writel(d->val_clear, (void volatile *)(d->i915)->regs + (unsigned long )d->reg_set); return; } }
/* fw_domain_posting_read - posting read of reg_post (if configured) to flush
 * the preceding MMIO write. */
__inline static void fw_domain_posting_read(struct intel_uncore_forcewake_domain const *d ) { { if ((unsigned int )d->reg_post != 0U) { readl((void const volatile *)(d->i915)->regs + (unsigned long )d->reg_post); } else { } return; } }
/* fw_domains_get - for every domain id 0..2 selected by both the device's
 * available domains and fw_domains: wait-clear, request, wait-ack. */
static void fw_domains_get(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { struct intel_uncore_forcewake_domain *d ; enum forcewake_domain_id id ; { id = 0; d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48274; ldv_48273: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains) & (unsigned int )(1 << (int )id)) != 0U) { fw_domain_wait_ack_clear((struct intel_uncore_forcewake_domain const *)d); fw_domain_get((struct intel_uncore_forcewake_domain const *)d); fw_domain_wait_ack((struct intel_uncore_forcewake_domain const *)d); } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48274: ; if ((unsigned int )id <= 2U) { goto ldv_48273; } else { } return; } }
/* fw_domains_put - release each selected domain and issue a posting read. */
static void fw_domains_put(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { struct intel_uncore_forcewake_domain *d ; enum forcewake_domain_id id ; { id = 0; d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48283; ldv_48282: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains) & (unsigned int )(1 << (int )id)) != 0U)
{ fw_domain_put((struct intel_uncore_forcewake_domain const *)d); fw_domain_posting_read((struct intel_uncore_forcewake_domain const *)d); } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48283: ; if ((unsigned int )id <= 2U) { goto ldv_48282; } else { } return; } }
/* fw_domains_posting_read - posting read on the first available domain only
 * (loop breaks via goto ldv_48290 after the first hit). */
static void fw_domains_posting_read(struct drm_i915_private *dev_priv ) { struct intel_uncore_forcewake_domain *d ; enum forcewake_domain_id id ; { id = 0; d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48292; ldv_48291: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )(1 << (int )id)) & 7U) != 0U) { fw_domain_posting_read((struct intel_uncore_forcewake_domain const *)d); goto ldv_48290; } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48292: ; if ((unsigned int )id <= 2U) { goto ldv_48291; } else { } ldv_48290: ; return; } }
/* fw_domains_reset - write reset values to each selected domain, then a
 * single posting read; no-op when the device has no forcewake domains. */
static void fw_domains_reset(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { struct intel_uncore_forcewake_domain *d ; enum forcewake_domain_id id ; { if ((unsigned int )dev_priv->uncore.fw_domains == 0U) { return; } else { } id = 0; d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48300; ldv_48299: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains) & (unsigned int )(1 << (int )id)) != 0U) { fw_domain_reset((struct intel_uncore_forcewake_domain const *)d); } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48300: ; if ((unsigned int )id <= 2U) { goto ldv_48299; } else { } fw_domains_posting_read(dev_priv); return; } }
/* __gen6_gt_wait_for_thread_c0 - poll the register at offset 1278044U until
 * its low three bits clear, with a 1 ms jiffies deadline; logs on timeout. */
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private
*dev_priv ) { unsigned long timeout__ ; unsigned long tmp ; int ret__ ; unsigned int tmp___0 ; unsigned int tmp___1 ; { tmp = msecs_to_jiffies(1U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48315; ldv_48314: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = readl((void const volatile *)dev_priv->regs + 1278044U); if ((tmp___0 & 7U) != 0U) { ret__ = -110; } else { } goto ldv_48313; } else { } cpu_relax(); ldv_48315: tmp___1 = readl((void const volatile *)dev_priv->regs + 1278044U); if ((tmp___1 & 7U) != 0U) { goto ldv_48314; } else { } ldv_48313: ; if (ret__ != 0) { drm_err("GT thread status wait timed out\n"); } else { } return; } }
/* fw_domains_get_with_thread_status - grab forcewake then wait for the GT
 * thread-status bits to settle. */
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { { fw_domains_get(dev_priv, fw_domains); __gen6_gt_wait_for_thread_c0(dev_priv); return; } }
/* gen6_gt_check_fifodbg - read the GT FIFO debug register (1179648U); WARN
 * and write the value back (clearing the sticky bits) if it is non-zero. */
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv ) { u32 gtfifodbg ; int __ret_warn_on ; long tmp ; long tmp___0 ; { gtfifodbg = readl((void const volatile *)dev_priv->regs + 1179648U); __ret_warn_on = gtfifodbg != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 203, "GT wake FIFO error 0x%x\n", gtfifodbg); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { writel(gtfifodbg, (void volatile *)dev_priv->regs + 1179648U); } else { } return; } }
/* fw_domains_put_with_fifo - release forcewake, then check FIFO debug state. */
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { { fw_domains_put(dev_priv, fw_domains); gen6_gt_check_fifodbg(dev_priv); return; } }
/* fifo_free_entries - low 7 bits of the GT FIFO free-entries register
 * (offset 1179656U). */
__inline static u32 fifo_free_entries(struct drm_i915_private *dev_priv ) { u32 count ; unsigned int tmp ; { tmp = readl((void const volatile
*)dev_priv->regs + 1179656U); count = tmp; return (count & 127U); } }
/* __gen6_gt_wait_for_fifo - ensure a free GT FIFO entry before an MMIO write:
 * busy-polls (up to 500 iterations with udelay) while free entries <= 20,
 * WARNs if the reserve is still exhausted, and decrements the cached count.
 * Returns the number of WARN-level failures (0 on success). */
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv ) { int ret ; struct drm_i915_private *__p ; int loop ; u32 fifo ; u32 tmp ; int tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; { ret = 0; __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); } else { } if (dev_priv->uncore.fifo_count <= 19U) { loop = 500; tmp = fifo_free_entries(dev_priv); fifo = tmp; goto ldv_48348; ldv_48347: __const_udelay(42950UL); fifo = fifo_free_entries(dev_priv); ldv_48348: ; if (fifo <= 20U) { tmp___0 = loop; loop = loop - 1; if (tmp___0 != 0) { goto ldv_48347; } else { goto ldv_48349; } } else { } ldv_48349: __ret_warn_on = loop < 0 && fifo <= 20U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 238, "WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { ret = ret + 1; } else { } dev_priv->uncore.fifo_count = fifo; } else { } dev_priv->uncore.fifo_count = dev_priv->uncore.fifo_count - 1U; return (ret); } }
/* intel_uncore_fw_release_timer - timer callback dropping one forcewake
 * reference under uncore.lock; WARNs on a zero wake_count (continues on the
 * next chunk line). */
static void intel_uncore_fw_release_timer(unsigned long arg ) { struct intel_uncore_forcewake_domain *domain ; unsigned long irqflags ; raw_spinlock_t *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; { domain = (struct intel_uncore_forcewake_domain *)arg; assert_device_not_suspended(domain->i915); tmp = spinlock_check(& (domain->i915)->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); __ret_warn_on = domain->wake_count == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 255, "WARN_ON(domain->wake_count == 0)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { domain->wake_count = domain->wake_count + 1U; } else { } domain->wake_count = domain->wake_count - 1U; if (domain->wake_count == 0U) { (*((domain->i915)->uncore.funcs.force_wake_put))(domain->i915, (enum forcewake_domains )(1 << (int )domain->id)); } else { } spin_unlock_irqrestore(& (domain->i915)->uncore.lock, irqflags); return; } } void intel_uncore_forcewake_reset(struct drm_device *dev , bool restore ) { struct drm_i915_private *dev_priv ; unsigned long irqflags ; struct intel_uncore_forcewake_domain *domain ; int retry_count ; enum forcewake_domain_id id ; enum forcewake_domains fw ; enum forcewake_domains active_domains ; int tmp ; raw_spinlock_t *tmp___0 ; int tmp___1 ; int __ret_warn_on ; long tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; retry_count = 100; fw = 0; ldv_48385: active_domains = 0; id = 0; domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48375; ldv_48374: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )(1 << (int )id)) & 7U) != 0U) { tmp = ldv_del_timer_sync_449(& domain->timer); if (tmp == 0) { goto ldv_48373; } else { } intel_uncore_fw_release_timer((unsigned long )domain); } else { } ldv_48373: id = (enum forcewake_domain_id )((unsigned int )id + 1U); domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48375: ; if ((unsigned int )id <= 2U) { goto ldv_48374; } else { } tmp___0 = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp___0); id = 0; domain = 
(struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48381; ldv_48380: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )(1 << (int )id)) & 7U) != 0U) { tmp___1 = timer_pending((struct timer_list const *)(& domain->timer)); if (tmp___1 != 0) { active_domains = (enum forcewake_domains )((unsigned int )(1 << (int )id) | (unsigned int )active_domains); } else { } } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48381: ; if ((unsigned int )id <= 2U) { goto ldv_48380; } else { } if ((unsigned int )active_domains == 0U) { goto ldv_48383; } else { } retry_count = retry_count - 1; if (retry_count == 0) { drm_err("Timed out waiting for forcewake timers to finish\n"); goto ldv_48383; } else { } spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); ___might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 304, 0); _cond_resched(); goto ldv_48385; ldv_48383: __ret_warn_on = (unsigned int )active_domains != 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 307, "WARN_ON(active_domains)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); id = 0; domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48389; ldv_48388: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )(1 << (int )id)) & 7U) != 0U) { if (domain->wake_count != 0U) { fw = (enum forcewake_domains )((unsigned int )(1 << (int )id) | (unsigned int )fw); } 
else { } } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48389: ; if ((unsigned int )id <= 2U) { goto ldv_48388; } else { } if ((unsigned int )fw != 0U) { (*(dev_priv->uncore.funcs.force_wake_put))(dev_priv, fw); } else { } fw_domains_reset(dev_priv, 7); if ((int )restore) { if ((unsigned int )fw != 0U) { (*(dev_priv->uncore.funcs.force_wake_get))(dev_priv, fw); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 7U) { dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); } else { } } } else { } if (! restore) { assert_forcewakes_inactive(dev_priv); } else { } spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } } static void intel_uncore_ellc_detect(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; unsigned int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 8U) { _L: /* CIL Label */ tmp = readl((void const volatile *)dev_priv->regs + 1179664U); if ((int )tmp & 1) { dev_priv->ellc_size = 128UL; 
printk("\016[drm] Found %zuMB of eLLC\n", dev_priv->ellc_size); } else { } } else { } } } return; } } static void __intel_uncore_early_sanitize(struct drm_device *dev , bool restore_forcewake ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; unsigned int tmp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; unsigned int tmp___0 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 47UL) != 0U) { writel(2147483648U, (void volatile *)dev_priv->regs + 271104U); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { tmp = readl((void const volatile *)dev_priv->regs + 1179648U); writel(tmp, (void volatile *)dev_priv->regs + 1179648U); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 7U) { tmp = readl((void const volatile *)dev_priv->regs + 1179648U); writel(tmp, (void volatile *)dev_priv->regs + 1179648U); } else { } } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { tmp___0 = readl((void const volatile *)dev_priv->regs + 1179656U); writel(tmp___0 | 6144U, (void volatile *)dev_priv->regs + 1179656U); } else { } } else { } intel_uncore_forcewake_reset(dev, (int )restore_forcewake); return; } } void intel_uncore_early_sanitize(struct drm_device *dev , bool restore_forcewake ) { { __intel_uncore_early_sanitize(dev, (int )restore_forcewake); i915_check_and_clear_faults(dev); return; } } void intel_uncore_sanitize(struct drm_device *dev ) { { intel_disable_gt_powersave(dev); return; } } static void __intel_uncore_forcewake_get(struct drm_i915_private 
*dev_priv , enum forcewake_domains fw_domains ) { struct intel_uncore_forcewake_domain *domain ; enum forcewake_domain_id id ; unsigned int tmp ; { if ((unsigned long )dev_priv->uncore.funcs.force_wake_get == (unsigned long )((void (*)(struct drm_i915_private * , enum forcewake_domains ))0)) { return; } else { } fw_domains = (enum forcewake_domains )((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains); id = 0; domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48480; ldv_48479: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains) & (unsigned int )(1 << (int )id)) != 0U) { tmp = domain->wake_count; domain->wake_count = domain->wake_count + 1U; if (tmp != 0U) { fw_domains = (enum forcewake_domains )((unsigned int )(~ (1 << (int )id)) & (unsigned int )fw_domains); } else { } } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48480: ; if ((unsigned int )id <= 2U) { goto ldv_48479; } else { } if ((unsigned int )fw_domains != 0U) { (*(dev_priv->uncore.funcs.force_wake_get))(dev_priv, fw_domains); } else { } return; } } void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { unsigned long irqflags ; int __ret_warn_on ; long tmp ; raw_spinlock_t *tmp___0 ; { if ((unsigned long )dev_priv->uncore.funcs.force_wake_get == (unsigned long )((void (*)(struct drm_i915_private * , enum forcewake_domains ))0)) { return; } else { } __ret_warn_on = (int )dev_priv->pm.suspended; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 427, "WARN_ON(dev_priv->pm.suspended)"); } else { } 
ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp___0); __intel_uncore_forcewake_get(dev_priv, fw_domains); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } } void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { int tmp ; long tmp___0 ; { tmp = queued_spin_is_locked(& dev_priv->uncore.lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c"), "i" (445), "i" (12UL)); ldv_48496: ; goto ldv_48496; } else { } if ((unsigned long )dev_priv->uncore.funcs.force_wake_get == (unsigned long )((void (*)(struct drm_i915_private * , enum forcewake_domains ))0)) { return; } else { } __intel_uncore_forcewake_get(dev_priv, fw_domains); return; } } static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { struct intel_uncore_forcewake_domain *domain ; enum forcewake_domain_id id ; int __ret_warn_on ; long tmp ; long tmp___0 ; { if ((unsigned long )dev_priv->uncore.funcs.force_wake_put == (unsigned long )((void (*)(struct drm_i915_private * , enum forcewake_domains ))0)) { return; } else { } fw_domains = (enum forcewake_domains )((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains); id = 0; domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48507; ldv_48506: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains) & (unsigned int )(1 << (int )id)) != 0U) { __ret_warn_on = 
domain->wake_count == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 465, "WARN_ON(domain->wake_count == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { goto ldv_48505; } else { } domain->wake_count = domain->wake_count - 1U; if (domain->wake_count != 0U) { goto ldv_48505; } else { } domain->wake_count = domain->wake_count + 1U; fw_domain_arm_timer(domain); } else { } ldv_48505: id = (enum forcewake_domain_id )((unsigned int )id + 1U); domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48507: ; if ((unsigned int )id <= 2U) { goto ldv_48506; } else { } return; } } void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { unsigned long irqflags ; raw_spinlock_t *tmp ; { if ((unsigned long )dev_priv->uncore.funcs.force_wake_put == (unsigned long )((void (*)(struct drm_i915_private * , enum forcewake_domains ))0)) { return; } else { } tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); __intel_uncore_forcewake_put(dev_priv, fw_domains); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } } void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { int tmp ; long tmp___0 ; { tmp = queued_spin_is_locked(& dev_priv->uncore.lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c"), "i" (508), "i" (12UL)); ldv_48521: ; goto ldv_48521; } else { } if ((unsigned long )dev_priv->uncore.funcs.force_wake_put == (unsigned long )((void (*)(struct drm_i915_private * , enum forcewake_domains ))0)) { return; } else { } __intel_uncore_forcewake_put(dev_priv, fw_domains); return; } } void assert_forcewakes_inactive(struct drm_i915_private *dev_priv ) { struct intel_uncore_forcewake_domain *domain ; enum forcewake_domain_id id ; int __ret_warn_on ; long tmp ; { if ((unsigned long )dev_priv->uncore.funcs.force_wake_get == (unsigned long )((void (*)(struct drm_i915_private * , enum forcewake_domains ))0)) { return; } else { } id = 0; domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48530; ldv_48529: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )(1 << (int )id)) & 7U) != 0U) { __ret_warn_on = domain->wake_count != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 525, "WARN_ON(domain->wake_count)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } id = (enum forcewake_domain_id )((unsigned int )id + 1U); domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48530: ; if ((unsigned int )id <= 2U) { goto ldv_48529; } else { } return; } } static void ilk_dummy_write(struct drm_i915_private *dev_priv ) { { writel(0U, (void volatile *)dev_priv->regs + 8348U); return; } } static void hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv , u32 reg , bool read , 
bool before ) { char const *op ; char const *when ; int __ret_warn_on ; long tmp ; unsigned int tmp___0 ; { op = (int )read ? "reading" : "writing to"; when = (int )before ? "before" : "after"; if (i915.mmio_debug == 0) { return; } else { } tmp___0 = readl((void const volatile *)dev_priv->regs + 271104U); if ((int )tmp___0 < 0) { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 620, "Unclaimed register detected %s %s register 0x%x\n", when, op, reg); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); writel(2147483648U, (void volatile *)dev_priv->regs + 271104U); i915.mmio_debug = i915.mmio_debug - 1; } else { } return; } } static void hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv ) { bool mmio_debug_once ; long tmp ; unsigned int tmp___0 ; { mmio_debug_once = 1; if (i915.mmio_debug != 0 || ! mmio_debug_once) { return; } else { } tmp___0 = readl((void const volatile *)dev_priv->regs + 271104U); if ((int )tmp___0 < 0) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_unclaimed_reg_detect", "Unclaimed register detected, enabling oneshot unclaimed register reporting. Please use i915.mmio_debug=N for more information.\n"); } else { } writel(2147483648U, (void volatile *)dev_priv->regs + 271104U); mmio_debug_once = ! 
mmio_debug_once; i915.mmio_debug = (int )mmio_debug_once; } else { } return; } } static u8 gen5_read8(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u8 val ; { val = 0U; assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); val = readb((void const volatile *)dev_priv->regs + (unsigned long )reg); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 1, (int )trace); return (val); } } static u16 gen5_read16(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u16 val ; { val = 0U; assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); val = readw((void const volatile *)dev_priv->regs + (unsigned long )reg); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 2, (int )trace); return (val); } } static u32 gen5_read32(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u32 val ; { val = 0U; assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); val = readl((void const volatile *)dev_priv->regs + (unsigned long )reg); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 4, (int )trace); return (val); } } static u64 gen5_read64(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u64 val ; unsigned long tmp ; { val = 0ULL; assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); tmp = readq((void const volatile *)dev_priv->regs + (unsigned long )reg); val = (u64 )tmp; trace_i915_reg_rw___0(0, (u32 )reg, val, 8, (int )trace); return (val); } } static u8 gen2_read8(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u8 val ; { val = 0U; assert_device_not_suspended(dev_priv); val = readb((void const volatile *)dev_priv->regs + (unsigned long )reg); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 1, (int )trace); return (val); } } static u16 gen2_read16(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u16 val ; { val = 0U; assert_device_not_suspended(dev_priv); val = readw((void const volatile *)dev_priv->regs + (unsigned long )reg); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 
2, (int )trace); return (val); } } static u32 gen2_read32(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u32 val ; { val = 0U; assert_device_not_suspended(dev_priv); val = readl((void const volatile *)dev_priv->regs + (unsigned long )reg); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 4, (int )trace); return (val); } } static u64 gen2_read64(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { u64 val ; unsigned long tmp ; { val = 0ULL; assert_device_not_suspended(dev_priv); tmp = readq((void const volatile *)dev_priv->regs + (unsigned long )reg); val = (u64 )tmp; trace_i915_reg_rw___0(0, (u32 )reg, val, 8, (int )trace); return (val); } } __inline static void __force_wake_get(struct drm_i915_private *dev_priv , enum forcewake_domains fw_domains ) { struct intel_uncore_forcewake_domain *domain ; enum forcewake_domain_id id ; int __ret_warn_on ; long tmp ; long tmp___0 ; { __ret_warn_on = (unsigned int )fw_domains == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 700, "WARN_ON(!fw_domains)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } id = 0; domain = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain); goto ldv_48608; ldv_48607: ; if ((((unsigned int )dev_priv->uncore.fw_domains & (unsigned int )fw_domains) & (unsigned int )(1 << (int )id)) != 0U) { if (domain->wake_count != 0U) { fw_domains = (enum forcewake_domains )((unsigned int )(~ (1 << (int )id)) & (unsigned int )fw_domains); goto ldv_48606; } else { } domain->wake_count = domain->wake_count + 1U; fw_domain_arm_timer(domain); } else { } ldv_48606: id = (enum forcewake_domain_id )((unsigned int )id + 1U); domain = (struct 
intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )id; ldv_48608: ; if ((unsigned int )id <= 2U) { goto ldv_48607; } else { } if ((unsigned int )fw_domains != 0U) { (*(dev_priv->uncore.funcs.force_wake_get))(dev_priv, fw_domains); } else { } return; } } static u8 vgpu_read8(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u8 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); val = readb((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 1, (int )trace); return (val); } } static u16 vgpu_read16(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u16 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); val = readw((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 2, (int )trace); return (val); } } static u32 vgpu_read32(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u32 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); val = readl((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 4, (int )trace); return (val); } } static u64 vgpu_read64(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u64 val ; raw_spinlock_t *tmp ; unsigned long tmp___0 ; { val = 0ULL; assert_device_not_suspended(dev_priv); tmp = 
spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); tmp___0 = readq((void const volatile *)dev_priv->regs + (unsigned long )reg); val = (u64 )tmp___0; spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, val, 8, (int )trace); return (val); } } static u8 gen9_read8(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; u8 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } val = readb((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 1, (int )trace); return (val); } } static u16 gen9_read16(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; u16 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = 
_raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } val = readw((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 2, (int )trace); return (val); } } static u32 gen9_read32(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; u32 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 
106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } val = readl((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 4, (int )trace); return (val); } } static u64 gen9_read64(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; u64 val ; raw_spinlock_t *tmp ; unsigned long tmp___0 ; { val = 0ULL; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } tmp___0 = readq((void const volatile *)dev_priv->regs + (unsigned long )reg); val = (u64 )tmp___0; spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, val, 8, (int )trace); return (val); } } static u8 chv_read8(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; 
u8 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } val = readb((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 1, (int )trace); return (val); } } static u16 chv_read16(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u16 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && 
reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } val = readw((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 2, (int )trace); return (val); } } static u32 chv_read32(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u32 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } val = readl((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 4, (int )trace); return (val); } } static u64 chv_read64(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u64 val ; raw_spinlock_t *tmp ; unsigned long tmp___0 ; { val = 0ULL; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 
/* Continuation of chv_read64(): finish range decode, readq, unlock, trace (size 8). */
59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } tmp___0 = readq((void const volatile *)dev_priv->regs + (unsigned long )reg); val = (u64 )tmp___0; spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, val, 8, (int )trace); return (val); } }
/* vlv_read8(): Valleyview 8-bit read. Only two forcewake windows here:
 * domain 1 (render) or domain 4 (media); everything else needs no wake. */
static u8 vlv_read8(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u8 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if ((((reg > 8191L && reg <= 16383L) || (reg > 20479L && reg <= 32767L)) || (reg > 45055L && reg <= 73727L)) || (reg > 188415L && reg <= 196607L)) { __force_wake_get(dev_priv, 1); } else if (((reg > 73727L && reg <= 81919L) || (reg > 139263L && reg <= 147455L)) || (reg > 196607L && reg <= 262143L)) { __force_wake_get(dev_priv, 4); } else { } val = readb((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 1, (int )trace); return (val); } }
/* vlv_read16(): same decode as vlv_read8 but 16-bit readw / trace size 2. */
static u16 vlv_read16(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u16 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if ((((reg > 8191L && reg <= 16383L) || (reg > 20479L && reg <= 32767L)) || (reg > 45055L && reg <= 
/* Continuation of vlv_read16(). */
73727L)) || (reg > 188415L && reg <= 196607L)) { __force_wake_get(dev_priv, 1); } else if (((reg > 73727L && reg <= 81919L) || (reg > 139263L && reg <= 147455L)) || (reg > 196607L && reg <= 262143L)) { __force_wake_get(dev_priv, 4); } else { } val = readw((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 2, (int )trace); return (val); } }
/* vlv_read32(): Valleyview 32-bit read, same render/media window decode. */
static u32 vlv_read32(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u32 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if ((((reg > 8191L && reg <= 16383L) || (reg > 20479L && reg <= 32767L)) || (reg > 45055L && reg <= 73727L)) || (reg > 188415L && reg <= 196607L)) { __force_wake_get(dev_priv, 1); } else if (((reg > 73727L && reg <= 81919L) || (reg > 139263L && reg <= 147455L)) || (reg > 196607L && reg <= 262143L)) { __force_wake_get(dev_priv, 4); } else { } val = readl((void const volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 4, (int )trace); return (val); } }
/* vlv_read64(): Valleyview 64-bit read via readq (unsigned long intermediate). */
static u64 vlv_read64(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u64 val ; raw_spinlock_t *tmp ; unsigned long tmp___0 ; { val = 0ULL; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if ((((reg > 8191L && reg <= 16383L) || (reg > 20479L && reg <= 32767L)) || (reg > 45055L && reg <= 73727L)) || (reg > 188415L && reg <= 196607L)) { __force_wake_get(dev_priv, 1); } else if (((reg > 73727L && reg <= 81919L) || (reg > 139263L && reg <= 147455L)) || (reg > 196607L && reg <= 262143L)) { __force_wake_get(dev_priv, 4); } else { } tmp___0 = readq((void const volatile 
/* Continuation of vlv_read64(). */
*)dev_priv->regs + (unsigned long )reg); val = (u64 )tmp___0; spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, val, 8, (int )trace); return (val); } }
/* gen6_read8(): gen6/7 8-bit read. Brackets the access with
 * hsw_unclaimed_reg_debug(before=1 / after=0) and wakes the render domain for
 * any reg <= 0x3ffff except 41356 (0xA18C — presumably the FORCEWAKE register
 * itself, excluded to avoid waking on its own access; TODO confirm). */
static u8 gen6_read8(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u8 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 1); if (reg <= 262143L && reg != 41356L) { __force_wake_get(dev_priv, 1); } else { } val = readb((void const volatile *)dev_priv->regs + (unsigned long )reg); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 0); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 1, (int )trace); return (val); } }
/* gen6_read16(): as gen6_read8 but readw / trace size 2. */
static u16 gen6_read16(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u16 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 1); if (reg <= 262143L && reg != 41356L) { __force_wake_get(dev_priv, 1); } else { } val = readw((void const volatile *)dev_priv->regs + (unsigned long )reg); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 0); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 2, (int )trace); return (val); } }
/* gen6_read32(): as gen6_read8 but readl / trace size 4. */
static u32 gen6_read32(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u32 val ; raw_spinlock_t *tmp ; { val = 0U; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 1); if (reg <= 262143L && reg != 41356L) { __force_wake_get(dev_priv, 1); } else { } val = readl((void const 
/* Continuation of gen6_read32(). */
volatile *)dev_priv->regs + (unsigned long )reg); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 0); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, (u64 )val, 4, (int )trace); return (val); } }
/* gen6_read64(): as gen6_read8 but readq (unsigned long intermediate). */
static u64 gen6_read64(struct drm_i915_private *dev_priv , off_t reg , bool trace ) { unsigned long irqflags ; u64 val ; raw_spinlock_t *tmp ; unsigned long tmp___0 ; { val = 0ULL; assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 1); if (reg <= 262143L && reg != 41356L) { __force_wake_get(dev_priv, 1); } else { } tmp___0 = readq((void const volatile *)dev_priv->regs + (unsigned long )reg); val = (u64 )tmp___0; hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 1, 0); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); trace_i915_reg_rw___0(0, (u32 )reg, val, 8, (int )trace); return (val); } }
/* gen5_write8(): Ironlake-era write: trace, then ilk_dummy_write() (a posting
 * workaround — see its definition elsewhere in the file), then the MMIO write.
 * No locking or forcewake on gen5. */
static void gen5_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen5_write16(): 16-bit variant of gen5_write8. */
static void gen5_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); writew((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen5_write32(): 32-bit variant of gen5_write8. */
static void gen5_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen5_write64(): 64-bit variant; val narrowed to unsigned long for writeq. */
static void gen5_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool 
/* Continuation of gen5_write64(). */
trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); ilk_dummy_write(dev_priv); writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen2_write8(): earliest gens — bare MMIO write, no lock/forcewake/dummy write. */
static void gen2_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen2_write16(): 16-bit variant. */
static void gen2_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); assert_device_not_suspended(dev_priv); writew((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen2_write32(): 32-bit variant. */
static void gen2_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen2_write64(): 64-bit variant. */
static void gen2_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool trace ) { { trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long )reg); return; } }
/* gen8 "shadowed" registers: writes to these don't need forcewake. */
static u32 const gen8_shadowed_regs[7U] = { 41352U, 40968U, 40972U, 8240U, 73776U, 106544U, 139312U};
/* is_gen8_shadowed(): linear search of gen8_shadowed_regs (CIL goto-loop form). */
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv , u32 reg ) { int i ; { i = 0; goto ldv_48871; ldv_48870: ; if ((u32 )gen8_shadowed_regs[i] == reg) { return (1); } else { } i = i + 1; ldv_48871: ; if ((unsigned int )i <= 6U) { goto ldv_48870; } else { } return (0); } }
/* gen9 shadowed-register table (9 entries). */
static u32 const gen9_shadowed_regs[9U] = { 8240U, 73776U, 106544U, 139312U, 41352U, 41592U, 41584U, 40968U, 40972U};
/* is_gen9_shadowed(): linear search of gen9_shadowed_regs. */
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv , u32 reg ) { int i ; { i = 0; goto ldv_48882; 
/* Continuation of is_gen9_shadowed(). */
ldv_48881: ; if ((u32 )gen9_shadowed_regs[i] == reg) { return (1); } else { } i = i + 1; ldv_48882: ; if ((unsigned int )i <= 8U) { goto ldv_48881; } else { } return (0); } }
/* gen9_write8(): Skylake-class 8-bit write. Decodes @reg into a forcewake
 * domain bitmask fw_engine: 0 for out-of-range/shadowed regs, 1/4/5 for the
 * decoded render/media/common windows, 2 ("blitter"? — TODO confirm) as the
 * fallback; wakes that domain, writes under the uncore lock. */
static void gen9_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else { tmp___0 = is_gen9_shadowed(dev_priv, (u32 )reg); if ((int )tmp___0) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen9_write16(): identical decode to gen9_write8, 16-bit writew. */
static void gen9_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); 
/* Continuation of gen9_write16(): same range decode as gen9_write8. */
irqflags = _raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else { tmp___0 = is_gen9_shadowed(dev_priv, (u32 )reg); if ((int )tmp___0) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } writew((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen9_write32(): identical decode, 32-bit writel. */
static void gen9_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else { tmp___0 = is_gen9_shadowed(dev_priv, (u32 )reg); if ((int )tmp___0) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && 
/* Continuation of gen9_write32(). */
reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen9_write64(): identical decode, 64-bit writeq (val narrowed to unsigned long). */
static void gen9_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool trace ) { enum forcewake_domains fw_engine ; unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; { trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg > 262143L || (reg > 2815L && reg <= 8191L)) { fw_engine = 0; } else { tmp___0 = is_gen9_shadowed(dev_priv, (u32 )reg); if ((int )tmp___0) { fw_engine = 0; } else if (((((((((reg > 8191L && reg <= 9983L) || (reg > 12287L && reg <= 16383L)) || (reg > 20991L && reg <= 32767L)) || (reg > 33087L && reg <= 33119L)) || (reg > 33535L && reg <= 34047L)) || (reg > 35839L && reg <= 36095L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59647L)) || (reg > 148479L && reg <= 149503L)) { fw_engine = 1; } else if ((((((reg > 33071L && reg <= 33087L) || (reg > 34815L && reg <= 35327L)) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 125439L)) || (reg > 196607L && reg <= 262143L)) { fw_engine = 4; } else if (reg > 37887L && reg <= 38911L) { fw_engine = 5; } else { fw_engine = 2; } } if ((unsigned int )fw_engine != 0U) { __force_wake_get(dev_priv, fw_engine); } else { } writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long 
/* Continuation of gen9_write64(). */
)reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* chv_write8(): Cherryview 8-bit write. Shadowed registers (is_gen8_shadowed)
 * skip forcewake entirely; otherwise decode render(1)/media(4)/common(5)
 * windows as in chv_read32. */
static void chv_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { bool shadowed ; bool tmp ; unsigned long irqflags ; raw_spinlock_t *tmp___0 ; { tmp = is_gen8_shadowed(dev_priv, (u32 )reg); shadowed = tmp; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); tmp___0 = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp___0); if (! shadowed) { if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } } else { } writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* chv_write16(): 16-bit variant of chv_write8. */
static void chv_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { bool shadowed ; bool tmp ; unsigned long irqflags ; raw_spinlock_t *tmp___0 ; { tmp = is_gen8_shadowed(dev_priv, (u32 )reg); shadowed = tmp; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); assert_device_not_suspended(dev_priv); tmp___0 = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp___0); if (! 
/* Continuation of chv_write16(): range decode + writew. */
shadowed) { if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } } else { } writew((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* chv_write32(): 32-bit variant of chv_write8. */
static void chv_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { bool shadowed ; bool tmp ; unsigned long irqflags ; raw_spinlock_t *tmp___0 ; { tmp = is_gen8_shadowed(dev_priv, (u32 )reg); shadowed = tmp; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); tmp___0 = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp___0); if (! 
/* Continuation of chv_write32(): range decode + writel. */
shadowed) { if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } } else { } writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* chv_write64(): 64-bit variant of chv_write8 (writeq). */
static void chv_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool trace ) { bool shadowed ; bool tmp ; unsigned long irqflags ; raw_spinlock_t *tmp___0 ; { tmp = is_gen8_shadowed(dev_priv, (u32 )reg); shadowed = tmp; trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); tmp___0 = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp___0); if (! 
/* Continuation of chv_write64(): range decode + writeq. */
shadowed) { if (((((reg > 8191L && reg <= 16383L) || (reg > 20991L && reg <= 32767L)) || (reg > 33535L && reg <= 34047L)) || (reg > 45055L && reg <= 46207L)) || (reg > 57343L && reg <= 59391L)) { __force_wake_get(dev_priv, 1); } else if ((((((reg > 34815L && reg <= 35071L) || (reg > 53247L && reg <= 55295L)) || (reg > 73727L && reg <= 81919L)) || (reg > 106495L && reg <= 114687L)) || (reg > 124927L && reg <= 125439L)) || (reg > 196607L && reg <= 229375L)) { __force_wake_get(dev_priv, 4); } else if (((((reg > 16383L && reg <= 20479L) || (reg > 32767L && reg <= 33535L)) || (reg > 34047L && reg <= 34303L)) || (reg > 36863L && reg <= 45055L)) || (reg > 61439L && reg <= 65535L)) { __force_wake_get(dev_priv, 5); } else { } } else { } writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen8_write8(): Broadwell 8-bit write. Render forcewake for reg <= 0x3ffff
 * unless the register is gen8-shadowed; brackets the write with
 * hsw_unclaimed_reg_debug and runs hsw_unclaimed_reg_detect afterwards. */
static void gen8_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; int tmp___1 ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); if (reg <= 262143L) { tmp___0 = is_gen8_shadowed(dev_priv, (u32 )reg); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __force_wake_get(dev_priv, 1); } else { } } else { } writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen8_write16(): 16-bit variant of gen8_write8. */
static void gen8_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; int tmp___1 ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); 
/* Continuation of gen8_write16(). */
assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); if (reg <= 262143L) { tmp___0 = is_gen8_shadowed(dev_priv, (u32 )reg); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __force_wake_get(dev_priv, 1); } else { } } else { } writew((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen8_write32(): 32-bit variant of gen8_write8. */
static void gen8_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; int tmp___1 ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); if (reg <= 262143L) { tmp___0 = is_gen8_shadowed(dev_priv, (u32 )reg); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __force_wake_get(dev_priv, 1); } else { } } else { } writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen8_write64(): 64-bit variant of gen8_write8 (writeq). */
static void gen8_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; bool tmp___0 ; int tmp___1 ; { trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); if (reg <= 262143L) { tmp___0 = is_gen8_shadowed(dev_priv, (u32 )reg); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { 
/* Continuation of gen8_write64(). */
__force_wake_get(dev_priv, 1); } else { } writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long )reg); hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* hsw_write8(): Haswell 8-bit write. For GT-range regs (<= 0x3ffff, excluding
 * 41356/0xA18C) waits for FIFO space first (__gen6_gt_wait_for_fifo); if that
 * returned nonzero, checks FIFODBG after the write. Also does the
 * unclaimed-register debug/detect dance. */
static void hsw_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* hsw_write16(): 16-bit variant of hsw_write8. */
static void hsw_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); writew((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } 
/* Continuation of hsw_write16(). */
hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* hsw_write32(): 32-bit variant of hsw_write8. */
static void hsw_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* hsw_write64(): 64-bit variant of hsw_write8 (writeq). */
static void hsw_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 1); writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } hsw_unclaimed_reg_debug(dev_priv, (u32 )reg, 0, 0); hsw_unclaimed_reg_detect(dev_priv); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); 
/* Continuation (end) of hsw_write64(). */
return; } }
/* gen6_write8(): gen6/7 8-bit write — FIFO wait + fifodbg check like hsw_write8,
 * but without the HSW unclaimed-register debug/detect calls. */
static void gen6_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen6_write16(): 16-bit variant. */
static void gen6_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } writew((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen6_write32(): 32-bit variant. */
static void gen6_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 
/* Continuation of gen6_write32(). */
262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* gen6_write64(): 64-bit variant (writeq). */
static void gen6_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool trace ) { u32 __fifo_ret ; unsigned long irqflags ; raw_spinlock_t *tmp ; int tmp___0 ; long tmp___1 ; { __fifo_ret = 0U; trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); if (reg <= 262143L && reg != 41356L) { tmp___0 = __gen6_gt_wait_for_fifo(dev_priv); __fifo_ret = (u32 )tmp___0; } else { } writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long )reg); tmp___1 = ldv__builtin_expect(__fifo_ret != 0U, 0L); if (tmp___1 != 0L) { gen6_gt_check_fifodbg(dev_priv); } else { } spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* vgpu_write8(): virtualized-GPU path — plain write under the uncore lock,
 * no forcewake or FIFO management (the hypervisor handles the hardware). */
static void vgpu_write8(struct drm_i915_private *dev_priv , off_t reg , u8 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 1, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); writeb((int )val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } }
/* vgpu_write16(): 16-bit variant of vgpu_write8. */
static void vgpu_write16(struct drm_i915_private *dev_priv , off_t reg , u16 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 2, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); writew((int )val, (void 
volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } } static void vgpu_write32(struct drm_i915_private *dev_priv , off_t reg , u32 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; { trace_i915_reg_rw___0(1, (u32 )reg, (u64 )val, 4, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); writel(val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } } static void vgpu_write64(struct drm_i915_private *dev_priv , off_t reg , u64 val , bool trace ) { unsigned long irqflags ; raw_spinlock_t *tmp ; { trace_i915_reg_rw___0(1, (u32 )reg, val, 8, (int )trace); assert_device_not_suspended(dev_priv); tmp = spinlock_check(& dev_priv->uncore.lock); irqflags = _raw_spin_lock_irqsave(tmp); writeq((unsigned long )val, (void volatile *)dev_priv->regs + (unsigned long )reg); spin_unlock_irqrestore(& dev_priv->uncore.lock, irqflags); return; } } extern void __compiletime_assert_1077(void) ; extern void __compiletime_assert_1079(void) ; static void fw_domain_init(struct drm_i915_private *dev_priv , enum forcewake_domain_id domain_id , u32 reg_set , u32 reg_ack ) { struct intel_uncore_forcewake_domain *d ; int __ret_warn_on ; long tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; bool __cond ; bool __cond___0 ; bool __cond___1 ; int _a ; bool __cond___5 ; bool __cond___6 ; bool __cond___7 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { __ret_warn_on = (unsigned int )domain_id > 2U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { 
/* Continuation of fw_domain_init: WARN paths, then per-gen set/clear mask selection. */ warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 1060, "WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } d = (struct intel_uncore_forcewake_domain *)(& dev_priv->uncore.fw_domain) + (unsigned long )domain_id; __ret_warn_on___0 = d->wake_count != 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 1065, "WARN_ON(d->wake_count)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); d->wake_count = 0U; d->reg_set = reg_set; d->reg_ack = reg_ack; __p = dev_priv; /* gen6 uses plain 0/1 set/clear values; later gens use masked-bit writes (value | mask<<16), hence val_set = (1<<16)|1 and val_clear = 1<<16. The __cond blocks are compile-time-assert residue and always false. */ if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { d->val_reset = 0U; d->val_set = 1U; d->val_clear = 0U; } else { __cond = 0; if ((int )__cond) { __compiletime_assert_1077(); } else { } __cond___0 = 0; if ((int )__cond___0) { __compiletime_assert_1077(); } else { } __cond___1 = 0; if ((int )__cond___1) { __compiletime_assert_1077(); } else { } d->val_reset = (u32 )-65536; _a = 1; d->val_set = (u32 )((_a << 16) | _a); __cond___5 = 0; if ((int )__cond___5) { __compiletime_assert_1079(); } else { } __cond___6 = 0; if ((int )__cond___6) { __compiletime_assert_1079(); } else { } __cond___7 = 0; if ((int )__cond___7) { __compiletime_assert_1079(); } else { } d->val_clear = (u32 )65536; } /* reg_post selection: byte at offset 45 of dev_priv appears to be a platform flag (VLV?) — TODO confirm against struct layout. */ __p___3 = dev_priv; if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { d->reg_post = 1245364U; } else { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { d->reg_post = 41344U; } else { __p___1 = dev_priv; if ((unsigned int 
)((unsigned char )__p___1->info.gen) == 7U) { d->reg_post = 41344U; } else { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { d->reg_post = 41344U; } else { d->reg_post = 0U; } } } } d->i915 = dev_priv; d->id = domain_id; /* Timer registration goes through the LDV timer model, not the real kernel API. */ reg_timer_21(& d->timer, & intel_uncore_fw_release_timer, (unsigned long )d); dev_priv->uncore.fw_domains = (enum forcewake_domains )((unsigned int )dev_priv->uncore.fw_domains | (unsigned int )(1 << (int )domain_id)); fw_domain_reset((struct intel_uncore_forcewake_domain const *)d); return; } } /* intel_uncore_fw_domains_init: picks forcewake get/put callbacks and registers the per-gen forcewake domains (gen9, VLV/CHV, HSW, gen8, IVB with ECOBUS probe, gen6). Body continues on the following lines. */ static void intel_uncore_fw_domains_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; u32 ecobus ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; int __ret_warn_on ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 5U) { return; } else { } __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) == 9U) { dev_priv->uncore.funcs.force_wake_get = & fw_domains_get; dev_priv->uncore.funcs.force_wake_put = & fw_domains_put; fw_domain_init(dev_priv, 0, 41592U, 3460U); fw_domain_init(dev_priv, 1, 41352U, 1245252U); fw_domain_init(dev_priv, 2, 41584U, 3464U); } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { dev_priv->uncore.funcs.force_wake_get = & fw_domains_get; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { dev_priv->uncore.funcs.force_wake_put = & fw_domains_put_with_fifo; } else { __p___1 = 
/* Continuation of intel_uncore_fw_domains_init: VLV/CHV, HSW, gen8, IVB and gen6 branches. */ to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) != 8U) { dev_priv->uncore.funcs.force_wake_put = & fw_domains_put_with_fifo; } else { dev_priv->uncore.funcs.force_wake_put = & fw_domains_put; } } fw_domain_init(dev_priv, 0, 1245360U, 1245364U); fw_domain_init(dev_priv, 2, 1245368U, 1245372U); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { dev_priv->uncore.funcs.force_wake_get = & fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = & fw_domains_put; fw_domain_init(dev_priv, 0, 41352U, 1245252U); } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 8U) { dev_priv->uncore.funcs.force_wake_get = & fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = & fw_domains_put; fw_domain_init(dev_priv, 0, 41352U, 1245252U); } else { goto _L; } } else { _L: /* CIL Label */ /* IVB path: probe ECOBUS (offset 41344 == 0xa180 — presumably ECOBUS, TODO confirm) under a real forcewake cycle to decide between MT forcewake (0xa188) and legacy forcewake (0xa18c). */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { dev_priv->uncore.funcs.force_wake_get = & fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = & fw_domains_put_with_fifo; writel(0U, (void volatile *)dev_priv->regs + 41356U); readl((void const volatile *)dev_priv->regs + 41344U); fw_domain_init(dev_priv, 0, 41352U, 1245248U); mutex_lock_nested(& dev->struct_mutex, 0U); fw_domains_get_with_thread_status(dev_priv, 7); ecobus = readl((void const volatile *)dev_priv->regs + 41344U); fw_domains_put_with_fifo(dev_priv, 7); mutex_unlock(& dev->struct_mutex); if ((ecobus & 32U) == 0U) { printk("\016[drm] No MT forcewake available on Ivybridge, this can result in issues\n"); printk("\016[drm] when using vblank-synced partial screen updates.\n"); fw_domain_init(dev_priv, 0, 41356U, 1245328U); } 
else { } } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 6U) { dev_priv->uncore.funcs.force_wake_get = & fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = & fw_domains_put_with_fifo; fw_domain_init(dev_priv, 0, 41356U, 1245328U); } else { } } } } } } /* WARN if no forcewake domain got registered on a gen6+ part. */ __ret_warn_on = (unsigned int )dev_priv->uncore.fw_domains == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 1186, "WARN_ON(dev_priv->uncore.fw_domains == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } /* intel_uncore_init: top-level uncore setup — vGPU check, eLLC detect, forcewake domains, early sanitize, then a per-gen switch that fills the mmio read/write vtable. Body continues on the following lines. */ void intel_uncore_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; int __ret_warn_on ; struct drm_i915_private *__p___0 ; long tmp ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i915_check_vgpu(dev); intel_uncore_ellc_detect(dev); intel_uncore_fw_domains_init(dev); __intel_uncore_early_sanitize(dev, 0); __p = to_i915((struct drm_device const *)dev); switch ((int )__p->info.gen) { default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { __p___0 = to_i915((struct drm_device const *)dev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 1201, "Missing switch case (%lu) in %s\n", (long )__p___0->info.gen, "intel_uncore_init"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; case 9: 
/* Continuation of intel_uncore_init: per-gen mmio vtable assignment (gen9, gen8/CHV, gen7/6 with HSW and VLV variants, gen5, gen2-4). ldv_49310 is the CIL label for the switch's break target. */ dev_priv->uncore.funcs.mmio_writeb = & gen9_write8; dev_priv->uncore.funcs.mmio_writew = & gen9_write16; dev_priv->uncore.funcs.mmio_writel = & gen9_write32; dev_priv->uncore.funcs.mmio_writeq = & gen9_write64; dev_priv->uncore.funcs.mmio_readb = & gen9_read8; dev_priv->uncore.funcs.mmio_readw = & gen9_read16; dev_priv->uncore.funcs.mmio_readl = & gen9_read32; dev_priv->uncore.funcs.mmio_readq = & gen9_read64; goto ldv_49310; case 8: __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { dev_priv->uncore.funcs.mmio_writeb = & chv_write8; dev_priv->uncore.funcs.mmio_writew = & chv_write16; dev_priv->uncore.funcs.mmio_writel = & chv_write32; dev_priv->uncore.funcs.mmio_writeq = & chv_write64; dev_priv->uncore.funcs.mmio_readb = & chv_read8; dev_priv->uncore.funcs.mmio_readw = & chv_read16; dev_priv->uncore.funcs.mmio_readl = & chv_read32; dev_priv->uncore.funcs.mmio_readq = & chv_read64; } else { goto _L; } } else { _L: /* CIL Label */ dev_priv->uncore.funcs.mmio_writeb = & gen8_write8; dev_priv->uncore.funcs.mmio_writew = & gen8_write16; dev_priv->uncore.funcs.mmio_writel = & gen8_write32; dev_priv->uncore.funcs.mmio_writeq = & gen8_write64; dev_priv->uncore.funcs.mmio_readb = & gen6_read8; dev_priv->uncore.funcs.mmio_readw = & gen6_read16; dev_priv->uncore.funcs.mmio_readl = & gen6_read32; dev_priv->uncore.funcs.mmio_readq = & gen6_read64; } goto ldv_49310; case 7: ; case 6: __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { dev_priv->uncore.funcs.mmio_writeb = & hsw_write8; dev_priv->uncore.funcs.mmio_writew = & hsw_write16; dev_priv->uncore.funcs.mmio_writel = & hsw_write32; dev_priv->uncore.funcs.mmio_writeq = & hsw_write64; } else { dev_priv->uncore.funcs.mmio_writeb = & gen6_write8; dev_priv->uncore.funcs.mmio_writew = & 
gen6_write16; dev_priv->uncore.funcs.mmio_writel = & gen6_write32; dev_priv->uncore.funcs.mmio_writeq = & gen6_write64; } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { dev_priv->uncore.funcs.mmio_readb = & vlv_read8; dev_priv->uncore.funcs.mmio_readw = & vlv_read16; dev_priv->uncore.funcs.mmio_readl = & vlv_read32; dev_priv->uncore.funcs.mmio_readq = & vlv_read64; } else { dev_priv->uncore.funcs.mmio_readb = & gen6_read8; dev_priv->uncore.funcs.mmio_readw = & gen6_read16; dev_priv->uncore.funcs.mmio_readl = & gen6_read32; dev_priv->uncore.funcs.mmio_readq = & gen6_read64; } goto ldv_49310; case 5: dev_priv->uncore.funcs.mmio_writeb = & gen5_write8; dev_priv->uncore.funcs.mmio_writew = & gen5_write16; dev_priv->uncore.funcs.mmio_writel = & gen5_write32; dev_priv->uncore.funcs.mmio_writeq = & gen5_write64; dev_priv->uncore.funcs.mmio_readb = & gen5_read8; dev_priv->uncore.funcs.mmio_readw = & gen5_read16; dev_priv->uncore.funcs.mmio_readl = & gen5_read32; dev_priv->uncore.funcs.mmio_readq = & gen5_read64; goto ldv_49310; case 4: ; case 3: ; case 2: dev_priv->uncore.funcs.mmio_writeb = & gen2_write8; dev_priv->uncore.funcs.mmio_writew = & gen2_write16; dev_priv->uncore.funcs.mmio_writel = & gen2_write32; dev_priv->uncore.funcs.mmio_writeq = & gen2_write64; dev_priv->uncore.funcs.mmio_readb = & gen2_read8; dev_priv->uncore.funcs.mmio_readw = & gen2_read16; dev_priv->uncore.funcs.mmio_readl = & gen2_read32; dev_priv->uncore.funcs.mmio_readq = & gen2_read64; goto ldv_49310; } ldv_49310: /* If running under a hypervisor (vGPU active), override the whole vtable with the vgpu_* thunks. */ tmp___0 = intel_vgpu_active(dev); if ((int )tmp___0) { dev_priv->uncore.funcs.mmio_writeb = & vgpu_write8; dev_priv->uncore.funcs.mmio_writew = & vgpu_write16; dev_priv->uncore.funcs.mmio_writel = & vgpu_write32; dev_priv->uncore.funcs.mmio_writeq = & vgpu_write64; dev_priv->uncore.funcs.mmio_readb = & vgpu_read8; dev_priv->uncore.funcs.mmio_readw = & vgpu_read16; dev_priv->uncore.funcs.mmio_readl = & vgpu_read32; 
/* Tail of intel_uncore_init (vGPU readq override), then fini and the register-read ioctl. */ dev_priv->uncore.funcs.mmio_readq = & vgpu_read64; } else { } i915_check_and_clear_faults(dev); return; } } /* intel_uncore_fini: sanitize and force forcewake release on teardown. */ void intel_uncore_fini(struct drm_device *dev ) { { intel_uncore_sanitize(dev); intel_uncore_forcewake_reset(dev, 0); return; } } /* Whitelist of registers userspace may read: single entry {offset 9048, size 8, gen bitmask 1008}. */ static struct register_whitelist const whitelist[1U] = { {9048ULL, 8U, 1008U}}; /* i915_reg_read_ioctl: validates the requested offset against the whitelist and current gen, takes a runtime-PM reference, reads via the uncore vtable at the entry's size, returns -22 (-EINVAL) for non-whitelisted registers. */ int i915_reg_read_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_reg_read *reg ; struct register_whitelist const *entry ; int i ; int ret ; struct drm_i915_private *__p ; uint32_t tmp ; uint16_t tmp___0 ; uint8_t tmp___1 ; int __ret_warn_on ; long tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; reg = (struct drm_i915_reg_read *)data; entry = (struct register_whitelist const *)(& whitelist); ret = 0; i = 0; goto ldv_49370; ldv_49369: ; if ((unsigned long long )entry->offset == reg->offset) { __p = to_i915((struct drm_device const *)dev); if (((unsigned int )(1 << (int )__p->info.gen) & (unsigned int )entry->gen_bitmask) != 0U) { goto ldv_49368; } else { } } else { } i = i + 1; entry = entry + 1; ldv_49370: ; if (i == 0) { goto ldv_49369; } else { } ldv_49368: ; if (i == 1) { return (-22); } else { } intel_runtime_pm_get(dev_priv); switch (entry->size) { case 8U: reg->val = (*(dev_priv->uncore.funcs.mmio_readq))(dev_priv, (off_t )reg->offset, 1); goto ldv_49374; case 4U: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg->offset, 1); reg->val = (__u64 )tmp; goto ldv_49374; case 2U: tmp___0 = (*(dev_priv->uncore.funcs.mmio_readw))(dev_priv, (off_t )reg->offset, 1); reg->val = (__u64 )tmp___0; goto ldv_49374; case 1U: tmp___1 = (*(dev_priv->uncore.funcs.mmio_readb))(dev_priv, (off_t )reg->offset, 1); reg->val = (__u64 )tmp___1; goto ldv_49374; default: __ret_warn_on = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_uncore.c", 1304, "Missing switch case (%lu) in %s\n", (long )entry->size, "i915_reg_read_ioctl"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = -22; goto out; } ldv_49374: ; out: intel_runtime_pm_put(dev_priv); return (ret); } } /* i915_get_reset_stats_ioctl: reports per-context hang stats; global reset_count only for CAP_SYS_ADMIN (capable(21)). Returns -22 for bad flags/pad, -1 (-EPERM) for unprivileged ctx 0 queries. */ int i915_get_reset_stats_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_private *dev_priv ; struct drm_i915_reset_stats *args ; struct i915_ctx_hang_stats *hs ; struct intel_context *ctx ; int ret ; bool tmp ; int tmp___0 ; long tmp___1 ; bool tmp___2 ; bool tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; args = (struct drm_i915_reset_stats *)data; if (args->flags != 0U || args->pad != 0U) { return (-22); } else { } if (args->ctx_id == 0U) { tmp = capable(21); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-1); } else { } } else { } ret = mutex_lock_interruptible_nested(& dev->struct_mutex, 0U); if (ret != 0) { return (ret); } else { } ctx = i915_gem_context_get((struct drm_i915_file_private *)file->driver_priv, args->ctx_id); tmp___2 = IS_ERR((void const *)ctx); if ((int )tmp___2) { mutex_unlock(& dev->struct_mutex); tmp___1 = PTR_ERR((void const *)ctx); return ((int )tmp___1); } else { } hs = & ctx->hang_stats; tmp___3 = capable(21); if ((int )tmp___3) { args->reset_count = i915_reset_count(& dev_priv->gpu_error); } else { args->reset_count = 0U; } args->batch_active = hs->batch_active; args->batch_pending = hs->batch_pending; mutex_unlock(& dev->struct_mutex); return (0); } } /* i915_reset_complete: reads PCI config byte 0xc0 (GDRST) and reports bit 1 clear. */ static int i915_reset_complete(struct drm_device *dev ) { u8 gdrst ; { pci_read_config_byte((struct pci_dev const *)dev->pdev, 192, & gdrst); return (((int )gdrst & 2) == 0); } } /* i915_do_reset: pulse GDRST via PCI config and poll for completion (body continues on the following lines). */ static int i915_do_reset(struct drm_device *dev ) { unsigned long 
/* Continuation of i915_do_reset: 500 ms poll loop (CIL-expanded wait_for macro) on i915_reset_complete; -110 == -ETIMEDOUT. */ timeout__ ; unsigned long tmp ; int ret__ ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { pci_write_config_byte((struct pci_dev const *)dev->pdev, 192, 1); __const_udelay(85900UL); pci_write_config_byte((struct pci_dev const *)dev->pdev, 192, 0); tmp = msecs_to_jiffies(500U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_49410; ldv_49409: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = i915_reset_complete(dev); if (tmp___0 == 0) { ret__ = -110; } else { } goto ldv_49408; } else { } tmp___1 = drm_can_sleep___4(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49410: tmp___2 = i915_reset_complete(dev); if (tmp___2 == 0) { goto ldv_49409; } else { } ldv_49408: ; return (ret__); } } /* g4x_reset_complete: GDRST (PCI config 0xc0) bit 0 clear means reset done. */ static int g4x_reset_complete(struct drm_device *dev ) { u8 gdrst ; { pci_read_config_byte((struct pci_dev const *)dev->pdev, 192, & gdrst); return (((int )gdrst & 1) == 0); } } /* g33_do_reset: assert GDRST and poll g4x_reset_complete for up to 500 ms. */ static int g33_do_reset(struct drm_device *dev ) { unsigned long timeout__ ; unsigned long tmp ; int ret__ ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { pci_write_config_byte((struct pci_dev const *)dev->pdev, 192, 1); tmp = msecs_to_jiffies(500U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_49429; ldv_49428: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = g4x_reset_complete(dev); if (tmp___0 == 0) { ret__ = -110; } else { } goto ldv_49427; } else { } tmp___1 = drm_can_sleep___4(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49429: tmp___2 = g4x_reset_complete(dev); if (tmp___2 == 0) { goto ldv_49428; } else { } ldv_49427: ; return (ret__); } } /* g4x_do_reset: two-phase reset (render then media) with an MMIO clock-gate toggle at offset 25100 (0x620c — presumably VDECCLK_GATE_D, TODO confirm) between phases. Body continues on the following lines. */ static int g4x_do_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; uint32_t tmp___3 ; unsigned long timeout_____0 ; unsigned long tmp___4 ; int ret_____0 ; int tmp___5 ; bool tmp___6 ; int 
tmp___7 ; uint32_t tmp___8 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pci_write_config_byte((struct pci_dev const *)dev->pdev, 192, 5); tmp = msecs_to_jiffies(500U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_49446; ldv_49445: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = g4x_reset_complete(dev); if (tmp___0 == 0) { ret__ = -110; } else { } goto ldv_49444; } else { } tmp___1 = drm_can_sleep___4(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49446: tmp___2 = g4x_reset_complete(dev); if (tmp___2 == 0) { goto ldv_49445; } else { } ldv_49444: ret = ret__; if (ret != 0) { return (ret); } else { } tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 25100L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25100L, tmp___3 | 16U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 25100L, 0); pci_write_config_byte((struct pci_dev const *)dev->pdev, 192, 13); tmp___4 = msecs_to_jiffies(500U); timeout_____0 = (tmp___4 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_49458; ldv_49457: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___5 = g4x_reset_complete(dev); if (tmp___5 == 0) { ret_____0 = -110; } else { } goto ldv_49456; } else { } tmp___6 = drm_can_sleep___4(); if ((int )tmp___6) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49458: tmp___7 = g4x_reset_complete(dev); if (tmp___7 == 0) { goto ldv_49457; } else { } ldv_49456: ret = ret_____0; if (ret != 0) { return (ret); } else { } tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 25100L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 25100L, tmp___8 & 4294967279U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 25100L, 0); pci_write_config_byte((struct pci_dev const *)dev->pdev, 192, 0); return (0); } } /* ironlake_do_reset: MCHBAR-mirrored GDSR reset at MMIO offset 76964 (0x12ca4 — presumably ILK_GDSR, TODO confirm), render then media, polling bit 0. Body continues on the following lines. */ static int ironlake_do_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; unsigned long timeout__ ; unsigned long tmp 
/* Continuation of ironlake_do_reset: write 3 (render reset) then 7 (media reset) to 0x12ca4, polling bit 0 with 500 ms timeouts, finally clear the register. */ ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; unsigned long timeout_____0 ; unsigned long tmp___3 ; int ret_____0 ; uint32_t tmp___4 ; bool tmp___5 ; uint32_t tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 76964L, 3U, 1); tmp = msecs_to_jiffies(500U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_49475; ldv_49474: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 76964L, 1); if ((int )tmp___0 & 1) { ret__ = -110; } else { } goto ldv_49473; } else { } tmp___1 = drm_can_sleep___4(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49475: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 76964L, 1); if ((int )tmp___2 & 1) { goto ldv_49474; } else { } ldv_49473: ret = ret__; if (ret != 0) { return (ret); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 76964L, 7U, 1); tmp___3 = msecs_to_jiffies(500U); timeout_____0 = (tmp___3 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_49487; ldv_49486: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 76964L, 1); if ((int )tmp___4 & 1) { ret_____0 = -110; } else { } goto ldv_49485; } else { } tmp___5 = drm_can_sleep___4(); if ((int )tmp___5) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49487: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 76964L, 1); if ((int )tmp___6 & 1) { goto ldv_49486; } else { } ldv_49485: ret = ret_____0; if (ret != 0) { return (ret); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 76964L, 0U, 1); return (0); } } /* gen6_do_reset: full-soft-reset via raw writel/readl at offset 37916 (0x941c — presumably GEN6_GDRST, TODO confirm; raw access because forcewake state is unknown during reset), then forcewake reset. */ static int gen6_do_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; unsigned int tmp___0 ; bool tmp___1 ; unsigned int tmp___2 ; { dev_priv = (struct 
drm_i915_private *)dev->dev_private; writel(1U, (void volatile *)dev_priv->regs + 37916U); tmp = msecs_to_jiffies(500U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_49504; ldv_49503: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = readl((void const volatile *)dev_priv->regs + 37916U); if ((int )tmp___0 & 1) { ret__ = -110; } else { } goto ldv_49502; } else { } tmp___1 = drm_can_sleep___4(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49504: tmp___2 = readl((void const volatile *)dev_priv->regs + 37916U); if ((int )tmp___2 & 1) { goto ldv_49503; } else { } ldv_49502: ret = ret__; intel_uncore_forcewake_reset(dev, 1); return (ret); } } /* intel_gpu_reset: dispatches to the gen-appropriate reset routine; -19 == -ENODEV for gen <= 2. */ int intel_gpu_reset(struct drm_device *dev ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 5U) { tmp = gen6_do_reset(dev); return (tmp); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 5U) { tmp___0 = ironlake_do_reset(dev); return (tmp___0); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { tmp___1 = g4x_do_reset(dev); return (tmp___1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { tmp___2 = g33_do_reset(dev); return (tmp___2); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 2U) { tmp___3 = i915_do_reset(dev); return (tmp___3); } else { return (-19); } } } } } } } /* intel_uncore_check_errors: on applicable platforms, checks the unclaimed-register bit in MMIO offset 271104 (0x42300 — presumably FPGA_DBG, TODO confirm). Body continues on the following lines. */ void intel_uncore_check_errors(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private 
/* Continuation of intel_uncore_check_errors: "(int )tmp < 0" tests bit 31 (unclaimed); writing 0x80000000 clears it. */ *__p ; unsigned int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 47UL) != 0U) { tmp = readl((void const volatile *)dev_priv->regs + 271104U); if ((int )tmp < 0) { drm_err("Unclaimed register before interrupt\n"); writel(2147483648U, (void volatile *)dev_priv->regs + 271104U); } else { } } else { } return; } } /* LDV timer model: four slots (ldv_timer_21_{0..3}) each holding a timer_list pointer and a state (0 = free, 1 = armed, 2 = fired); this replaces the real kernel timer API for verification. */ void activate_suitable_timer_21(struct timer_list *timer , unsigned long data ) { { if (ldv_timer_21_0 == 0 || ldv_timer_21_0 == 2) { ldv_timer_list_21_0 = timer; ldv_timer_list_21_0->data = data; ldv_timer_21_0 = 1; return; } else { } if (ldv_timer_21_1 == 0 || ldv_timer_21_1 == 2) { ldv_timer_list_21_1 = timer; ldv_timer_list_21_1->data = data; ldv_timer_21_1 = 1; return; } else { } if (ldv_timer_21_2 == 0 || ldv_timer_21_2 == 2) { ldv_timer_list_21_2 = timer; ldv_timer_list_21_2->data = data; ldv_timer_21_2 = 1; return; } else { } if (ldv_timer_21_3 == 0 || ldv_timer_21_3 == 2) { ldv_timer_list_21_3 = timer; ldv_timer_list_21_3->data = data; ldv_timer_21_3 = 1; return; } else { } return; } } /* activate_pending_timer_21: re-arm a known timer (models mod_timer semantics); falls back to claiming a free slot for an unknown timer. */ void activate_pending_timer_21(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_21_0 == (unsigned long )timer) { if (ldv_timer_21_0 == 2 || pending_flag != 0) { ldv_timer_list_21_0 = timer; ldv_timer_list_21_0->data = data; ldv_timer_21_0 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_21_1 == (unsigned long )timer) { if (ldv_timer_21_1 == 2 || pending_flag != 0) { ldv_timer_list_21_1 = timer; ldv_timer_list_21_1->data = data; ldv_timer_21_1 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_21_2 == (unsigned long )timer) { if (ldv_timer_21_2 == 2 || pending_flag != 0) { ldv_timer_list_21_2 = timer; ldv_timer_list_21_2->data = data; ldv_timer_21_2 = 1; } else { } return; } else { } if ((unsigned long )ldv_timer_list_21_3 == (unsigned long )timer) { if 
(ldv_timer_21_3 == 2 || pending_flag != 0) { ldv_timer_list_21_3 = timer; ldv_timer_list_21_3->data = data; ldv_timer_21_3 = 1; } else { } return; } else { } activate_suitable_timer_21(timer, data); return; } } /* choose_timer_21: nondeterministically fires one armed timer slot (verifier explores all choices); ldv_stop() cuts infeasible branches. */ void choose_timer_21(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_timer_21_0 == 1) { ldv_timer_21_0 = 2; ldv_timer_21(ldv_timer_21_0, ldv_timer_list_21_0); } else { } goto ldv_49562; case 1: ; if (ldv_timer_21_1 == 1) { ldv_timer_21_1 = 2; ldv_timer_21(ldv_timer_21_1, ldv_timer_list_21_1); } else { } goto ldv_49562; case 2: ; if (ldv_timer_21_2 == 1) { ldv_timer_21_2 = 2; ldv_timer_21(ldv_timer_21_2, ldv_timer_list_21_2); } else { } goto ldv_49562; case 3: ; if (ldv_timer_21_3 == 1) { ldv_timer_21_3 = 2; ldv_timer_21(ldv_timer_21_3, ldv_timer_list_21_3); } else { } goto ldv_49562; default: ldv_stop(); } ldv_49562: ; return; } } /* reg_timer_21: only tracks timers whose callback is intel_uncore_fw_release_timer. */ int reg_timer_21(struct timer_list *timer , void (*function)(unsigned long ) , unsigned long data ) { { if ((unsigned long )function == (unsigned long )(& intel_uncore_fw_release_timer)) { activate_suitable_timer_21(timer, data); } else { } return (0); } } /* disable_suitable_timer_21: models del_timer — frees the matching slot. */ void disable_suitable_timer_21(struct timer_list *timer ) { { if (ldv_timer_21_0 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_21_0) { ldv_timer_21_0 = 0; return; } else { } if (ldv_timer_21_1 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_21_1) { ldv_timer_21_1 = 0; return; } else { } if (ldv_timer_21_2 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_21_2) { ldv_timer_21_2 = 0; return; } else { } if (ldv_timer_21_3 != 0 && (unsigned long )timer == (unsigned long )ldv_timer_list_21_3) { ldv_timer_21_3 = 0; return; } else { } return; } } /* ldv_timer_21: invokes the timer callback in modeled interrupt context (LDV_IN_INTERRUPT = 2). */ void ldv_timer_21(int state , struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; intel_uncore_fw_release_timer(timer->data); LDV_IN_INTERRUPT = 1; return; } } /* timer_init_21: resets all four timer slots to free (body completes on the next line). */ void timer_init_21(void) { { ldv_timer_21_0 = 0; ldv_timer_21_1 = 0; ldv_timer_21_2 = 0; 
/* Tail of timer_init_21, then LDV workqueue wrappers: each wraps the real queue/flush call and updates the LDV work model (activate_work_18 / call_and_disable_all_18). */ ldv_timer_21_3 = 0; return; } } bool ldv_queue_work_on_443(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_444(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_445(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_446(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_447(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } /* LDV timer wrappers: route mod_timer_pinned / del_timer_sync through the slot model above. */ int ldv_mod_timer_pinned_448(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___9 ldv_func_res ; int tmp ; { tmp = mod_timer_pinned(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_21(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } int ldv_del_timer_sync_449(struct timer_list *ldv_func_arg1 ) { 
ldv_func_ret_type___10 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_21(ldv_func_arg1); return (ldv_func_res); } } bool ldv_queue_work_on_461(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_463(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_462(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_465(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_464(struct workqueue_struct *ldv_func_arg1 ) ; /* Gen6 golden-context ("null state") render-state tables from intel_renderstate_gen6.c: relocation offsets (terminated by 0xffffffff) and the batch-buffer dwords. Values are opaque GPU command-stream data — do not edit. */ static u32 const gen6_null_state_relocs[6U] = { 32U, 36U, 44U, 480U, 484U, 4294967295U}; static u32 const gen6_null_state_batch[275U] = { 1761869824U, 2030895105U, 0U, 0U, 2014838784U, 1U, 1627455496U, 0U, 1U, 1U, 0U, 1U, 0U, 1U, 0U, 1U, 1627521024U, 0U, 2013593601U, 24U, 0U, 2014121986U, 0U, 0U, 1056U, 2014642179U, 0U, 0U, 0U, 0U, 2014314500U, 0U, 0U, 0U, 0U, 0U, 2014707715U, 0U, 0U, 0U, 0U, 2014380037U, 0U, 0U, 0U, 0U, 0U, 0U, 2014445570U, 0U, 0U, 0U, 2014773251U, 0U, 0U, 0U, 0U, 2030370821U, 3758358528U, 0U, 0U, 0U, 0U, 0U, 2031091712U, 0U, 2030043138U, 4294967295U, 0U, 0U, 2014183426U, 1089U, 1025U, 1025U, 2013401090U, 0U, 0U, 1024U, 2014511122U, 4196368U, 0U, 536870912U, 67108864U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014576647U, 640U, 134742016U, 0U, 393216U, 1309147138U, 1049600U, 0U, 0U, 2013855749U, 33554432U, 572653568U, 49676288U, 288555008U, 42270724U, 287440896U, 2013335554U, 0U, 0U, 512U, 2013790211U, 8192U, 1096U, 1096U, 0U, 83886080U, 0U, 0U, 0U, 0U, 544U, 576U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 6291546U, 541095870U, 192U, 
/* Continuation of gen6_null_state_batch (opaque GPU command-stream dwords — do not edit), then the exported descriptor and more LDV workqueue wrappers. */ 9240640U, 6291546U, 543193022U, 192U, 9240704U, 6291546U, 545290174U, 208U, 9240640U, 6291546U, 547387326U, 208U, 9240704U, 513U, 537395297U, 0U, 0U, 6291457U, 538968098U, 9240576U, 0U, 41943089U, 566238409U, 32U, 176816129U, 6291457U, 541066174U, 9241024U, 0U, 6291457U, 543163326U, 9241056U, 0U, 6291457U, 545260478U, 9241088U, 0U, 6291457U, 547357630U, 9241120U, 0U, 6291457U, 549454782U, 9241152U, 0U, 6291457U, 551551934U, 9241184U, 0U, 6291457U, 553649086U, 9241216U, 0U, 6291457U, 555746238U, 9241248U, 0U, 92274737U, 603987144U, 64U, 2416021504U, 126U, 0U, 0U, 0U, 126U, 0U, 0U, 0U, 126U, 0U, 0U, 0U, 126U, 0U, 0U, 0U, 126U, 0U, 0U, 0U, 126U, 0U, 0U, 0U, 126U, 0U, 0U, 0U, 126U, 0U, 0U, 0U, 805306368U, 292U, 0U, 0U, 0U, 0U, 0U, 0U, 4187624204U, 2040140556U, 0U, 0U, 0U, 0U, 0U, 0U, 2147483697U, 3U, 0U}; /* Exported descriptor tying the two tables together with the batch length. */ struct intel_renderstate_rodata const gen6_null_state = {(u32 const *)(& gen6_null_state_relocs), (u32 const *)(& gen6_null_state_batch), 275U}; /* Definitions of the LDV workqueue wrappers declared earlier (identical shape to the _44x variants). */ bool ldv_queue_work_on_461(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_462(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_463(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } 
void ldv_flush_workqueue_464(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_465(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_475(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_477(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_476(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_479(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_478(struct workqueue_struct *ldv_func_arg1 ) ; /* Gen7 golden-context render-state tables (from intel_renderstate_gen7.c); batch array is cut off at the end of this chunk. Opaque GPU command-stream data — do not edit. */ static u32 const gen7_null_state_relocs[5U] = { 12U, 16U, 24U, 492U, 4294967295U}; static u32 const gen7_null_state_batch[240U] = { 1761869824U, 1627455496U, 0U, 1U, 1U, 0U, 1U, 0U, 1U, 0U, 1U, 2030895106U, 0U, 0U, 0U, 2014838784U, 1U, 2031484928U, 8U, 2016411648U, 33620032U, 2016477184U, 67108864U, 2016542720U, 67108864U, 2016608256U, 33554432U, 2014314500U, 0U, 0U, 0U, 0U, 0U, 2015035397U, 0U, 0U, 0U, 0U, 0U, 0U, 2015100930U, 0U, 0U, 0U, 2015166468U, 0U, 0U, 0U, 0U, 0U, 2014380037U, 0U, 0U, 0U, 0U, 0U, 0U, 2014445570U, 0U, 0U, 0U, 2015428608U, 0U, 2014511109U, 0U, 536870912U, 67108864U, 0U, 0U, 0U, 2014576641U, 536872960U, 0U, 2015232001U, 0U, 0U, 2013593605U, 3758358528U, 0U, 0U, 0U, 0U, 0U, 2013528065U, 0U, 0U, 2015625216U, 576U, 2015559680U, 608U, 2016346112U, 640U, 2015297548U, 4196368U, 
0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2015363078U, 704U, 134742016U, 0U, 671089666U, 393216U, 0U, 0U, 2013855749U, 33554432U, 572653568U, 49676288U, 287506432U, 49676292U, 287506432U, 2013790211U, 24584U, 832U, 4294967295U, 0U, 2016018432U, 864U, 2030043138U, 4294967295U, 0U, 0U, 2063597573U, 15U, 3U, 0U, 1U, 0U, 0U, 83886080U, 0U, 0U, 0U, 0U, 49U, 3U, 0U, 0U, 0U, 0U, 0U, 0U, 4187624204U, 2040140556U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 1170U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 8388698U, 773879741U, 192U, 9240640U, 8388698U, 778074045U, 208U, 9240640U, 41943089U, 562044841U, 9244192U, 142868481U, 8388609U, 773850045U, 9240960U, 0U, 8388609U, 778044349U, 9241024U, 0U, 8388609U, 782238653U, 9241088U, 0U, 8388609U, 786432957U, 9241152U, 0U, 92274737U, 536879016U, 9244192U, 2416119808U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 896U, 928U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U}; struct intel_renderstate_rodata const gen7_null_state = {(u32 const *)(& gen7_null_state_relocs), (u32 const *)(& gen7_null_state_batch), 240U}; bool ldv_queue_work_on_475(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_476(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_477(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = 
queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_478(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_479(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_489(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_491(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_490(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_493(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_492(struct workqueue_struct *ldv_func_arg1 ) ; static u32 const gen8_null_state_relocs[5U] = { 1944U, 1956U, 1964U, 1980U, 4294967295U}; static u32 const gen8_null_state_batch[944U] = { 2046820356U, 16777216U, 0U, 0U, 0U, 0U, 1761869824U, 2014576640U, 67108864U, 2015363082U, 0U, 0U, 2147483648U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014511106U, 0U, 0U, 33560584U, 2015297538U, 0U, 0U, 0U, 2018574345U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014314503U, 0U, 0U, 65536U, 0U, 0U, 0U, 0U, 0U, 2015035399U, 0U, 0U, 0U, 0U, 0U, 0U, 2048U, 0U, 2014380040U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2015232003U, 0U, 0U, 0U, 0U, 2015166471U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014445570U, 0U, 0U, 0U, 2018508803U, 0U, 0U, 0U, 
0U, 2015100930U, 0U, 0U, 0U, 2014052352U, 0U, 2018639875U, 0U, 0U, 0U, 0U, 2016411648U, 134283328U, 2016477184U, 503316480U, 2016542720U, 503316480U, 2016608256U, 503316480U, 2031681538U, 0U, 0U, 0U, 2031747074U, 0U, 0U, 0U, 2031812610U, 0U, 0U, 0U, 2031222784U, 0U, 2031288320U, 0U, 2031353856U, 0U, 2031419392U, 0U, 2031484928U, 0U, 2014642185U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014904329U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014969865U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014707721U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014773257U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2018050049U, 0U, 0U, 2018115584U, 0U, 2018181120U, 4U, 2031550721U, 0U, 128U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 536870912U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 1073741824U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 1610612736U, 0U, 0U, 0U, 0U, 0U, 0U, 1627455502U, 1U, 0U, 0U, 1U, 0U, 1U, 0U, 1U, 0U, 1U, 0U, 4097U, 4097U, 1U, 4097U, 1627521025U, 0U, 0U, 
2030043138U, 0U, 0U, 0U, 2013593606U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2030305282U, 0U, 0U, 0U, 2030305282U, 1073741824U, 0U, 0U, 2030305282U, 2147483648U, 0U, 0U, 2030305282U, 3221225472U, 0U, 0U, 2030567425U, 0U, 0U, 2030698497U, 0U, 0U, 2013659139U, 0U, 0U, 0U, 0U, 2013724675U, 0U, 0U, 0U, 0U, 2013528065U, 0U, 0U, 2031157248U, 0U, 2014117888U, 0U, 2030436352U, 0U, 2030501919U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2030174223U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2030829583U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2013921283U, 0U, 0U, 0U, 0U, 2013790339U, 16384U, 0U, 0U, 0U, 67125248U, 0U, 0U, 0U, 134234112U, 0U, 0U, 0U, 201342976U, 0U, 0U, 0U, 268451840U, 0U, 0U, 0U, 335560704U, 0U, 0U, 0U, 402669568U, 0U, 0U, 0U, 469778432U, 0U, 0U, 0U, 536887296U, 0U, 0U, 0U, 603996160U, 0U, 0U, 0U, 671105024U, 0U, 0U, 0U, 738213888U, 0U, 0U, 0U, 805322752U, 0U, 0U, 0U, 872431616U, 0U, 0U, 0U, 939540480U, 0U, 0U, 0U, 1006649344U, 0U, 0U, 0U, 1073758208U, 0U, 0U, 0U, 1140867072U, 0U, 0U, 0U, 1207975936U, 0U, 0U, 0U, 1275084800U, 0U, 0U, 0U, 1342193664U, 0U, 0U, 0U, 1409302528U, 0U, 0U, 0U, 1476411392U, 0U, 0U, 0U, 1543520256U, 0U, 0U, 0U, 1610629120U, 0U, 0U, 0U, 1677737984U, 0U, 0U, 0U, 1744846848U, 0U, 0U, 0U, 1811955712U, 0U, 0U, 0U, 1879064576U, 0U, 0U, 0U, 1946173440U, 0U, 0U, 0U, 2013282304U, 0U, 0U, 0U, 2080391168U, 0U, 0U, 0U, 2147500032U, 0U, 0U, 0U, 2013855811U, 33554432U, 572653568U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 1745551361U, 2015756288U, 0U, 2015821824U, 0U, 2015887360U, 0U, 2015952896U, 0U, 2016018432U, 0U, 2014183424U, 3521U, 2015625216U, 3585U, 2018443264U, 2147483904U, 2018312192U, 1073741824U, 
2016083968U, 0U, 2016149504U, 0U, 2016215040U, 0U, 2016280576U, 0U, 2016346112U, 0U, 2014248960U, 0U, 2015559680U, 3680U, 2015428608U, 3712U, 2063597573U, 4U, 1U, 0U, 1U, 0U, 0U, 83886080U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 1065353216U, 1065353216U, 1065353216U, 1065353216U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U}; struct intel_renderstate_rodata const gen8_null_state = {(u32 const *)(& gen8_null_state_relocs), (u32 const *)(& gen8_null_state_batch), 944U}; bool ldv_queue_work_on_489(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_490(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_491(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_492(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_493(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 
) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_503(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_505(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_504(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_507(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_506(struct workqueue_struct *ldv_func_arg1 ) ; static u32 const gen9_null_state_relocs[5U] = { 1960U, 1972U, 1980U, 1996U, 4294967295U}; static u32 const gen9_null_state_batch[960U] = { 2046820356U, 16777216U, 0U, 0U, 0U, 0U, 1761870592U, 2014576640U, 67108864U, 2015363082U, 0U, 0U, 2147483648U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014511106U, 0U, 0U, 33560584U, 2015297540U, 0U, 0U, 0U, 0U, 0U, 2018574345U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014314503U, 0U, 0U, 65536U, 0U, 0U, 0U, 0U, 0U, 2015035399U, 0U, 0U, 0U, 0U, 0U, 0U, 2048U, 0U, 2014380040U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2015232003U, 0U, 0U, 0U, 0U, 2015166473U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014445570U, 0U, 0U, 0U, 2018508803U, 0U, 0U, 0U, 0U, 2015100930U, 0U, 0U, 0U, 2014052352U, 0U, 2018639875U, 0U, 0U, 0U, 0U, 2016411648U, 134283328U, 2016477184U, 503316480U, 2016542720U, 503316480U, 2016608256U, 503316480U, 2031681538U, 0U, 0U, 0U, 2031747074U, 0U, 0U, 0U, 2031812610U, 0U, 0U, 0U, 2031222784U, 0U, 2031288320U, 0U, 2031353856U, 0U, 2031419392U, 0U, 2031484928U, 0U, 2014642185U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014904329U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 
0U, 0U, 2014969865U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014707721U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2014773257U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2018050049U, 0U, 0U, 2018115584U, 0U, 2018181120U, 4U, 2031550721U, 0U, 128U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 536870912U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 1073741824U, 0U, 0U, 0U, 0U, 0U, 0U, 2031616006U, 1610612736U, 0U, 0U, 0U, 0U, 0U, 0U, 1627455505U, 1U, 0U, 0U, 1U, 0U, 1U, 0U, 1U, 0U, 1U, 0U, 4097U, 4097U, 1U, 4097U, 0U, 0U, 0U, 1627521025U, 0U, 0U, 2030043138U, 0U, 0U, 0U, 2013593606U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2030305282U, 0U, 0U, 0U, 2030305282U, 1073741824U, 0U, 0U, 2030305282U, 2147483648U, 0U, 0U, 2030305282U, 3221225472U, 0U, 0U, 2030567425U, 0U, 0U, 2030698497U, 0U, 0U, 2013659139U, 0U, 0U, 0U, 0U, 2013724675U, 0U, 0U, 0U, 0U, 2013528065U, 0U, 0U, 2031157248U, 0U, 2014117888U, 0U, 2030436352U, 0U, 2030501919U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 
0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2030174223U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2030829583U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2013921283U, 0U, 0U, 0U, 0U, 2013790339U, 16384U, 0U, 0U, 0U, 67125248U, 0U, 0U, 0U, 134234112U, 0U, 0U, 0U, 201342976U, 0U, 0U, 0U, 268451840U, 0U, 0U, 0U, 335560704U, 0U, 0U, 0U, 402669568U, 0U, 0U, 0U, 469778432U, 0U, 0U, 0U, 536887296U, 0U, 0U, 0U, 603996160U, 0U, 0U, 0U, 671105024U, 0U, 0U, 0U, 738213888U, 0U, 0U, 0U, 805322752U, 0U, 0U, 0U, 872431616U, 0U, 0U, 0U, 939540480U, 0U, 0U, 0U, 1006649344U, 0U, 0U, 0U, 1073758208U, 0U, 0U, 0U, 1140867072U, 0U, 0U, 0U, 1207975936U, 0U, 0U, 0U, 1275084800U, 0U, 0U, 0U, 1342193664U, 0U, 0U, 0U, 1409302528U, 0U, 0U, 0U, 1476411392U, 0U, 0U, 0U, 1543520256U, 0U, 0U, 0U, 1610629120U, 0U, 0U, 0U, 1677737984U, 0U, 0U, 0U, 1744846848U, 0U, 0U, 0U, 1811955712U, 0U, 0U, 0U, 1879064576U, 0U, 0U, 0U, 1946173440U, 0U, 0U, 0U, 2013282304U, 0U, 0U, 0U, 2080391168U, 0U, 0U, 0U, 2147500032U, 0U, 0U, 0U, 2013855811U, 33554432U, 572653568U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 2018836483U, 0U, 0U, 0U, 0U, 1745551361U, 2014183424U, 3585U, 2015625216U, 3649U, 2018443264U, 2147483904U, 2018312192U, 1073741824U, 2016083968U, 0U, 2016149504U, 0U, 2016215040U, 0U, 2016280576U, 0U, 2016346112U, 0U, 2014248960U, 0U, 2015559680U, 3744U, 2015428608U, 3776U, 2015756288U, 0U, 2015821824U, 0U, 2015887360U, 0U, 2015952896U, 0U, 2016018432U, 0U, 2063597573U, 4U, 1U, 0U, 1U, 0U, 0U, 83886080U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 1065353216U, 1065353216U, 1065353216U, 1065353216U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 
0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U}; struct intel_renderstate_rodata const gen9_null_state = {(u32 const *)(& gen9_null_state_relocs), (u32 const *)(& gen9_null_state_batch), 960U}; bool ldv_queue_work_on_503(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_504(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_505(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_506(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_507(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_517(int ldv_func_arg1 , struct workqueue_struct 
/* NOTE(review): this span begins inside the parameter list of the
 * ldv_queue_work_on_517() prototype opened on the previous source line.
 * All code tokens below are byte-identical to the generated original;
 * only comments have been added. */
*ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ;
/* Prototypes of LDV (Linux Driver Verification) workqueue model stubs
 * whose definitions appear elsewhere in this generated file. */
bool ldv_queue_work_on_519(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ;
bool ldv_queue_delayed_work_on_518(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ;
bool ldv_queue_delayed_work_on_521(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ;
void ldv_flush_workqueue_520(struct workqueue_struct *ldv_func_arg1 ) ;
/* External kernel APIs and i915 audio entry points referenced below. */
extern int component_add(struct device * , struct component_ops const * ) ;
extern void component_del(struct device * , struct component_ops const * ) ;
void intel_init_audio(struct drm_device *dev ) ;
void intel_audio_codec_enable(struct intel_encoder *intel_encoder ) ;
void intel_audio_codec_disable(struct intel_encoder *encoder ) ;
void i915_audio_component_init(struct drm_i915_private *dev_priv ) ;
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv ) ;
bool intel_pipe_has_type(struct intel_crtc *crtc , enum intel_output_type type ) ;
extern int drm_av_sync_delay(struct drm_connector * , struct drm_display_mode * ) ;
extern struct drm_connector *drm_select_eld(struct drm_encoder * , struct drm_display_mode * ) ;
/* drm_eld_size(): total size of the ELD (EDID-Like Data) block in bytes.
 * eld[2] is the payload size in 4-byte units; the +1 accounts for the
 * header block preceding the payload (mirrors the upstream drm_eld_size()
 * helper from include/drm/drm_edid.h). */
__inline static int drm_eld_size(uint8_t const *eld ) { { return (((int )*(eld + 2UL) + 1) * 4); } }
/* Table pairing a pixel clock in kHz (first field, e.g. 25175 = 25.175 MHz)
 * with an audio-config register encoding (second field).  Encodings step by
 * 0x10000, i.e. a 4-bit index placed at bits 16-19 of AUD_CONFIG — presumably
 * the AUD_CONFIG_PIXEL_CLOCK_HDMI field; confirm against i915_reg.h. */
static struct __anonstruct_hdmi_audio_clock_444 const hdmi_audio_clock[10U] = { {25175, 0U}, {25200, 65536U}, {27000, 131072U}, {27027, 196608U}, {54000, 262144U}, {54054, 327680U}, {74176, 393216U}, {74250, 458752U}, {148352, 524288U}, {148500, 589824U}};
/* audio_config_hdmi_pixel_clock(): linear scan of hdmi_audio_clock[] for an
 * entry whose clock equals mode->clock; returns that entry's register
 * encoding.  The label/goto structure is CIL's lowering of the original
 * for-loop with break. */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode ) { int i ; long tmp ; long tmp___0 ; { i = 0; goto ldv_48214; ldv_48213: ; if (mode->clock == (int )hdmi_audio_clock[i].clock) { goto ldv_48212; } else { } i = i + 1; ldv_48214: ; if ((unsigned int )i <= 9U) { goto ldv_48213; } else { }
ldv_48212: ;
/* i == 10 means no table entry matched: log (when drm_debug bit 0x4 is set)
 * and fall back to entry 1 (25.2 MHz). */
if (i == 10) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("audio_config_hdmi_pixel_clock", "HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock); } else { } i = 1; } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("audio_config_hdmi_pixel_clock", "Configuring HDMI audio for pixel clock %d (0x%08x)\n", hdmi_audio_clock[i].clock, hdmi_audio_clock[i].config); } else { } return ((u32 )hdmi_audio_clock[i].config); } }
/* intel_eld_uptodate(): returns true when the ELD already latched in the
 * hardware matches connector->eld.  Returns 0 at once if the ELD-valid bits
 * (bits_eldv) are clear in reg_eldv; otherwise clears the ELD access-address
 * field (bits_elda) in reg_elda to rewind the read pointer, then compares
 * the cached ELD dword-by-dword against successive reads of reg_edid. */
static bool intel_eld_uptodate(struct drm_connector *connector , int reg_eldv , uint32_t bits_eldv , int reg_elda , uint32_t bits_elda , int reg_edid ) { struct drm_i915_private *dev_priv ; uint8_t *eld ; uint32_t tmp ; int i ; uint32_t tmp___0 ; int tmp___1 ; { dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; eld = (uint8_t *)(& connector->eld); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg_eldv, 1); tmp = tmp & bits_eldv; if (tmp == 0U) { return (0); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg_elda, 1); tmp = ~ bits_elda & tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg_elda, tmp, 1); i = 0; goto ldv_48231; ldv_48230: tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg_edid, 1); if (tmp___0 != *((uint32_t *)eld + (unsigned long )i)) { return (0); } else { } i = i + 1; ldv_48231: tmp___1 = drm_eld_size((uint8_t const *)eld); if (tmp___1 / 4 > i) { goto ldv_48230; } else { } return (1); } }
/* g4x_audio_codec_disable(): clear the ELD-valid bit for the G4X audio
 * codec.  The read at display_mmio_offset + 401440 (0x62020) fetches a codec
 * vendor/device ID used to choose between the two ELD-valid bits (0x2000 vs
 * 0x4000) — the compared constants look like Intel HDA IDs (0x8086xxxx);
 * confirm against i915_reg.h.  NOTE(review): this definition continues on
 * the next source line; the trailing "tmp =" is completed there. */
static void g4x_audio_codec_disable(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; uint32_t eldv ; uint32_t tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("g4x_audio_codec_disable", "Disable audio codec\n"); } else { } tmp =
/* NOTE(review): this span begins mid-statement inside
 * g4x_audio_codec_disable(), completing the "tmp =" assignment opened on the
 * previous source line.  Code tokens are byte-identical to the original. */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 401440U), 1);
/* Pick the ELD-valid bit from the codec ID just read, then clear it in the
 * audio control/status register at 401588 (0x620B4) to invalidate the ELD. */
if (tmp == 2156275713U || tmp == 2156276219U) { eldv = 8192U; } else { eldv = 16384U; } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 401588L, 1); tmp = ~ eldv & tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 401588L, tmp, 1); return; } }
/* g4x_audio_codec_enable(): write connector->eld into the G4X codec's ELD
 * buffer and set the ELD-valid bit.  Skips the rewrite entirely when
 * intel_eld_uptodate() reports the hardware copy already matches. */
static void g4x_audio_codec_enable(struct drm_connector *connector , struct intel_encoder *encoder , struct drm_display_mode *mode ) { struct drm_i915_private *dev_priv ; uint8_t *eld ; uint32_t eldv ; uint32_t tmp ; int len ; int i ; long tmp___0 ; bool tmp___1 ; int _min1 ; int tmp___2 ; int _min2 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; eld = (uint8_t *)(& connector->eld); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("g4x_audio_codec_enable", "Enable audio codec, %u bytes ELD\n", (int )*(eld + 2UL)); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 401440U), 1); if (tmp == 2156275713U || tmp == 2156276219U) { eldv = 8192U; } else { eldv = 16384U; } tmp___1 = intel_eld_uptodate(connector, 401588, eldv, 401588, 480U, 401676); if ((int )tmp___1) { return; } else { }
/* Clear ELD-valid and the ELD address field (0x1E0 = bits 5-9), read the
 * hardware ELD buffer size from bits 9-13, and cap the write length by it
 * (len is in dwords: min(drm_eld_size/4, hw buffer size)). */
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 401588L, 1); tmp = ~ (eldv | 480U) & tmp; len = (int )(tmp >> 9) & 31; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 401588L, tmp, 1); tmp___2 = drm_eld_size((uint8_t const *)eld); _min1 = tmp___2 / 4; _min2 = len; len = _min1 < _min2 ?
_min1 : _min2;
/* Note this debug message tests drm_debug bit 0x2 (unlike the 0x4 used
 * elsewhere in this file) — that asymmetry is in the original source. */
tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("g4x_audio_codec_enable", "ELD size %d\n", len); } else { }
/* Stream the ELD dwords into the data register at 401676 (0x6210C), then
 * set the ELD-valid bit. */
i = 0; goto ldv_48256; ldv_48255: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 401676L, *((uint32_t *)eld + (unsigned long )i), 1); i = i + 1; ldv_48256: ; if (i < len) { goto ldv_48255; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 401588L, 1); tmp = tmp | eldv; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 401588L, tmp, 1); return; } }
/* hsw_audio_codec_disable(): Haswell+ path.  Register (pipe + 1616) * 256
 * is the per-pipe AUD_CONFIG bank at 0x65000 + pipe*0x100; 413888 (0x650C0)
 * appears to be AUD_PIN_ELD_CP_VLD with 4 flag bits per pipe.  The AUD_CONFIG
 * masking clears the N-value-index bit (bit 29), sets N-programming-enable
 * (bit 28), and zeroes the upper (bits 20-27) and lower (bits 4-15) N-value
 * fields; the N-value-index bit is re-set for DisplayPort (output type 7 —
 * presumably INTEL_OUTPUT_DISPLAYPORT).  Finally the per-pipe ELD-valid
 * (1 << pipe*4) and output-enable (4 << pipe*4) flags are cleared. */
static void hsw_audio_codec_disable(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; uint32_t tmp ; long tmp___0 ; bool tmp___1 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("hsw_audio_codec_disable", "Disable audio codec on pipe %c\n", (int )pipe + 65); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe + 1616) * 256), 1); tmp = tmp & 3758096383U; tmp = tmp | 268435456U; tmp = tmp & 4027580415U; tmp = tmp & 4294901775U; tmp___1 = intel_pipe_has_type(intel_crtc, 7); if ((int )tmp___1) { tmp = tmp | 536870912U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pipe + 1616) * 256), tmp, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 413888L, 1); tmp = (uint32_t )(~ (1 << (int )pipe * 4)) & tmp; tmp = (uint32_t )(~ (4 << (int )pipe * 4)) & tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 413888L, tmp, 1); return; } }
/* hsw_audio_codec_enable(): Haswell+ ELD upload and audio enable.
 * NOTE(review): this definition continues on the next source line — the
 * local-variable declarations are completed there. */
static void hsw_audio_codec_enable(struct drm_connector *connector , struct intel_encoder *encoder , struct drm_display_mode *mode ) { struct drm_i915_private *dev_priv ; struct intel_crtc
/* NOTE(review): this span begins inside the local-variable declarations of
 * hsw_audio_codec_enable(), opened on the previous source line.  Code tokens
 * are byte-identical to the generated original. */
*intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; uint8_t const *eld ; uint32_t tmp ; int len ; int i ; int tmp___0 ; long tmp___1 ; int _min1 ; int tmp___2 ; int _min2 ; u32 tmp___3 ; bool tmp___4 ; { dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; eld = (uint8_t const *)(& connector->eld); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { tmp___0 = drm_eld_size(eld); drm_ut_debug_printk("hsw_audio_codec_enable", "Enable audio codec on pipe %c, %u bytes ELD\n", (int )pipe + 65, tmp___0); } else { }
/* At 413888 (0x650C0, AUD_PIN_ELD_CP_VLD-like): set the per-pipe audio
 * output-enable flag (4 << pipe*4) and clear ELD-valid (1 << pipe*4) while
 * the new ELD is written. */
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 413888L, 1); tmp = (uint32_t )(4 << (int )pipe * 4) | tmp; tmp = (uint32_t )(~ (1 << (int )pipe * 4)) & tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 413888L, tmp, 1);
/* Reset the ELD access-address field (clearing 0x3E0, bits 5-9) in the
 * per-pipe DIP/ELD control register at pipe*256 + 413876 (0x650B4). */
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 256 + 413876), 1); tmp = tmp & 4294966303U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 256 + 413876), tmp, 1);
/* Cap the upload at 84 bytes (hardware ELD buffer limit — confirm against
 * i915 docs); len is in bytes here, divided by 4 in the loop bound below. */
tmp___2 = drm_eld_size(eld); _min1 = tmp___2; _min2 = 84; len = _min1 < _min2 ?
_min1 : _min2;
/* Stream the ELD dwords into the per-pipe EDID/ELD data register at
 * pipe*256 + 413776 (0x650A0), then set the per-pipe ELD-valid flag. */
i = 0; goto ldv_48287; ldv_48286: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 256 + 413776), *((uint32_t *)eld + (unsigned long )i), 1); i = i + 1; ldv_48287: ; if (len / 4 > i) { goto ldv_48286; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 413888L, 1); tmp = (uint32_t )(1 << (int )pipe * 4) | tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 413888L, tmp, 1);
/* Program AUD_CONFIG at (pipe + 1616) * 256: clear N-value-index (bit 29),
 * N-programming-enable (bit 28) and the HDMI pixel-clock field (bits 16-19);
 * then either re-set N-value-index for DisplayPort (output type 7) or OR in
 * the HDMI pixel-clock encoding from audio_config_hdmi_pixel_clock(). */
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe + 1616) * 256), 1); tmp = tmp & 3758096383U; tmp = tmp & 4026531839U; tmp = tmp & 4293984255U; tmp___4 = intel_pipe_has_type(intel_crtc, 7); if ((int )tmp___4) { tmp = tmp | 536870912U; } else { tmp___3 = audio_config_hdmi_pixel_clock(mode); tmp = tmp___3 | tmp; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pipe + 1616) * 256), tmp, 1); return; } }
/* ilk_audio_codec_disable(): Ironlake-era path.  WARNs and bails out when
 * port == PORT_A (port value 0), which has no audio.  NOTE(review): this
 * definition continues on the next source line — the trailing "if" belongs
 * to the WARN_ON expansion completed there. */
static void ilk_audio_codec_disable(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; enum port port ; enum pipe pipe ; uint32_t tmp___0 ; uint32_t eldv ; int aud_config ; int aud_cntrl_st2 ; long tmp___1 ; int __ret_warn_on ; long tmp___2 ; long tmp___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; bool tmp___4 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_dig_port(& encoder->base); intel_dig_port = tmp; port = intel_dig_port->port; pipe = intel_crtc->pipe; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ilk_audio_codec_disable", "Disable audio codec on port %c, pipe %c\n", (unsigned int )port + 65U, (int )pipe + 65); } else { } __ret_warn_on = (unsigned int )port == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if
/* NOTE(review): this span begins inside the expanded WARN_ON(port == PORT_A)
 * of ilk_audio_codec_disable(), opened on the previous source line.  Code
 * tokens are byte-identical to the generated original. */
(tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_audio.c", 272, "WARN_ON(port == PORT_A)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { return; } else { }
/* Select the register bank by PCH generation: pch_type == 1 looks like the
 * IBX bank (aud_cntrl_st2 = 0xE20C0); the byte flag at offset 45 of
 * drm_i915_private selects the 0x1E20C0 bank (presumably the Valleyview
 * check inlined by CIL — confirm); otherwise the CPT bank (0xE50C0). */
__p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___0->pch_type == 1U) { aud_config = ((int )pipe + 3616) * 256; aud_cntrl_st2 = 925888; } else { __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { aud_config = ((int )pipe + 7712) * 256; aud_cntrl_st2 = 1974464; } else { aud_config = ((int )pipe + 3664) * 256; aud_cntrl_st2 = 938176; } }
/* AUD_CONFIG teardown: clear N-value-index (bit 29), set N-programming-
 * enable (bit 28), zero the upper/lower N-value fields, re-set the
 * N-value-index bit for DisplayPort (output type 7). */
tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )aud_config, 1); tmp___0 = tmp___0 & 3758096383U; tmp___0 = tmp___0 | 268435456U; tmp___0 = tmp___0 & 4027580415U; tmp___0 = tmp___0 & 4294901775U; tmp___4 = intel_pipe_has_type(intel_crtc, 7); if ((int )tmp___4) { tmp___0 = tmp___0 | 536870912U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aud_config, tmp___0, 1);
/* ELD-valid bit for this port: 1 << ((port - 1) * 4); the + 0x3FFFFFFF and
 * unsigned multiply are CIL's wraparound encoding of (port - 1). */
eldv = (uint32_t )(1 << (int )(((unsigned int )port + 1073741823U) * 4U)); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )aud_cntrl_st2, 1); tmp___0 = ~ eldv & tmp___0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aud_cntrl_st2, tmp___0, 1); return; } }
/* ilk_audio_codec_enable(): Ironlake-era ELD upload.  Same structure as the
 * HSW path but with per-generation register banks (IBX / 0x1E2xxx bank /
 * CPT) chosen below, and per-port rather than per-pipe ELD-valid bits. */
static void ilk_audio_codec_enable(struct drm_connector *connector , struct intel_encoder *encoder , struct drm_display_mode *mode ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; enum port port ; enum pipe pipe ; uint8_t *eld ; uint32_t eldv ; uint32_t tmp___0 ; int len ; int i ; int hdmiw_hdmiedid ; int aud_config ; int aud_cntl_st ; int aud_cntrl_st2 ; int tmp___1 ; long tmp___2 ; int __ret_warn_on ; long tmp___3 ; long tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int _min1 ; int tmp___5 ; int _min2 ; u32 tmp___6 ; bool tmp___7 ; { dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_dig_port(& encoder->base); intel_dig_port = tmp; port = intel_dig_port->port; pipe = intel_crtc->pipe; eld = (uint8_t *)(& connector->eld); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { tmp___1 = drm_eld_size((uint8_t const *)eld); drm_ut_debug_printk("ilk_audio_codec_enable", "Enable audio codec on port %c, pipe %c, %u bytes ELD\n", (unsigned int )port + 65U, (int )pipe + 65, tmp___1); } else { }
/* PORT_A has no audio: WARN and bail, as in the disable path. */
__ret_warn_on = (unsigned int )port == 0U; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_audio.c", 326, "WARN_ON(port == PORT_A)"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { return; } else { }
/* Per-generation register banks: ELD data, AUD_CONFIG, control/status and
 * control/status-2 registers (same bank-selection logic as the disable
 * path above, extended with the ELD data and aud_cntl_st registers). */
__p___0 = to_i915((struct drm_device const *)connector->dev); if ((unsigned int )__p___0->pch_type == 1U) { hdmiw_hdmiedid = (int )pipe * 256 + 925776; aud_config = ((int )pipe + 3616) * 256; aud_cntl_st = (int )pipe * 256 + 925876; aud_cntrl_st2 = 925888; } else { __p = to_i915((struct drm_device const *)connector->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { hdmiw_hdmiedid = (int )pipe * 256 + 1974352; aud_config = ((int )pipe + 7712) * 256; aud_cntl_st = (int )pipe * 256 + 1974452; aud_cntrl_st2 = 1974464; } else { hdmiw_hdmiedid = (int )pipe * 256 + 938064; aud_config = ((int )pipe + 3664) * 256; aud_cntl_st = (int )pipe * 256 + 938164; aud_cntrl_st2 = 938176; } }
/* Invalidate the ELD for this port, rewind the ELD address field (bits 5-9),
 * upload at most 84 bytes of ELD dword-by-dword, then set ELD-valid and
 * program AUD_CONFIG (DP N-value-index vs HDMI pixel-clock encoding). */
eldv =
(uint32_t )(1 << (int )(((unsigned int )port + 1073741823U) * 4U)); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )aud_cntrl_st2, 1); tmp___0 = ~ eldv & tmp___0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aud_cntrl_st2, tmp___0, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )aud_cntl_st, 1); tmp___0 = tmp___0 & 4294966303U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aud_cntl_st, tmp___0, 1); tmp___5 = drm_eld_size((uint8_t const *)eld); _min1 = tmp___5; _min2 = 84; len = _min1 < _min2 ? _min1 : _min2; i = 0; goto ldv_48358; ldv_48357: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )hdmiw_hdmiedid, *((uint32_t *)eld + (unsigned long )i), 1); i = i + 1; ldv_48358: ; if (len / 4 > i) { goto ldv_48357; } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )aud_cntrl_st2, 1); tmp___0 = tmp___0 | eldv; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aud_cntrl_st2, tmp___0, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )aud_config, 1); tmp___0 = tmp___0 & 3758096383U; tmp___0 = tmp___0 & 4026531839U; tmp___0 = tmp___0 & 4293984255U; tmp___7 = intel_pipe_has_type(intel_crtc, 7); if ((int )tmp___7) { tmp___0 = tmp___0 | 536870912U; } else { tmp___6 = audio_config_hdmi_pixel_clock(mode); tmp___0 = tmp___6 | tmp___0; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aud_config, tmp___0, 1); return; } }
/* intel_audio_codec_enable(): generation dispatcher for the codec-enable
 * paths above.  NOTE(review): this definition runs past the end of the
 * visible chunk; its tokens are preserved unchanged up to the cut. */
void intel_audio_codec_enable(struct intel_encoder *intel_encoder ) { struct drm_encoder *encoder ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *mode ; struct drm_connector *connector ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; long tmp ; bool tmp___0 ; int tmp___1 ; { encoder = & intel_encoder->base; __mptr = (struct drm_crtc const *)encoder->crtc; crtc = (struct intel_crtc *)__mptr; mode = & (crtc->config)->base.adjusted_mode; dev = encoder->dev; dev_priv = (struct
drm_i915_private *)dev->dev_private; connector = drm_select_eld(encoder, mode); if ((unsigned long )connector == (unsigned long )((struct drm_connector *)0)) { return; } else { } tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_audio_codec_enable", "ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, (connector->encoder)->base.id, (connector->encoder)->name); } else { } connector->eld[5] = (unsigned int )connector->eld[5] & 243U; tmp___0 = intel_pipe_has_type(crtc, 7); if ((int )tmp___0) { connector->eld[5] = (uint8_t )((unsigned int )connector->eld[5] | 4U); } else { } tmp___1 = drm_av_sync_delay(connector, mode); connector->eld[6] = (uint8_t )(tmp___1 / 2); if ((unsigned long )dev_priv->display.audio_codec_enable != (unsigned long )((void (*)(struct drm_connector * , struct intel_encoder * , struct drm_display_mode * ))0)) { (*(dev_priv->display.audio_codec_enable))(connector, intel_encoder, mode); } else { } return; } } void intel_audio_codec_disable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->display.audio_codec_disable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(dev_priv->display.audio_codec_disable))(encoder); } else { } return; } } void intel_init_audio(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { dev_priv->display.audio_codec_enable = & g4x_audio_codec_enable; dev_priv->display.audio_codec_disable = & g4x_audio_codec_disable; } else { __p___2 
= to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { dev_priv->display.audio_codec_enable = & ilk_audio_codec_enable; dev_priv->display.audio_codec_disable = & ilk_audio_codec_disable; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { dev_priv->display.audio_codec_enable = & hsw_audio_codec_enable; dev_priv->display.audio_codec_disable = & hsw_audio_codec_disable; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { dev_priv->display.audio_codec_enable = & hsw_audio_codec_enable; dev_priv->display.audio_codec_disable = & hsw_audio_codec_disable; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { dev_priv->display.audio_codec_enable = & ilk_audio_codec_enable; dev_priv->display.audio_codec_disable = & ilk_audio_codec_disable; } else { } } } } } return; } } static void i915_audio_component_get_power(struct device *dev ) { struct drm_i915_private *tmp ; { tmp = dev_to_i915(dev); intel_display_power_get(tmp, 22); return; } } static void i915_audio_component_put_power(struct device *dev ) { struct drm_i915_private *tmp ; { tmp = dev_to_i915(dev); intel_display_power_put(tmp, 22); return; } } static void i915_audio_component_codec_wake_override(struct device *dev , bool enable ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; u32 tmp___0 ; struct drm_i915_private *__p ; { tmp = dev_to_i915(dev); dev_priv = tmp; __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return; } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 417552L, 1); tmp___0 = tmp___0 & 4294934527U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 417552L, tmp___0, 1); usleep_range(1000UL, 1500UL); if ((int )enable) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 417552L, 1); tmp___0 = 
/* NOTE(review): CIL-normalized verifier dump; this physical line begins
   INSIDE i915_audio_component_codec_wake_override(). The leading
   expression is the right-hand side of the "tmp___0 = ..." assignment
   started on the previous line: OR in bit 0x8000 (codec wake override)
   and write it back to MMIO register 417552 (0x65F90), then sleep
   1.0-1.5 ms to let the codec notice. */
tmp___0 | 32768U;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 417552L, tmp___0, 1);
usleep_range(1000UL, 1500UL);
} else { }
return;
} }
/* i915_audio_component_get_cdclk_freq - report the display core clock
 * frequency to the bound HDA audio component.
 *
 * Flattened WARN_ON_ONCE(!HAS_DDI(dev_priv)) pattern: the byte at
 * dev_priv offset 46 is presumably the HAS_DDI flag collapsed by CIL --
 * TODO confirm against struct drm_i915_private layout. When the flag is
 * clear this warns once (via the __warned latch) and returns -19
 * (-ENODEV). Otherwise it holds a display power reference (domain 22)
 * around the ->get_display_clock_speed() hook and returns its result. */
static int i915_audio_component_get_cdclk_freq(struct device *dev )
{
  struct drm_i915_private *dev_priv ;
  struct drm_i915_private *tmp ;
  int ret ;
  bool __warned ;
  int __ret_warn_once ;
  struct drm_i915_private *__p ;
  int __ret_warn_on ;
  long tmp___0 ;
  long tmp___1 ;
  long tmp___2 ;
  long tmp___3 ;
  {
  tmp = dev_to_i915(dev);
  dev_priv = tmp;
  __p = dev_priv;
  __ret_warn_once = (unsigned int )*((unsigned char *)__p + 46UL) == 0U;
  tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L);
  if (tmp___2 != 0L) {
    __ret_warn_on = ! __warned;
    tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
    if (tmp___0 != 0L) {
      /* Expanded WARN slowpath, intel_audio.c:505. */
      warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_audio.c", 505, "WARN_ON_ONCE(!HAS_DDI(dev_priv))");
    } else { }
    tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
    if (tmp___1 != 0L) {
      /* Latch so the warning fires only once. */
      __warned = 1;
    } else { }
  } else { }
  tmp___3 = ldv__builtin_expect(__ret_warn_once != 0, 0L);
  if (tmp___3 != 0L) {
    return (-19);
  } else { }
  intel_display_power_get(dev_priv, 22);
  ret = (*(dev_priv->display.get_display_clock_speed))(dev_priv->dev);
  intel_display_power_put(dev_priv, 22);
  return (ret);
} }
/* Ops vtable handed to the snd-hda side through the component
 * framework: owner module plus the four callbacks defined above. */
static struct i915_audio_component_ops const i915_audio_component_ops = {& __this_module, & i915_audio_component_get_power, & i915_audio_component_put_power, & i915_audio_component_codec_wake_override, & i915_audio_component_get_cdclk_freq};
/* i915_audio_component_bind - component-framework bind callback.
 * Body continues on the next physical line of this dump: the
 * __ret_warn_on expression below is the first half of the flattened
 * WARN_ON(acomp->ops || acomp->dev) check (already-bound guard). */
static int i915_audio_component_bind(struct device *i915_dev , struct device *hda_dev , void *data )
{
  struct i915_audio_component *acomp ;
  int __ret_warn_on ;
  long tmp ;
  long tmp___0 ;
  {
  acomp = (struct i915_audio_component *)data;
  __ret_warn_on = (unsigned long )acomp->ops != (unsigned long )((struct i915_audio_component_ops const *)0)
/* NOTE(review): this physical line begins INSIDE
   i915_audio_component_bind(); the leading "||" continues the
   "__ret_warn_on = ..." expression started on the previous line
   (flattened WARN_ON(acomp->ops || acomp->dev): warn when the
   component is already bound). */
|| (unsigned long )acomp->dev != (unsigned long )((struct device *)0);
tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp != 0L) {
  /* Expanded WARN slowpath, intel_audio.c:529. */
  warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_audio.c", 529, "WARN_ON(acomp->ops || acomp->dev)");
} else { }
tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___0 != 0L) {
  /* Already bound: -17 is -EEXIST. */
  return (-17);
} else { }
/* Publish the i915 ops table and device to the audio component. */
acomp->ops = & i915_audio_component_ops;
acomp->dev = i915_dev;
return (0);
} }
/* i915_audio_component_unbind - undo i915_audio_component_bind() by
 * clearing the published ops/dev pointers; hda_dev is unused here. */
static void i915_audio_component_unbind(struct device *i915_dev , struct device *hda_dev , void *data )
{
  struct i915_audio_component *acomp ;
  {
  acomp = (struct i915_audio_component *)data;
  acomp->ops = (struct i915_audio_component_ops const *)0;
  acomp->dev = (struct device *)0;
  return;
} }
/* bind/unbind pair registered with the generic component framework. */
static struct component_ops const i915_audio_component_bind_ops = {& i915_audio_component_bind, & i915_audio_component_unbind};
/* i915_audio_component_init - register this GPU's device with the
 * component framework so the HDA driver can bind to it. On failure the
 * error is logged and the function returns without setting the
 * registered flag; on success the flag gates later cleanup. */
void i915_audio_component_init(struct drm_i915_private *dev_priv )
{
  int ret ;
  {
  ret = component_add((dev_priv->dev)->dev, & i915_audio_component_bind_ops);
  if (ret < 0) {
    drm_err("failed to add audio component (%d)\n", ret);
    return;
  } else { }
  dev_priv->audio_component_registered = 1;
  return;
} }
/* i915_audio_component_cleanup - deregister from the component
 * framework if init succeeded; the "if (!" guard is completed on the
 * next physical line of this dump (tests audio_component_registered). */
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv )
{
  {
  if (!
dev_priv->audio_component_registered) { return; } else { } component_del((dev_priv->dev)->dev, & i915_audio_component_bind_ops); dev_priv->audio_component_registered = 0; return; } } int ldv_retval_5 ; void ldv_initialize_component_ops_73(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1416UL); i915_audio_component_bind_ops_group0 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1416UL); i915_audio_component_bind_ops_group1 = (struct device *)tmp___0; return; } } void ldv_initialize_i915_audio_component_ops_74(void) { void *tmp ; { tmp = ldv_init_zalloc(1416UL); i915_audio_component_ops_group0 = (struct device *)tmp; return; } } void ldv_main_exported_74(void) { bool ldvarg291 ; int tmp ; { ldv_memset((void *)(& ldvarg291), 0, 1UL); tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_74 == 1) { i915_audio_component_get_power(i915_audio_component_ops_group0); ldv_state_variable_74 = 1; } else { } goto ldv_48480; case 1: ; if (ldv_state_variable_74 == 1) { i915_audio_component_get_cdclk_freq(i915_audio_component_ops_group0); ldv_state_variable_74 = 1; } else { } goto ldv_48480; case 2: ; if (ldv_state_variable_74 == 1) { i915_audio_component_put_power(i915_audio_component_ops_group0); ldv_state_variable_74 = 1; } else { } goto ldv_48480; case 3: ; if (ldv_state_variable_74 == 1) { i915_audio_component_codec_wake_override(i915_audio_component_ops_group0, (int )ldvarg291); ldv_state_variable_74 = 1; } else { } goto ldv_48480; default: ldv_stop(); } ldv_48480: ; return; } } void ldv_main_exported_73(void) { void *ldvarg125 ; void *tmp ; void *ldvarg124 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg125 = tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg124 = tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_73 == 2) { i915_audio_component_unbind(i915_audio_component_bind_ops_group1, i915_audio_component_bind_ops_group0, ldvarg125); ldv_state_variable_73 = 1; ref_cnt = 
ref_cnt - 1; } else { } goto ldv_48491; case 1: ; if (ldv_state_variable_73 == 1) { ldv_retval_5 = i915_audio_component_bind(i915_audio_component_bind_ops_group1, i915_audio_component_bind_ops_group0, ldvarg124); if (ldv_retval_5 == 0) { ldv_state_variable_73 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_48491; default: ldv_stop(); } ldv_48491: ; return; } } bool ldv_queue_work_on_517(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_518(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_519(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_520(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_521(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long 
ldv__builtin_expect(long exp , long c ) ; extern void *kmemdup(void const * , size_t , gfp_t ) ; bool ldv_queue_work_on_531(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_533(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_532(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_535(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_534(struct workqueue_struct *ldv_func_arg1 ) ; extern int dmi_check_system(struct dmi_system_id const * ) ; extern void *pci_map_rom(struct pci_dev * , size_t * ) ; extern void pci_unmap_rom(struct pci_dev * , void * ) ; extern void drm_mode_debug_printmodeline(struct drm_display_mode const * ) ; extern void drm_mode_set_name(struct drm_display_mode * ) ; void intel_setup_bios(struct drm_device *dev ) ; int intel_parse_bios(struct drm_device *dev ) ; bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv , unsigned int pin ) ; static int panel_type ; static void const *find_section(void const *_bdb , int section_id ) { struct bdb_header const *bdb ; u8 const *base ; int index ; u16 total ; u16 current_size ; u8 current_id ; { bdb = (struct bdb_header const *)_bdb; base = (u8 const *)_bdb; index = 0; index = (int )bdb->header_size + index; total = bdb->bdb_size; goto ldv_48103; ldv_48102: current_id = *(base + (unsigned long )index); index = index + 1; current_size = *((u16 const *)base + (unsigned long )index); index = index + 2; if ((int )current_size + index > (int )total) { return ((void const *)0); } else { } if ((int )current_id == section_id) { return ((void const *)base + (unsigned long )index); } else { } index = (int )current_size + index; ldv_48103: ; if 
/* NOTE(review): this physical line begins INSIDE find_section(); the
   leading condition is the loop guard of the CIL-rewritten scan over
   BDB sections: continue while at least a 3-byte section header
   (1-byte id + 2-byte size) still fits before `total`. */
(index + 3 < (int )total) {
  goto ldv_48102;
} else { }
/* Requested section id was not found in the BDB. */
return ((void const *)0);
} }
/* get_blocksize - read the 16-bit size field stored just before a BDB
 * section payload pointer `p`.
 * NOTE(review): `(u16 *)p + 0xfffffffffffffffeUL` is u16-element
 * arithmetic, i.e. p minus 4 bytes; the upstream helper computes the
 * offset on a byte pointer -- confirm this offset survived the CIL
 * translation correctly before trusting the value read here. */
static u16 get_blocksize(void const *p )
{
  u16 *block_ptr ;
  u16 block_size ;
  {
  block_ptr = (u16 *)p + 0xfffffffffffffffeUL;
  block_size = *block_ptr;
  return (block_size);
} }
/* fill_detail_timing_data - expand a packed VBT lvds_dvo_timing record
 * into a drm_display_mode: each h/v field is a hi-byte<<8 | lo-byte
 * pair, sync/blank values are offsets from the active region, and the
 * pixel clock is stored in 10 kHz units (hence * 10). type = 8U is
 * presumably DRM_MODE_TYPE_PREFERRED (1 << 3) -- confirm. */
static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode , struct lvds_dvo_timing const *dvo_timing )
{
  {
  panel_fixed_mode->hdisplay = ((int )dvo_timing->hactive_hi << 8) | (int )dvo_timing->hactive_lo;
  panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + (((int )dvo_timing->hsync_off_hi << 8) | (int )dvo_timing->hsync_off_lo);
  panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + (int )dvo_timing->hsync_pulse_width;
  panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + (((int )dvo_timing->hblank_hi << 8) | (int )dvo_timing->hblank_lo);
  panel_fixed_mode->vdisplay = ((int )dvo_timing->vactive_hi << 8) | (int )dvo_timing->vactive_lo;
  panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + (int )dvo_timing->vsync_off;
  panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + (int )dvo_timing->vsync_pulse_width;
  panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + (((int )dvo_timing->vblank_hi << 8) | (int )dvo_timing->vblank_lo);
  panel_fixed_mode->clock = (int )dvo_timing->clock * 10;
  panel_fixed_mode->type = 8U;
  /* Flag bits: 1U/2U look like PHSYNC/NHSYNC and 4U/8U like
     PVSYNC/NVSYNC. NOTE(review): BOTH conditions below read the same
     byte (dvo_timing + 17UL) with no distinguishing bit mask; upstream
     tests two distinct bitfields (hsync vs. vsync polarity) within that
     byte, so the mask appears lost in this CIL dump -- verify against
     the original intel_bios.c before relying on the vsync branch. */
  if ((unsigned int )*((unsigned char *)dvo_timing + 17UL) != 0U) {
    panel_fixed_mode->flags = panel_fixed_mode->flags | 1U;
  } else {
    panel_fixed_mode->flags = panel_fixed_mode->flags | 2U;
  }
  if ((unsigned int )*((unsigned char *)dvo_timing + 17UL) != 0U) {
    panel_fixed_mode->flags = panel_fixed_mode->flags | 4U;
  } else {
    panel_fixed_mode->flags = panel_fixed_mode->flags | 8U;
  }
  /* Clamp: totals must strictly contain the sync pulse end. */
  if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) {
    panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
  } else { }
  if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) {
    panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
  } else { }
  /* Function continues on the next physical line of this dump
     (drm_mode_set_name() call and closing braces). */
drm_mode_set_name(panel_fixed_mode); return; } } static bool lvds_dvo_timing_equal_size(struct lvds_dvo_timing const *a , struct lvds_dvo_timing const *b ) { { if ((int const )a->hactive_hi != (int const )b->hactive_hi || (int )((unsigned char )a->hactive_lo) != (int )((unsigned char )b->hactive_lo)) { return (0); } else { } if ((int const )a->hsync_off_hi != (int const )b->hsync_off_hi || (int )((unsigned char )a->hsync_off_lo) != (int )((unsigned char )b->hsync_off_lo)) { return (0); } else { } if ((int )((unsigned char )a->hsync_pulse_width) != (int )((unsigned char )b->hsync_pulse_width)) { return (0); } else { } if ((int const )a->hblank_hi != (int const )b->hblank_hi || (int )((unsigned char )a->hblank_lo) != (int )((unsigned char )b->hblank_lo)) { return (0); } else { } if ((int const )a->vactive_hi != (int const )b->vactive_hi || (int )((unsigned char )a->vactive_lo) != (int )((unsigned char )b->vactive_lo)) { return (0); } else { } if ((int const )a->vsync_off != (int const )b->vsync_off) { return (0); } else { } if ((int const )a->vsync_pulse_width != (int const )b->vsync_pulse_width) { return (0); } else { } if ((int const )a->vblank_hi != (int const )b->vblank_hi || (int )((unsigned char )a->vblank_lo) != (int )((unsigned char )b->vblank_lo)) { return (0); } else { } return (1); } } static struct lvds_dvo_timing const *get_lvds_dvo_timing(struct bdb_lvds_lfp_data const *lvds_lfp_data , struct bdb_lvds_lfp_data_ptrs const *lvds_lfp_data_ptrs , int index ) { int lfp_data_size ; int dvo_timing_offset ; char *entry ; { lfp_data_size = (int )lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - (int )lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; dvo_timing_offset = (int )lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - (int )lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; entry = (char *)(& lvds_lfp_data->data) + (unsigned long )(lfp_data_size * index); return ((struct lvds_dvo_timing const *)entry + (unsigned long )dvo_timing_offset); } } static struct lvds_fp_timing 
const *get_lvds_fp_timing(struct bdb_header const *bdb , struct bdb_lvds_lfp_data const *data , struct bdb_lvds_lfp_data_ptrs const *ptrs , int index ) { size_t data_ofs ; u16 data_size ; size_t ofs ; { data_ofs = (size_t )((long )data - (long )bdb); data_size = *((u16 const *)data + 0xffffffffffffffffUL); if ((unsigned int )index > 15U) { return ((struct lvds_fp_timing const *)0); } else { } ofs = (size_t )ptrs->ptr[index].fp_timing_offset; if (ofs < data_ofs || ofs + 46UL > (size_t )data_size + data_ofs) { return ((struct lvds_fp_timing const *)0); } else { } return ((struct lvds_fp_timing const *)bdb + ofs); } } static void parse_lfp_panel_data(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_lvds_options const *lvds_options ; struct bdb_lvds_lfp_data const *lvds_lfp_data ; struct bdb_lvds_lfp_data_ptrs const *lvds_lfp_data_ptrs ; struct lvds_dvo_timing const *panel_dvo_timing ; struct lvds_fp_timing const *fp_timing ; struct drm_display_mode *panel_fixed_mode ; int i ; int downclock ; int drrs_mode ; void const *tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; void const *tmp___3 ; void const *tmp___4 ; void *tmp___5 ; long tmp___6 ; struct lvds_dvo_timing const *dvo_timing ; bool tmp___7 ; long tmp___8 ; long tmp___9 ; { tmp = find_section((void const *)bdb, 40); lvds_options = (struct bdb_lvds_options const *)tmp; if ((unsigned long )lvds_options == (unsigned long )((struct bdb_lvds_options const *)0)) { return; } else { } dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; if ((unsigned int )((unsigned char )lvds_options->panel_type) == 255U) { return; } else { } panel_type = (int )lvds_options->panel_type; drrs_mode = (int )(lvds_options->dps_panel_type_bits >> panel_type * 2) & 3; switch (drrs_mode) { case 0: dev_priv->vbt.drrs_type = 1; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_lfp_panel_data", "DRRS supported mode is static\n"); } else { } goto 
ldv_48152; case 2: dev_priv->vbt.drrs_type = 2; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_lfp_panel_data", "DRRS supported mode is seamless\n"); } else { } goto ldv_48152; default: dev_priv->vbt.drrs_type = 0; tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("parse_lfp_panel_data", "DRRS not supported (VBT input)\n"); } else { } goto ldv_48152; } ldv_48152: tmp___3 = find_section((void const *)bdb, 42); lvds_lfp_data = (struct bdb_lvds_lfp_data const *)tmp___3; if ((unsigned long )lvds_lfp_data == (unsigned long )((struct bdb_lvds_lfp_data const *)0)) { return; } else { } tmp___4 = find_section((void const *)bdb, 41); lvds_lfp_data_ptrs = (struct bdb_lvds_lfp_data_ptrs const *)tmp___4; if ((unsigned long )lvds_lfp_data_ptrs == (unsigned long )((struct bdb_lvds_lfp_data_ptrs const *)0)) { return; } else { } dev_priv->vbt.lvds_vbt = 1U; panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, (int )lvds_options->panel_type); tmp___5 = kzalloc(208UL, 208U); panel_fixed_mode = (struct drm_display_mode *)tmp___5; if ((unsigned long )panel_fixed_mode == (unsigned long )((struct drm_display_mode *)0)) { return; } else { } fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("parse_lfp_panel_data", "Found panel mode in BIOS VBT tables:\n"); } else { } drm_mode_debug_printmodeline((struct drm_display_mode const *)panel_fixed_mode); downclock = (int )panel_dvo_timing->clock; i = 0; goto ldv_48157; ldv_48156: dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, i); tmp___7 = lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing); if ((int )tmp___7 && (int )dvo_timing->clock < downclock) { downclock = (int )dvo_timing->clock; } else { } i = i + 1; ldv_48157: ; if (i <= 15) 
{ goto ldv_48156; } else { } if ((int )panel_dvo_timing->clock > downclock && i915.lvds_downclock != 0U) { dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = downclock * 10; tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("parse_lfp_panel_data", "LVDS downclock is found in VBT. Normal Clock %dKHz, downclock %dKHz\n", panel_fixed_mode->clock, downclock * 10); } else { } } else { } fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, lvds_lfp_data_ptrs, (int )lvds_options->panel_type); if ((unsigned long )fp_timing != (unsigned long )((struct lvds_fp_timing const *)0)) { if ((int )fp_timing->x_res == panel_fixed_mode->hdisplay && (int )fp_timing->y_res == panel_fixed_mode->vdisplay) { dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val; tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("parse_lfp_panel_data", "VBT initial LVDS value %x\n", dev_priv->vbt.bios_lvds_val); } else { } } else { } } else { } return; } } static void parse_lfp_backlight(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_lfp_backlight_data const *backlight_data ; struct bdb_lfp_backlight_data_entry const *entry ; void const *tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = find_section((void const *)bdb, 43); backlight_data = (struct bdb_lfp_backlight_data const *)tmp; if ((unsigned long )backlight_data == (unsigned long )((struct bdb_lfp_backlight_data const *)0)) { return; } else { } if ((unsigned int )((unsigned char )backlight_data->entry_size) != 6U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_lfp_backlight", "Unsupported backlight data entry size %u\n", (int )backlight_data->entry_size); } else { } return; } else { } entry = (struct bdb_lfp_backlight_data_entry const *)(& backlight_data->data) + (unsigned long )panel_type; dev_priv->vbt.backlight.present = (unsigned int 
)*((unsigned char *)entry + 0UL) == 2U; if (! dev_priv->vbt.backlight.present) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_lfp_backlight", "PWM backlight not present in VBT (type %u)\n", (int )entry->type); } else { } return; } else { } dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = (int )entry->active_low_pwm != 0; dev_priv->vbt.backlight.min_brightness = entry->min_brightness; tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("parse_lfp_backlight", "VBT backlight PWM modulation frequency %u Hz, active %s, min brightness %u, level %u\n", (int )dev_priv->vbt.backlight.pwm_freq_hz, (int )dev_priv->vbt.backlight.active_low_pwm ? (char *)"low" : (char *)"high", (int )dev_priv->vbt.backlight.min_brightness, (int )backlight_data->level[panel_type]); } else { } return; } } static void parse_sdvo_panel_data(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct lvds_dvo_timing const *dvo_timing ; struct drm_display_mode *panel_fixed_mode ; int index ; long tmp ; struct bdb_sdvo_lvds_options const *sdvo_lvds_options ; void const *tmp___0 ; void const *tmp___1 ; void *tmp___2 ; long tmp___3 ; { index = i915.vbt_sdvo_panel_type; if (index == -2) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("parse_sdvo_panel_data", "Ignore SDVO panel mode from BIOS VBT tables.\n"); } else { } return; } else { } if (index == -1) { tmp___0 = find_section((void const *)bdb, 22); sdvo_lvds_options = (struct bdb_sdvo_lvds_options const *)tmp___0; if ((unsigned long )sdvo_lvds_options == (unsigned long )((struct bdb_sdvo_lvds_options const *)0)) { return; } else { } index = (int )sdvo_lvds_options->panel_type; } else { } tmp___1 = find_section((void const *)bdb, 23); dvo_timing = (struct lvds_dvo_timing const *)tmp___1; if ((unsigned long )dvo_timing == (unsigned 
long )((struct lvds_dvo_timing const *)0)) { return; } else { } tmp___2 = kzalloc(208UL, 208U); panel_fixed_mode = (struct drm_display_mode *)tmp___2; if ((unsigned long )panel_fixed_mode == (unsigned long )((struct drm_display_mode *)0)) { return; } else { } fill_detail_timing_data(panel_fixed_mode, dvo_timing + (unsigned long )index); dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("parse_sdvo_panel_data", "Found SDVO panel mode in BIOS VBT tables:\n"); } else { } drm_mode_debug_printmodeline((struct drm_display_mode const *)panel_fixed_mode); return; } } static int intel_bios_ssc_frequency(struct drm_device *dev , bool alternate ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); switch ((int )__p->info.gen) { case 2: ; return ((int )alternate ? 66667 : 48000); case 3: ; case 4: ; return ((int )alternate ? 100000 : 96000); default: ; return ((int )alternate ? 
100000 : 120000); } } } static void parse_general_features(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct drm_device *dev ; struct bdb_general_features const *general ; void const *tmp ; long tmp___0 ; { dev = dev_priv->dev; tmp = find_section((void const *)bdb, 1); general = (struct bdb_general_features const *)tmp; if ((unsigned long )general != (unsigned long )((struct bdb_general_features const *)0)) { dev_priv->vbt.int_tv_support = general->int_tv_support; dev_priv->vbt.int_crt_support = general->int_crt_support; dev_priv->vbt.lvds_use_ssc = general->enable_ssc; dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, (unsigned int )*((unsigned char *)general + 1UL) != 0U); dev_priv->vbt.display_clock_mode = general->display_clock_mode; dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_general_features", "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", (int )dev_priv->vbt.int_tv_support, (int )dev_priv->vbt.int_crt_support, (int )dev_priv->vbt.lvds_use_ssc, dev_priv->vbt.lvds_ssc_freq, (int )dev_priv->vbt.display_clock_mode, (int )dev_priv->vbt.fdi_rx_polarity_inverted); } else { } } else { } return; } } static void parse_general_definitions(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_general_definitions const *general ; void const *tmp ; u16 block_size ; u16 tmp___0 ; int bus_pin ; long tmp___1 ; bool tmp___2 ; long tmp___3 ; { tmp = find_section((void const *)bdb, 2); general = (struct bdb_general_definitions const *)tmp; if ((unsigned long )general != (unsigned long )((struct bdb_general_definitions const *)0)) { tmp___0 = get_blocksize((void const *)general); block_size = tmp___0; if ((unsigned int )block_size > 4U) { bus_pin = (int )general->crt_ddc_gmbus_pin; 
tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_general_definitions", "crt_ddc_bus_pin: %d\n", bus_pin); } else { } tmp___2 = intel_gmbus_is_valid_pin(dev_priv, (unsigned int )bus_pin); if ((int )tmp___2) { dev_priv->vbt.crt_ddc_pin = bus_pin; } else { } } else { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("parse_general_definitions", "BDB_GD too small (%d). Invalid.\n", (int )block_size); } else { } } } else { } return; } } static union child_device_config const *child_device_ptr(struct bdb_general_definitions const *p_defs , int i ) { { return ((union child_device_config const *)(& p_defs->devices) + (unsigned long )((int )p_defs->child_dev_size * i)); } } static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct sdvo_device_mapping *p_mapping ; struct bdb_general_definitions const *p_defs ; union child_device_config const *p_child ; int i ; int child_device_num ; int count ; u16 block_size ; void const *tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; { tmp = find_section((void const *)bdb, 2); p_defs = (struct bdb_general_definitions const *)tmp; if ((unsigned long )p_defs == (unsigned long )((struct bdb_general_definitions const *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "No general definition block is found, unable to construct sdvo mapping.\n"); } else { } return; } else { } if ((unsigned int )((unsigned char )p_defs->child_dev_size) != 33U) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "different child size is found. 
Invalid.\n"); } else { } return; } else { } block_size = get_blocksize((void const *)p_defs); child_device_num = (int )(((unsigned long )block_size - 5UL) / (unsigned long )p_defs->child_dev_size); count = 0; i = 0; goto ldv_48222; ldv_48221: p_child = child_device_ptr(p_defs, i); if ((unsigned int )((unsigned short )p_child->old.device_type) == 0U) { goto ldv_48220; } else { } if ((unsigned int )((unsigned char )p_child->old.slave_addr) != 112U && (unsigned int )((unsigned char )p_child->old.slave_addr) != 114U) { goto ldv_48220; } else { } if ((unsigned int )((unsigned char )p_child->old.dvo_port) != 1U && (unsigned int )((unsigned char )p_child->old.dvo_port) != 2U) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "Incorrect SDVO port. Skip it\n"); } else { } goto ldv_48220; } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "the SDVO device with slave addr %2x is found on %s port\n", (int )p_child->old.slave_addr, (unsigned int )((unsigned char )p_child->old.dvo_port) == 1U ? 
(char *)"SDVOB" : (char *)"SDVOC"); } else { } p_mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings) + ((unsigned long )p_child->old.dvo_port + 0xffffffffffffffffUL); if ((unsigned int )p_mapping->initialized == 0U) { p_mapping->dvo_port = p_child->old.dvo_port; p_mapping->slave_addr = p_child->old.slave_addr; p_mapping->dvo_wiring = p_child->old.dvo_wiring; p_mapping->ddc_pin = p_child->old.ddc_pin; p_mapping->i2c_pin = p_child->old.i2c_pin; p_mapping->initialized = 1U; tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", (int )p_mapping->dvo_port, (int )p_mapping->slave_addr, (int )p_mapping->dvo_wiring, (int )p_mapping->ddc_pin, (int )p_mapping->i2c_pin); } else { } } else { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "Maybe one SDVO port is shared by two SDVO device.\n"); } else { } } if ((unsigned int )((unsigned char )p_child->old.slave2_addr) != 0U) { tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "there exists the slave2_addr. 
Maybe this is a SDVO device with multiple inputs.\n"); } else { } } else { } count = count + 1; ldv_48220: i = i + 1; ldv_48222: ; if (i < child_device_num) { goto ldv_48221; } else { } if (count == 0) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("parse_sdvo_device_mapping", "No SDVO device info is found in VBT\n"); } else { } } else { } return; } } static void parse_driver_features(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_driver_features const *driver___0 ; void const *tmp ; long tmp___0 ; { tmp = find_section((void const *)bdb, 12); driver___0 = (struct bdb_driver_features const *)tmp; if ((unsigned long )driver___0 == (unsigned long )((struct bdb_driver_features const *)0)) { return; } else { } if ((unsigned int )*((unsigned char *)driver___0 + 8UL) == 24U) { dev_priv->vbt.edp_support = 1; } else { } if ((unsigned int )*((unsigned char *)driver___0 + 7UL) != 0U) { dev_priv->render_reclock_avail = 1; } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_driver_features", "DRRS State Enabled:%d\n", (int )driver___0->drrs_enabled); } else { } if ((unsigned int )*((unsigned char *)driver___0 + 17UL) == 0U) { dev_priv->vbt.drrs_type = 0; } else { } return; } } static void parse_edp(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_edp const *edp ; struct edp_power_seq const *edp_pps ; struct edp_link_params const *edp_link_params ; void const *tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; uint8_t vswing ; { tmp = find_section((void const *)bdb, 27); edp = (struct bdb_edp const *)tmp; if ((unsigned long )edp == (unsigned long )((struct bdb_edp const *)0)) { if ((int )dev_priv->vbt.edp_support) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_edp", "No eDP BDB found but eDP panel 
supported.\n"); } else { } } else { } return; } else { } switch ((unsigned int )(edp->color_depth >> panel_type * 2) & 3U) { case 0U: dev_priv->vbt.edp_bpp = 18; goto ldv_48239; case 1U: dev_priv->vbt.edp_bpp = 24; goto ldv_48239; case 2U: dev_priv->vbt.edp_bpp = 30; goto ldv_48239; } ldv_48239: edp_pps = (struct edp_power_seq const *)(& edp->power_seqs) + (unsigned long )panel_type; edp_link_params = (struct edp_link_params const *)(& edp->link_params) + (unsigned long )panel_type; dev_priv->vbt.edp_pps = *edp_pps; switch ((int )edp_link_params->rate) { case 0: dev_priv->vbt.edp_rate = 6; goto ldv_48243; case 1: dev_priv->vbt.edp_rate = 10; goto ldv_48243; default: tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_edp", "VBT has unknown eDP link rate value %u\n", (int )edp_link_params->rate); } else { } goto ldv_48243; } ldv_48243: ; switch ((int )edp_link_params->lanes) { case 0: dev_priv->vbt.edp_lanes = 1; goto ldv_48247; case 1: dev_priv->vbt.edp_lanes = 2; goto ldv_48247; case 3: dev_priv->vbt.edp_lanes = 4; goto ldv_48247; default: tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("parse_edp", "VBT has unknown eDP lane count value %u\n", (int )edp_link_params->lanes); } else { } goto ldv_48247; } ldv_48247: ; switch ((int )edp_link_params->preemphasis) { case 0: dev_priv->vbt.edp_preemphasis = 0; goto ldv_48252; case 1: dev_priv->vbt.edp_preemphasis = 8; goto ldv_48252; case 2: dev_priv->vbt.edp_preemphasis = 16; goto ldv_48252; case 3: dev_priv->vbt.edp_preemphasis = 24; goto ldv_48252; default: tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("parse_edp", "VBT has unknown eDP pre-emphasis value %u\n", (int )edp_link_params->preemphasis); } else { } goto ldv_48252; } ldv_48252: ; switch ((int )edp_link_params->vswing) { case 0: dev_priv->vbt.edp_vswing = 0; goto ldv_48258; case 1: 
dev_priv->vbt.edp_vswing = 1; goto ldv_48258; case 2: dev_priv->vbt.edp_vswing = 2; goto ldv_48258; case 3: dev_priv->vbt.edp_vswing = 3; goto ldv_48258; default: tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("parse_edp", "VBT has unknown eDP voltage swing value %u\n", (int )edp_link_params->vswing); } else { } goto ldv_48258; } ldv_48258: ; if ((unsigned int )((unsigned short )bdb->version) > 172U) { if (i915.edp_vswing != 0) { dev_priv->edp_low_vswing = i915.edp_vswing == 1; } else { vswing = (unsigned int )((uint8_t )(edp->edp_vswing_preemph >> panel_type * 4)) & 15U; dev_priv->edp_low_vswing = (unsigned int )vswing == 0U; } } else { } return; } } static void parse_psr(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_psr const *psr ; struct psr_table const *psr_table ; void const *tmp ; long tmp___0 ; long tmp___1 ; { tmp = find_section((void const *)bdb, 9); psr = (struct bdb_psr const *)tmp; if ((unsigned long )psr == (unsigned long )((struct bdb_psr const *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_psr", "No PSR BDB found.\n"); } else { } return; } else { } psr_table = (struct psr_table const *)(& psr->psr_table) + (unsigned long )panel_type; dev_priv->vbt.psr.full_link = (int )psr_table->full_link != 0; dev_priv->vbt.psr.require_aux_wakeup = (int )psr_table->require_aux_to_wakeup != 0; dev_priv->vbt.psr.idle_frames = (int )psr_table->idle_frames >= 0 ? (15 < (int )psr_table->idle_frames ? 
15 : (int )psr_table->idle_frames) : 0; switch ((int )psr_table->lines_to_wait) { case 0: dev_priv->vbt.psr.lines_to_wait = 0; goto ldv_48272; case 1: dev_priv->vbt.psr.lines_to_wait = 1; goto ldv_48272; case 2: dev_priv->vbt.psr.lines_to_wait = 2; goto ldv_48272; case 3: dev_priv->vbt.psr.lines_to_wait = 3; goto ldv_48272; default: tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_psr", "VBT has unknown PSR lines to wait %u\n", (int )psr_table->lines_to_wait); } else { } goto ldv_48272; } ldv_48272: dev_priv->vbt.psr.tp1_wakeup_time = (int )psr_table->tp1_wakeup_time; dev_priv->vbt.psr.tp2_tp3_wakeup_time = (int )psr_table->tp2_tp3_wakeup_time; return; } } static u8 *goto_next_sequence(u8 *data , int *size ) { u16 len ; int tmp ; { tmp = *size; tmp = tmp - 1; if (tmp < 0) { return ((u8 *)0U); } else { } data = data + 1; ldv_48289: ; switch ((int )*data) { case 1: tmp = tmp + -5; if (tmp < 0) { return ((u8 *)0U); } else { } data = data + 3UL; len = *((u16 *)data); tmp = tmp - (int )len; if (tmp < 0) { return ((u8 *)0U); } else { } data = data + ((unsigned long )len + 2UL); goto ldv_48284; case 2: tmp = tmp + -5; if (tmp < 0) { return ((u8 *)0U); } else { } data = data + 5UL; goto ldv_48284; case 3: tmp = tmp + -3; if (tmp < 0) { return ((u8 *)0U); } else { } data = data + 3UL; goto ldv_48284; default: drm_err("Unknown element\n"); return ((u8 *)0U); } ldv_48284: ; if ((unsigned int )*data == 0U) { goto ldv_48288; } else { } goto ldv_48289; ldv_48288: tmp = tmp - 1; if (tmp < 0) { return ((u8 *)0U); } else { } data = data + 1; *size = tmp; return (data); } } static void parse_mipi(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_mipi_config const *start ; struct bdb_mipi_sequence const *sequence ; struct mipi_config const *config ; struct mipi_pps_data const *pps ; u8 *data ; u8 const *seq_data ; int i ; int panel_id ; int seq_size ; u16 block_size ; void const *tmp ; long 
tmp___0 ; long tmp___1 ; void *tmp___2 ; void *tmp___3 ; void const *tmp___4 ; long tmp___5 ; long tmp___6 ; void *tmp___7 ; int seq_id ; long tmp___8 ; long tmp___9 ; { if ((unsigned int )*((unsigned char *)dev_priv + 41280UL) == 0U) { return; } else { } dev_priv->vbt.dsi.panel_id = 0U; tmp = find_section((void const *)bdb, 52); start = (struct bdb_mipi_config const *)tmp; if ((unsigned long )start == (unsigned long )((struct bdb_mipi_config const *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_mipi", "No MIPI config BDB found"); } else { } return; } else { } tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_mipi", "Found MIPI Config block, panel index = %d\n", panel_type); } else { } config = (struct mipi_config const *)(& start->config) + (unsigned long )panel_type; pps = (struct mipi_pps_data const *)(& start->pps) + (unsigned long )panel_type; tmp___2 = kmemdup((void const *)config, 122UL, 208U); dev_priv->vbt.dsi.config = (struct mipi_config *)tmp___2; if ((unsigned long )dev_priv->vbt.dsi.config == (unsigned long )((struct mipi_config *)0)) { return; } else { } tmp___3 = kmemdup((void const *)pps, 10UL, 208U); dev_priv->vbt.dsi.pps = (struct mipi_pps_data *)tmp___3; if ((unsigned long )dev_priv->vbt.dsi.pps == (unsigned long )((struct mipi_pps_data *)0)) { kfree((void const *)dev_priv->vbt.dsi.config); return; } else { } dev_priv->vbt.dsi.panel_id = 1U; tmp___4 = find_section((void const *)bdb, 53); sequence = (struct bdb_mipi_sequence const *)tmp___4; if ((unsigned long )sequence == (unsigned long )((struct bdb_mipi_sequence const *)0)) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("parse_mipi", "No MIPI Sequence found, parsing complete\n"); } else { } return; } else { } tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { 
drm_ut_debug_printk("parse_mipi", "Found MIPI sequence block\n"); } else { } block_size = get_blocksize((void const *)sequence); dev_priv->vbt.dsi.seq_version = sequence->version; seq_data = (u8 const *)(& sequence->data); i = 0; goto ldv_48307; ldv_48306: panel_id = (int )*seq_data; seq_size = (int )*((u16 *)seq_data + 1U); if (panel_id == panel_type) { goto ldv_48305; } else { } seq_data = seq_data + ((unsigned long )seq_size + 3UL); if ((long )seq_data - (long )(& sequence->data) > (long )block_size) { drm_err("Sequence start is beyond sequence block size, corrupted sequence block\n"); return; } else { } i = i + 1; ldv_48307: ; if (i <= 5) { goto ldv_48306; } else { } ldv_48305: ; if (i == 6) { drm_err("Sequence block detected but no valid configuration\n"); return; } else { } if ((int )block_size < seq_size) { drm_err("Corrupted sequence/size, bailing out\n"); return; } else { } tmp___7 = kmemdup((void const *)seq_data + 3U, (size_t )seq_size, 208U); dev_priv->vbt.dsi.data = (u8 *)tmp___7; if ((unsigned long )dev_priv->vbt.dsi.data == (unsigned long )((u8 *)0U)) { return; } else { } data = dev_priv->vbt.dsi.data; dev_priv->vbt.dsi.size = (u32 )seq_size; ldv_48311: seq_id = (int )*data; if (seq_id <= 5 && seq_id > 0) { dev_priv->vbt.dsi.sequence[seq_id] = data; tmp___8 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("parse_mipi", "Found mipi sequence - %d\n", seq_id); } else { } } else { drm_err("undefined sequence\n"); goto err; } data = goto_next_sequence(data, & seq_size); if ((unsigned long )data == (unsigned long )((u8 *)0U)) { drm_err("Sequence elements going beyond block itself. 
Sequence block parsing failed\n"); goto err; } else { } if ((unsigned int )*data == 0U) { goto ldv_48310; } else { } goto ldv_48311; ldv_48310: tmp___9 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("parse_mipi", "MIPI related vbt parsing complete\n"); } else { } return; err: kfree((void const *)dev_priv->vbt.dsi.data); dev_priv->vbt.dsi.data = (u8 *)0U; memset((void *)(& dev_priv->vbt.dsi.sequence), 0, 48UL); return; } } static void parse_ddi_port(struct drm_i915_private *dev_priv , enum port port , struct bdb_header const *bdb ) { union child_device_config *it ; union child_device_config *child ; struct ddi_vbt_port_info *info ; uint8_t hdmi_level_shift ; int i ; int j ; bool is_dvi ; bool is_hdmi ; bool is_dp ; bool is_edp___0 ; bool is_crt ; uint8_t aux_channel ; int dvo_ports[5U][2U] ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; long tmp___13 ; long tmp___14 ; { child = (union child_device_config *)0; info = (struct ddi_vbt_port_info *)(& dev_priv->vbt.ddi_port_info) + (unsigned long )port; dvo_ports[0][0] = 0; dvo_ports[0][1] = 10; dvo_ports[1][0] = 1; dvo_ports[1][1] = 7; dvo_ports[2][0] = 2; dvo_ports[2][1] = 8; dvo_ports[3][0] = 3; dvo_ports[3][1] = 9; dvo_ports[4][0] = 6; dvo_ports[4][1] = -1; i = 0; goto ldv_48335; ldv_48334: it = dev_priv->vbt.child_dev + (unsigned long )i; j = 0; goto ldv_48333; ldv_48332: ; if (dvo_ports[(unsigned int )port][j] == -1) { goto ldv_48330; } else { } if ((int )it->common.dvo_port == dvo_ports[(unsigned int )port][j]) { if ((unsigned long )child != (unsigned long )((union child_device_config *)0)) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("parse_ddi_port", "More than one child device for port %c in VBT.\n", (unsigned int )port + 65U); } else { } return; } else { } 
child = it; } else { } j = j + 1; ldv_48333: ; if (j <= 1) { goto ldv_48332; } else { } ldv_48330: i = i + 1; ldv_48335: ; if (dev_priv->vbt.child_dev_num > i) { goto ldv_48334; } else { } if ((unsigned long )child == (unsigned long )((union child_device_config *)0)) { return; } else { } aux_channel = child->raw[25]; is_dvi = ((int )child->common.device_type & 16) != 0; is_dp = ((int )child->common.device_type & 4) != 0; is_crt = ((int )child->common.device_type & 1) != 0; is_hdmi = (bool )((int )is_dvi && ((int )child->common.device_type & 2048) == 0); is_edp___0 = (bool )((int )is_dp && ((int )child->common.device_type & 4096) != 0); info->supports_dvi = (unsigned char )is_dvi; info->supports_hdmi = (unsigned char )is_hdmi; info->supports_dp = (unsigned char )is_dp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n", (unsigned int )port + 65U, (int )is_dp, (int )is_hdmi, (int )is_dvi, (int )is_edp___0, (int )is_crt); } else { } if ((int )is_edp___0 && (int )is_dvi) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Internal DP port %c is TMDS compatible\n", (unsigned int )port + 65U); } else { } } else { } if ((int )is_crt && (unsigned int )port != 4U) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Port %c is analog\n", (unsigned int )port + 65U); } else { } } else { } if ((int )is_crt && ((int )is_dvi || (int )is_dp)) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Analog port %c is also DP or TMDS compatible\n", (unsigned int )port + 65U); } else { } } else { } if ((int )is_dvi && ((unsigned int )port == 0U || (unsigned int )port == 4U)) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { 
drm_ut_debug_printk("parse_ddi_port", "Port %c is TMDS compatible\n", (unsigned int )port + 65U); } else { } } else { } if ((! is_dvi && ! is_dp) && ! is_crt) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Port %c is not DP/TMDS/CRT compatible\n", (unsigned int )port + 65U); } else { } } else { } if ((int )is_edp___0 && (((unsigned int )port == 1U || (unsigned int )port == 2U) || (unsigned int )port == 4U)) { tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Port %c is internal DP\n", (unsigned int )port + 65U); } else { } } else { } if ((int )is_dvi) { if ((unsigned int )child->common.ddc_pin == 5U && (unsigned int )port != 1U) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Unexpected DDC pin for port B\n"); } else { } } else { } if ((unsigned int )child->common.ddc_pin == 4U && (unsigned int )port != 2U) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Unexpected DDC pin for port C\n"); } else { } } else { } if ((unsigned int )child->common.ddc_pin == 6U && (unsigned int )port != 3U) { tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Unexpected DDC pin for port D\n"); } else { } } else { } } else { } if ((int )is_dp) { if ((unsigned int )aux_channel == 64U && (unsigned int )port != 0U) { tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Unexpected AUX channel for port A\n"); } else { } } else { } if ((unsigned int )aux_channel == 16U && (unsigned int )port != 1U) { tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Unexpected AUX channel for port B\n"); } else { } } else { 
} if ((unsigned int )aux_channel == 32U && (unsigned int )port != 2U) { tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Unexpected AUX channel for port C\n"); } else { } } else { } if ((unsigned int )aux_channel == 48U && (unsigned int )port != 3U) { tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("parse_ddi_port", "Unexpected AUX channel for port D\n"); } else { } } else { } } else { } if ((unsigned int )((unsigned short )bdb->version) > 157U) { hdmi_level_shift = (unsigned int )child->raw[7] & 15U; tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("parse_ddi_port", "VBT HDMI level shift for port %c: %d\n", (unsigned int )port + 65U, (int )hdmi_level_shift); } else { } info->hdmi_level_shift = hdmi_level_shift; } else { } return; } } static void parse_ddi_ports(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct drm_device *dev ; enum port port ; struct drm_i915_private *__p ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return; } else { } if (dev_priv->vbt.child_dev_num == 0) { return; } else { } if ((unsigned int )((unsigned short )bdb->version) <= 154U) { return; } else { } port = 0; goto ldv_48350; ldv_48349: parse_ddi_port(dev_priv, port, bdb); port = (enum port )((unsigned int )port + 1U); ldv_48350: ; if ((unsigned int )port <= 4U) { goto ldv_48349; } else { } return; } } static void parse_device_mapping(struct drm_i915_private *dev_priv , struct bdb_header const *bdb ) { struct bdb_general_definitions const *p_defs ; union child_device_config const *p_child ; union child_device_config *child_dev_ptr ; int i ; int child_device_num ; int count ; u16 block_size ; void const *tmp ; long tmp___0 ; long tmp___1 ; void *tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = 
find_section((void const *)bdb, 2); p_defs = (struct bdb_general_definitions const *)tmp; if ((unsigned long )p_defs == (unsigned long )((struct bdb_general_definitions const *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("parse_device_mapping", "No general definition block is found, no devices defined.\n"); } else { } return; } else { } if ((unsigned int )((unsigned char )p_defs->child_dev_size) <= 32U) { drm_err("General definiton block child device size is too small.\n"); return; } else { } block_size = get_blocksize((void const *)p_defs); child_device_num = (int )(((unsigned long )block_size - 5UL) / (unsigned long )p_defs->child_dev_size); count = 0; i = 0; goto ldv_48366; ldv_48365: p_child = child_device_ptr(p_defs, i); if ((unsigned int )((unsigned short )p_child->common.device_type) == 0U) { goto ldv_48364; } else { } count = count + 1; ldv_48364: i = i + 1; ldv_48366: ; if (i < child_device_num) { goto ldv_48365; } else { } if (count == 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("parse_device_mapping", "no child dev is parsed from VBT\n"); } else { } return; } else { } tmp___2 = kcalloc((size_t )count, 33UL, 208U); dev_priv->vbt.child_dev = (union child_device_config *)tmp___2; if ((unsigned long )dev_priv->vbt.child_dev == (unsigned long )((union child_device_config *)0)) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("parse_device_mapping", "No memory space for child device\n"); } else { } return; } else { } dev_priv->vbt.child_dev_num = count; count = 0; i = 0; goto ldv_48370; ldv_48369: p_child = child_device_ptr(p_defs, i); if ((unsigned int )((unsigned short )p_child->common.device_type) == 0U) { goto ldv_48368; } else { } if (((unsigned int )((unsigned char )p_child->common.dvo_port) > 20U && (unsigned int )((unsigned char )p_child->common.dvo_port) <= 24U) && ((int 
)p_child->common.device_type & 1024) != 0) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("parse_device_mapping", "Found MIPI as LFP\n"); } else { } dev_priv->vbt.has_mipi = 1U; dev_priv->vbt.dsi.port = (u16 )p_child->common.dvo_port; } else { } child_dev_ptr = dev_priv->vbt.child_dev + (unsigned long )count; count = count + 1; memcpy((void *)child_dev_ptr, (void const *)p_child, 33UL); ldv_48368: i = i + 1; ldv_48370: ; if (i < child_device_num) { goto ldv_48369; } else { } return; } } static void init_vbt_defaults(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; enum port port ; struct drm_i915_private *__p ; long tmp ; struct ddi_vbt_port_info *info ; { dev = dev_priv->dev; dev_priv->vbt.crt_ddc_pin = 2; dev_priv->vbt.backlight.present = 1; dev_priv->vbt.lvds_dither = 1U; dev_priv->vbt.lvds_vbt = 0U; dev_priv->vbt.sdvo_lvds_vbt_mode = (struct drm_display_mode *)0; dev_priv->vbt.int_tv_support = 1U; dev_priv->vbt.int_crt_support = 1U; dev_priv->vbt.lvds_use_ssc = 1U; __p = to_i915((struct drm_device const *)dev); dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, (unsigned int )__p->pch_type == 0U); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("init_vbt_defaults", "Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); } else { } port = 0; goto ldv_48386; ldv_48385: info = (struct ddi_vbt_port_info *)(& dev_priv->vbt.ddi_port_info) + (unsigned long )port; info->hdmi_level_shift = 255U; info->supports_dvi = (unsigned char )((unsigned int )port != 0U && (unsigned int )port != 4U); info->supports_hdmi = info->supports_dvi; info->supports_dp = (unsigned int )port != 4U; port = (enum port )((unsigned int )port + 1U); ldv_48386: ; if ((unsigned int )port <= 4U) { goto ldv_48385; } else { } return; } } static int intel_no_opregion_vbt_callback(struct dmi_system_id const *id ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 
0L); if (tmp != 0L) { drm_ut_debug_printk("intel_no_opregion_vbt_callback", "Falling back to manually reading VBT from VBIOS ROM for %s\n", id->ident); } else { } return (1); } } static struct dmi_system_id const intel_no_opregion_vbt[2U] = { {& intel_no_opregion_vbt_callback, "ThinkCentre A57", {{4U, (unsigned char)0, {'L', 'E', 'N', 'O', 'V', 'O', '\000'}}, {5U, (unsigned char)0, {'9', '7', '0', '2', '7', 'R', 'G', '\000'}}}, 0}}; static struct bdb_header const *validate_vbt(void const *_base , size_t size , void const *_vbt , char const *source ) { void const *base ; size_t offset ; struct vbt_header const *vbt ; struct bdb_header const *bdb ; long tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { base = _base; offset = (size_t )((long )_vbt - (long )_base); vbt = (struct vbt_header const *)(base + offset); if (offset + 48UL > size) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("validate_vbt", "VBT header incomplete\n"); } else { } return ((struct bdb_header const *)0); } else { } tmp___1 = memcmp((void const *)(& vbt->signature), (void const *)"$VBT", 4UL); if (tmp___1 != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("validate_vbt", "VBT invalid signature\n"); } else { } return ((struct bdb_header const *)0); } else { } offset = (size_t )vbt->bdb_offset + offset; if (offset + 22UL > size) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("validate_vbt", "BDB header incomplete\n"); } else { } return ((struct bdb_header const *)0); } else { } bdb = (struct bdb_header const *)(base + offset); if ((size_t )bdb->bdb_size + offset > size) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("validate_vbt", "BDB incomplete\n"); } else { } return ((struct bdb_header const *)0); } else { } tmp___4 = ldv__builtin_expect((drm_debug & 
4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("validate_vbt", "Using VBT from %s: %20s\n", source, (u8 const *)(& vbt->signature)); } else { } return (bdb); } } static struct bdb_header const *find_vbt(void *bios , size_t size ) { struct bdb_header const *bdb ; size_t i ; unsigned int tmp ; { bdb = (struct bdb_header const *)0; i = 0UL; goto ldv_48412; ldv_48411: tmp = ioread32(bios + i); if (tmp == (unsigned int )*((u32 const *)"$VBT")) { bdb = validate_vbt((void const *)bios, size, (void const *)(bios + i), "PCI ROM"); goto ldv_48410; } else { } i = i + 1UL; ldv_48412: ; if (i + 4UL < size) { goto ldv_48411; } else { } ldv_48410: ; return (bdb); } } int intel_parse_bios(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct pci_dev *pdev ; struct bdb_header const *bdb ; u8 *bios ; struct drm_i915_private *__p ; int tmp ; size_t size ; void *tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pdev = dev->pdev; bdb = (struct bdb_header const *)0; bios = (u8 *)0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 5U) { return (-19); } else { } init_vbt_defaults(dev_priv); tmp = dmi_check_system((struct dmi_system_id const *)(& intel_no_opregion_vbt)); if (tmp == 0 && (unsigned long )dev_priv->opregion.vbt != (unsigned long )((void *)0)) { bdb = validate_vbt((void const *)dev_priv->opregion.header, 8192UL, (void const *)dev_priv->opregion.vbt, "OpRegion"); } else { } if ((unsigned long )bdb == (unsigned long )((struct bdb_header const *)0)) { tmp___0 = pci_map_rom(pdev, & size); bios = (u8 *)tmp___0; if ((unsigned long )bios == (unsigned long )((u8 *)0U)) { return (-1); } else { } bdb = find_vbt((void *)bios, size); if ((unsigned long )bdb == (unsigned long )((struct bdb_header const *)0)) { pci_unmap_rom(pdev, (void *)bios); return (-1); } else { } } else { } parse_general_features(dev_priv, bdb); parse_general_definitions(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb); 
parse_lfp_backlight(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); parse_sdvo_device_mapping(dev_priv, bdb); parse_device_mapping(dev_priv, bdb); parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); parse_psr(dev_priv, bdb); parse_mipi(dev_priv, bdb); parse_ddi_ports(dev_priv, bdb); if ((unsigned long )bios != (unsigned long )((u8 *)0U)) { pci_unmap_rom(pdev, (void *)bios); } else { } return (0); } } void intel_setup_bios(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; uint32_t tmp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397832L, 1); if (tmp == 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397836L, 1); if (tmp___0 == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397832L, 26216400U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397836L, 22939600U, 1); } else { } } else { } } else { } return; } } bool ldv_queue_work_on_531(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_532(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_533(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = 
/* Tail of ldv_queue_work_on_533() (begun on the previous line). */
queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV instrumentation: flush_workqueue() wrapper that also runs-and-disables all
 * modeled work items (call_and_disable_all_18(2)). */
void ldv_flush_workqueue_534(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* LDV instrumentation: queue_delayed_work_on() wrapper, as above. */
bool ldv_queue_delayed_work_on_535(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
__inline static long ldv__builtin_expect(long exp , long c ) ;
/* fls64(): find-last-set via the x86 BSR instruction; bitpos stays -1 for x == 0,
 * so the function returns 0 then, else 1-based index of the highest set bit. */
__inline static int fls64(__u64 x ) { int bitpos ; { bitpos = -1; __asm__ ("bsrq %1,%q0": "+r" (bitpos): "rm" (x)); return (bitpos + 1); } }
/* fls_long(): fls64 widened for unsigned long inputs. */
__inline static unsigned int fls_long(unsigned long l ) { int tmp___0 ; { tmp___0 = fls64((__u64 )l); return ((unsigned int )tmp___0); } }
/* __roundup_pow_of_two(): 1UL << fls_long(n - 1), i.e. the smallest power of two
 * >= n (callers must ensure n > 1; n == 0 underflows the subtraction). */
__inline static unsigned long __roundup_pow_of_two(unsigned long n ) { unsigned int tmp ; { tmp = fls_long(n - 1UL); return (1UL << (int )tmp); } }
/* arch_local_save_flags___13(): CIL clone of the paravirt save_fl path -- BUG trap
 * (ud2 + self-loop label ldv_4860) if pv_irq_ops.save_fl.func is NULL, otherwise
 * the patched indirect call returning EFLAGS in __eax (asm continues next line). */
__inline static unsigned long arch_local_save_flags___13(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": 
/* Tail of arch_local_save_flags___13(): asm operand lists, then return the saved
 * flags value from __eax. */
"=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } }
__inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ;
/* atomic_dec(): x86 "lock; decl" on v->counter, plus the .smp_locks annotation
 * section emitted by the kernel's alternatives machinery. */
__inline static void atomic_dec(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0": "+m" (v->counter)); return; } }
/* __atomic_add_unless___3(): CMPXCHG loop -- add a to *v unless it equals u;
 * returns the old value. Labels ldv_5707/ldv_5708 are CIL's rendering of the
 * retry loop and its break. */
__inline static int __atomic_add_unless___3(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5708: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5707; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5707; } else { } c = old; goto ldv_5708; ldv_5707: ; return (c); } }
/* atomic_add_unless___3(): true iff the add actually happened (old value != u). */
__inline static int atomic_add_unless___3(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless___3(v, a, u); return (tmp != u); } }
/* static_key_false___3-style clone: jump-label key test reduced to a plain
 * reference-count comparison (static_key_count(key) > 0). */
__inline static bool static_key_false___10(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } }
/* rcu_read_lock_sched_held___10(): lockdep helper -- returns nonzero when it is
 * legitimate to assume rcu_read_lock_sched protection (lockdep disabled, RCU not
 * watching checks, rcu_sched_lock_map held, preemption or IRQs disabled -- the
 * last two conditions are evaluated on the next line). */
__inline static int rcu_read_lock_sched_held___10(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 
/* Tail of rcu_read_lock_sched_held___10(): held if lockdep says so, or preemption
 * is disabled, or local IRQs are disabled. */
0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___13(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } }
/* Prototypes for the numbered LDV workqueue wrappers used by the helpers below. */
bool ldv_queue_work_on_545(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_547(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_546(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_549(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_548(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_flush_workqueue_551(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_550(struct work_struct *ldv_func_arg1 ) ;
/* queue_work___2(): queue_work() with cpu = 8192 (presumably WORK_CPU_UNBOUND --
 * TODO confirm), routed through the LDV wrapper. */
__inline static bool queue_work___2(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_545(8192, wq, work); return (tmp); } }
/* schedule_work___1(): queue on the system-wide workqueue. */
__inline static bool schedule_work___1(struct work_struct *work ) { bool tmp ; { tmp = queue_work___2(system_wq, work); return (tmp); } }
/* flush_scheduled_work(): flush the system-wide workqueue via the LDV wrapper. */
__inline static void flush_scheduled_work(void) { { ldv_flush_workqueue_548(system_wq); return; } }
/* kref_get___11(): increment kref->refcount; WARN_ON_ONCE if the pre-increment
 * count was zero (result <= 1), i.e. a get on a dead object. The __warned
 * once-latch logic continues on the next line. */
__inline static void kref_get___11(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
/* Tail of kref_get___11(): fire warn_slowpath_null at most once (latched by
 * __warned), then return. */
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } }
/* kref_sub___13(): subtract count from the refcount; WARN on a NULL release
 * callback; call release and return 1 when the count hits zero, else 0. */
__inline static int kref_sub___13(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } }
/* kref_put___13(): single-reference drop, via kref_sub___13(..., 1U, ...). */
__inline static int kref_put___13(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___13(kref, 1U, release); return (tmp); } }
/* kref_put_mutex___3(): drop a reference, taking *lock only when this may be the
 * final put (atomic_add_unless(-1, 1) failed, i.e. count was 1). Under the mutex,
 * re-check with atomic_dec_and_test: if another ref appeared, unlock and return 0;
 * otherwise call release (which is expected to unlock) and return 1. */
__inline static int kref_put_mutex___3(struct kref *kref , void (*release)(struct kref * ) , struct mutex *lock ) { int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 138); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = atomic_add_unless___3(& kref->refcount, -1, 1); tmp___3 = ldv__builtin_expect(tmp___2 == 0, 0L); if (tmp___3 != 0L) { mutex_lock_nested(lock, 0U); tmp___0 = atomic_dec_and_test(& kref->refcount); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { mutex_unlock(lock); return (0); } else { } (*release)(kref); return (1); } else { } return (0); } }
/* LDV work-model prototypes (run continues on the next line). */
void call_and_disable_all_14(int state ) ; void activate_work_13(struct work_struct *work , int state ) ; void 
call_and_disable_work_14(struct work_struct *work ) ; void call_and_disable_work_13(struct work_struct *work ) ; void disable_work_14(struct work_struct *work ) ; void invoke_work_14(void) ; void disable_work_13(struct work_struct *work ) ; void invoke_work_13(void) ; void activate_work_14(struct work_struct *work , int state ) ; void call_and_disable_all_13(int state ) ; __inline static bool ww_mutex_is_locked(struct ww_mutex *lock ) { int tmp ; { tmp = mutex_is_locked(& lock->base); return (tmp != 0); } } extern void drm_modeset_backoff(struct drm_modeset_acquire_ctx * ) ; __inline static bool drm_modeset_is_locked(struct drm_modeset_lock *lock ) { bool tmp ; { tmp = ww_mutex_is_locked(& lock->mutex); return (tmp); } } extern void drm_mode_set_crtcinfo(struct drm_display_mode * , int ) ; extern void drm_mode_copy(struct drm_display_mode * , struct drm_display_mode const * ) ; extern int drm_crtc_init_with_planes(struct drm_device * , struct drm_crtc * , struct drm_plane * , struct drm_plane * , struct drm_crtc_funcs const * ) ; extern void drm_crtc_cleanup(struct drm_crtc * ) ; extern unsigned int drm_crtc_index(struct drm_crtc * ) ; __inline static uint32_t drm_crtc_mask(struct drm_crtc *crtc ) { unsigned int tmp ; { tmp = drm_crtc_index(crtc); return ((uint32_t )(1 << (int )tmp)); } } extern void drm_connector_unregister(struct drm_connector * ) ; __inline static bool drm_encoder_crtc_ok(struct drm_encoder *encoder , struct drm_crtc *crtc ) { uint32_t tmp ; { tmp = drm_crtc_mask(crtc); return ((encoder->possible_crtcs & tmp) != 0U); } } extern int drm_universal_plane_init(struct drm_device * , struct drm_plane * , unsigned long , struct drm_plane_funcs const * , uint32_t const * , uint32_t , enum drm_plane_type ) ; extern void drm_plane_cleanup(struct drm_plane * ) ; extern unsigned int drm_plane_index(struct drm_plane * ) ; extern void drm_crtc_get_hv_timing(struct drm_display_mode const * , int * , int * ) ; extern void drm_encoder_cleanup(struct drm_encoder 
* ) ; extern void drm_mode_config_init(struct drm_device * ) ; extern void drm_mode_config_cleanup(struct drm_device * ) ; extern int drm_object_property_set_value(struct drm_mode_object * , struct drm_property * , uint64_t ) ; extern int drm_framebuffer_init(struct drm_device * , struct drm_framebuffer * , struct drm_framebuffer_funcs const * ) ; extern void drm_framebuffer_unreference(struct drm_framebuffer * ) ; extern void drm_framebuffer_reference(struct drm_framebuffer * ) ; extern void drm_framebuffer_cleanup(struct drm_framebuffer * ) ; extern void drm_framebuffer_unregister_private(struct drm_framebuffer * ) ; extern void drm_object_attach_property(struct drm_mode_object * , struct drm_property * , uint64_t ) ; extern int drm_mode_connector_attach_encoder(struct drm_connector * , struct drm_encoder * ) ; extern int drm_mode_crtc_set_gamma_size(struct drm_crtc * , int ) ; extern struct drm_mode_object *drm_mode_object_find(struct drm_device * , uint32_t , uint32_t ) ; extern uint32_t drm_mode_legacy_fb_format(uint32_t , uint32_t ) ; extern char const *drm_get_format_name(uint32_t ) ; extern struct drm_property *drm_mode_create_rotation_property(struct drm_device * , unsigned int ) ; __inline static struct drm_crtc *drm_crtc_find(struct drm_device *dev , uint32_t id ) { struct drm_mode_object *mo ; struct drm_mode_object const *__mptr ; struct drm_crtc *tmp ; { mo = drm_mode_object_find(dev, id, 3435973836U); if ((unsigned long )mo != (unsigned long )((struct drm_mode_object *)0)) { __mptr = (struct drm_mode_object const *)mo; tmp = (struct drm_crtc *)__mptr + 0xffffffffffffff20UL; } else { tmp = (struct drm_crtc *)0; } return (tmp); } } extern u32 drm_vblank_count(struct drm_device * , int ) ; extern void drm_send_vblank_event(struct drm_device * , int , struct drm_pending_vblank_event * ) ; extern int drm_crtc_vblank_get(struct drm_crtc * ) ; extern void drm_crtc_vblank_put(struct drm_crtc * ) ; extern void drm_crtc_vblank_off(struct drm_crtc * ) ; extern 
void drm_crtc_vblank_reset(struct drm_crtc * ) ; extern void drm_crtc_vblank_on(struct drm_crtc * ) ; extern void drm_calc_timestamping_constants(struct drm_crtc * , struct drm_display_mode const * ) ; __inline static bool drm_can_sleep___5(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_40513; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40513; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40513; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40513; default: __bad_percpu_size(); } ldv_40513: pscr_ret__ = pfo_ret__; goto ldv_40519; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40523; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40523; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40523; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40523; default: __bad_percpu_size(); } ldv_40523: pscr_ret__ = pfo_ret_____0; goto ldv_40519; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40532; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40532; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40532; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40532; default: __bad_percpu_size(); } ldv_40532: pscr_ret__ = pfo_ret_____1; goto 
ldv_40519; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40541; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40541; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40541; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40541; default: __bad_percpu_size(); } ldv_40541: pscr_ret__ = pfo_ret_____2; goto ldv_40519; default: __bad_size_call_parameter(); goto ldv_40519; } ldv_40519: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___13(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } __inline static struct drm_i915_gem_request *intel_ring_get_request___2(struct intel_engine_cs *ring ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )ring->outstanding_lazy_request == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/intel_ringbuffer.h"), "i" (450), "i" (12UL)); ldv_41629: ; goto ldv_41629; } else { } return (ring->outstanding_lazy_request); } } __inline static void drm_gem_object_reference___5(struct drm_gem_object *obj ) { { kref_get___11(& obj->refcount); return; } } __inline static void drm_gem_object_unreference___12(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___13(& obj->refcount, & drm_gem_object_free); } else { } return; } } __inline static void drm_gem_object_unreference_unlocked___2(struct drm_gem_object *obj ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )obj == 
(unsigned long )((struct drm_gem_object *)0)) { return; } else { } dev = obj->dev; tmp = kref_put_mutex___3(& obj->refcount, & drm_gem_object_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { lock_acquire(& dev->struct_mutex.dep_map, 0U, 0, 0, 1, (struct lockdep_map *)0, 0UL); lock_release(& dev->struct_mutex.dep_map, 0, 0UL); } return; } } void intel_link_compute_m_n(int bits_per_pixel , int nlanes , int pixel_clock , int link_clock , struct intel_link_m_n *m_n ) ; __inline static bool skl_ddb_entry_equal(struct skl_ddb_entry const *e1 , struct skl_ddb_entry const *e2 ) { { if ((int )((unsigned short )e1->start) == (int )((unsigned short )e2->start) && (int )((unsigned short )e1->end) == (int )((unsigned short )e2->end)) { return (1); } else { } return (0); } } __inline static struct drm_i915_gem_request *i915_gem_request_reference___4(struct drm_i915_gem_request *req ) { { if ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0)) { kref_get___11(& req->ref); } else { } return (req); } } __inline static void i915_gem_request_unreference___3(struct drm_i915_gem_request *req ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& ((req->ring)->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2216, "WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); kref_put___13(& req->ref, & i915_gem_request_free); return; } } __inline static void i915_gem_request_unreference__unlocked___1(struct drm_i915_gem_request *req ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0)) { return; } else { } dev = (req->ring)->dev; tmp = kref_put_mutex___3(& req->ref, & 
i915_gem_request_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { } return; } } __inline static void i915_gem_request_assign___3(struct drm_i915_gem_request **pdst , struct drm_i915_gem_request *src ) { { if ((unsigned long )src != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_reference___4(src); } else { } if ((unsigned long )*pdst != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_unreference___3(*pdst); } else { } *pdst = src; return; } } __inline static void trace_i915_flip_request(int plane , struct drm_i915_gem_object *obj ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_405 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_407___0 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___10(& __tracepoint_i915_flip_request.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_flip_request.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___10(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 610, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45408: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , int , struct drm_i915_gem_object * ))it_func))(__data, plane, obj); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45408; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_flip_request.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___10(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 610, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } __inline static void trace_i915_flip_complete(int plane , struct drm_i915_gem_object *obj ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_409___0 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_411 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___10(& __tracepoint_i915_flip_complete.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_flip_complete.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___10(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 628, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_45464: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , int , struct drm_i915_gem_object * ))it_func))(__data, plane, obj); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_45464; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_flip_complete.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___10(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 628, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } __inline static bool i915_gem_request_completed___3(struct drm_i915_gem_request *req , bool lazy_coherency ) { u32 seqno ; long tmp ; bool tmp___0 ; { tmp = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drm_i915_gem_request *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h"), "i" (2806), "i" (12UL)); ldv_46218: ; goto ldv_46218; } else { } seqno = (*((req->ring)->get_seqno))(req->ring, (int )lazy_coherency); tmp___0 = i915_seqno_passed(seqno, req->seqno); return (tmp___0); } } int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder , bool enable ) ; void intel_unregister_dsm_handler(void) ; void intel_modeset_init(struct drm_device *dev ) ; void intel_modeset_gem_init(struct drm_device *dev ) ; void intel_modeset_cleanup(struct drm_device *dev ) ; void intel_connector_unregister(struct intel_connector *intel_connector ) ; int intel_modeset_vga_set_state(struct drm_device *dev , bool state ) ; int intel_trans_dp_port_sel(struct drm_crtc *crtc ) ; void vlv_cck_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) ; u32 vlv_bunit_read(struct drm_i915_private *dev_priv , u32 reg ) ; void vlv_bunit_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) ; u32 vlv_dpio_read(struct drm_i915_private *dev_priv , enum pipe pipe , int reg ) ; void vlv_dpio_write(struct drm_i915_private *dev_priv , enum pipe pipe , int reg , u32 val ) ; u32 
intel_sbi_read(struct drm_i915_private *dev_priv , u16 reg , enum intel_sbi_destination destination ) ; void intel_sbi_write(struct drm_i915_private *dev_priv , u16 reg , u32 value , enum intel_sbi_destination destination ) ; __inline static uint32_t i915_vgacntrl_reg(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return (2036736U); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { return (266240U); } else { return (463872U); } } } } extern void drm_helper_move_panel_connectors_to_head(struct drm_device * ) ; extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer * , struct drm_mode_fb_cmd2 * ) ; __inline static void drm_crtc_helper_add(struct drm_crtc *crtc , struct drm_crtc_helper_funcs const *funcs ) { { crtc->helper_private = (void const *)funcs; return; } } extern void drm_kms_helper_poll_fini(struct drm_device * ) ; __inline static int drm_rect_width(struct drm_rect const *r ) { { return ((int )r->x2 - (int )r->x1); } } __inline static int drm_rect_height(struct drm_rect const *r ) { { return ((int )r->y2 - (int )r->y1); } } extern struct drm_atomic_state *drm_atomic_state_alloc(struct drm_device * ) ; extern void drm_atomic_state_free(struct drm_atomic_state * ) ; extern struct drm_crtc_state *drm_atomic_get_crtc_state(struct drm_atomic_state * , struct drm_crtc * ) ; extern struct drm_plane_state *drm_atomic_get_plane_state(struct drm_atomic_state * , struct drm_plane * ) ; extern struct drm_connector_state *drm_atomic_get_connector_state(struct drm_atomic_state * , struct drm_connector * ) ; extern int drm_atomic_set_crtc_for_plane(struct drm_plane_state * , struct drm_crtc * ) ; extern void drm_atomic_set_fb_for_plane(struct drm_plane_state * , struct drm_framebuffer * ) ; extern int 
drm_atomic_set_crtc_for_connector(struct drm_connector_state * , struct drm_crtc * ) ; extern int drm_atomic_add_affected_connectors(struct drm_atomic_state * , struct drm_crtc * ) ; extern int drm_atomic_connectors_for_crtc(struct drm_atomic_state * , struct drm_crtc * ) ; __inline static int vlv_pipe_to_channel(enum pipe pipe ) { { switch ((int )pipe) { case 0: ; case 2: ; return (0); case 1: ; return (1); default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/intel_drv.h"), "i" (790), "i" (12UL)); ldv_48039: ; goto ldv_48039; } } } __inline static struct intel_dp_mst_encoder *enc_to_mst(struct drm_encoder *encoder ) { struct drm_encoder const *__mptr ; { __mptr = (struct drm_encoder const *)encoder; return ((struct intel_dp_mst_encoder *)__mptr); } } bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv , enum pipe pipe , bool enable ) ; bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv , enum transcoder pch_transcoder , bool enable ) ; void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv ) ; void intel_crt_init(struct drm_device *dev ) ; void hsw_fdi_link_train(struct drm_crtc *crtc ) ; void intel_ddi_init(struct drm_device *dev , enum port port ) ; void intel_ddi_pll_init(struct drm_device *dev ) ; void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc ) ; void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv , enum transcoder cpu_transcoder ) ; void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc ) ; void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc ) ; bool intel_ddi_pll_select(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) ; void intel_ddi_set_pipe_settings(struct drm_crtc *crtc ) ; void intel_ddi_fdi_disable(struct 
drm_crtc *crtc ) ; struct intel_encoder *intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state ) ; void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc , bool state ) ; void intel_frontbuffer_flip_prepare(struct drm_device *dev , unsigned int frontbuffer_bits ) ; void intel_frontbuffer_flip_complete(struct drm_device *dev , unsigned int frontbuffer_bits ) ; void intel_frontbuffer_flush(struct drm_device *dev , unsigned int frontbuffer_bits ) ; __inline static void intel_frontbuffer_flip(struct drm_device *dev , unsigned int frontbuffer_bits ) { { intel_frontbuffer_flush(dev, frontbuffer_bits); return; } } unsigned int intel_fb_align_height(struct drm_device *dev , unsigned int height , uint32_t pixel_format , uint64_t fb_format_modifier ) ; u32 intel_fb_stride_alignment(struct drm_device *dev , uint64_t fb_modifier , uint32_t pixel_format ) ; struct drm_plane_funcs const intel_plane_funcs ; int intel_pch_rawclk(struct drm_device *dev ) ; void intel_crtc_restore_mode(struct drm_crtc *crtc ) ; void intel_crtc_update_dpms(struct drm_crtc *crtc ) ; void intel_encoder_destroy(struct drm_encoder *encoder ) ; int intel_connector_init(struct intel_connector *connector ) ; struct intel_connector *intel_connector_alloc(void) ; void intel_connector_dpms(struct drm_connector *connector , int mode ) ; bool intel_connector_get_hw_state(struct intel_connector *connector ) ; void intel_modeset_check_state(struct drm_device *dev ) ; bool ibx_digital_port_connected(struct drm_i915_private *dev_priv , struct intel_digital_port *port ) ; void intel_connector_attach_encoder(struct intel_connector *connector , struct intel_encoder *encoder ) ; struct drm_encoder *intel_best_encoder(struct drm_connector *connector ) ; struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev , struct drm_crtc *crtc ) ; enum pipe intel_get_pipe_from_connector(struct intel_connector *connector ) ; int intel_get_pipe_from_crtc_id(struct drm_device *dev , void *data , struct 
drm_file *file ) ; enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv , enum pipe pipe ) ; int ironlake_get_lanes_required(int target_clock , int link_bw , int bpp ) ; void vlv_wait_port_ready(struct drm_i915_private *dev_priv , struct intel_digital_port *dport , unsigned int expected_mask ) ; bool intel_get_load_detect_pipe(struct drm_connector *connector , struct drm_display_mode *mode , struct intel_load_detect_pipe *old , struct drm_modeset_acquire_ctx *ctx ) ; void intel_release_load_detect_pipe(struct drm_connector *connector , struct intel_load_detect_pipe *old , struct drm_modeset_acquire_ctx *ctx ) ; int intel_pin_and_fence_fb_obj(struct drm_plane *plane , struct drm_framebuffer *fb , struct drm_plane_state const *plane_state , struct intel_engine_cs *pipelined ) ; struct drm_framebuffer *__intel_framebuffer_create(struct drm_device *dev , struct drm_mode_fb_cmd2 *mode_cmd , struct drm_i915_gem_object *obj ) ; int intel_prepare_plane_fb(struct drm_plane *plane , struct drm_framebuffer *fb , struct drm_plane_state const *new_state ) ; void intel_cleanup_plane_fb(struct drm_plane *plane , struct drm_framebuffer *fb , struct drm_plane_state const *old_state ) ; int intel_plane_atomic_get_property(struct drm_plane *plane , struct drm_plane_state const *state , struct drm_property *property , uint64_t *val ) ; int intel_plane_atomic_set_property(struct drm_plane *plane , struct drm_plane_state *state , struct drm_property *property , uint64_t val ) ; void intel_create_rotation_property(struct drm_device *dev , struct intel_plane *plane ) ; bool intel_wm_need_update(struct drm_plane *plane , struct drm_plane_state *state ) ; struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc ) ; void assert_shared_dpll(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll , bool state ) ; struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state ) ; void 
intel_put_shared_dpll(struct intel_crtc *crtc ) ; void vlv_force_pll_on(struct drm_device *dev , enum pipe pipe , struct dpll const *dpll ) ; void vlv_force_pll_off(struct drm_device *dev , enum pipe pipe ) ; void assert_panel_unlocked(struct drm_i915_private *dev_priv , enum pipe pipe ) ; void assert_fdi_rx_pll(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) ; void assert_pipe(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) ; unsigned long intel_gen4_compute_page_offset(int *x , int *y , unsigned int tiling_mode , unsigned int cpp , unsigned int pitch ) ; void broxton_set_cdclk(struct drm_device *dev , int frequency ) ; void intel_dp_get_m_n(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) ; void intel_dp_set_m_n(struct intel_crtc *crtc , enum link_m_n_set m_n ) ; int intel_dotclock_calculate(int link_freq , struct intel_link_m_n const *m_n ) ; void ironlake_check_encoder_dotclock(struct intel_crtc_state const *pipe_config , int dotclock ) ; bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state , int target_clock , intel_clock_t *best_clock ) ; enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder ) ; void intel_mode_from_pipe_config(struct drm_display_mode *mode , struct intel_crtc_state *pipe_config ) ; void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc ) ; void intel_modeset_preclose(struct drm_device *dev , struct drm_file *file ) ; void skl_detach_scalers(struct intel_crtc *intel_crtc ) ; int skl_update_scaler_users(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state , struct intel_plane *intel_plane , struct intel_plane_state *plane_state , int force_detach ) ; int skl_max_scale(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) ; unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane , struct drm_i915_gem_object *obj ) ; u32 skl_plane_ctl_format(uint32_t pixel_format ) ; u32 
skl_plane_ctl_tiling(uint64_t fb_modifier ) ; u32 skl_plane_ctl_rotation(unsigned int rotation ) ; void intel_dp_init(struct drm_device *dev , int output_reg , enum port port ) ; bool intel_dp_is_edp(struct drm_device *dev , enum port port ) ; void intel_plane_destroy(struct drm_plane *plane ) ; void intel_dsi_init(struct drm_device *dev ) ; void intel_dvo_init(struct drm_device *dev ) ; void intel_fbdev_output_poll_changed(struct drm_device *dev ) ; void intel_fbc_update(struct drm_device *dev ) ; void intel_hdmi_init(struct drm_device *dev , int hdmi_reg , enum port port ) ; void intel_lvds_init(struct drm_device *dev ) ; bool intel_is_dual_link_lvds(struct drm_device *dev ) ; void intel_setup_overlay(struct drm_device *dev ) ; void intel_cleanup_overlay(struct drm_device *dev ) ; int intel_overlay_switch_off(struct intel_overlay *overlay ) ; void intel_panel_destroy_backlight(struct drm_connector *connector ) ; void intel_panel_init_backlight_funcs(struct drm_device *dev ) ; void intel_backlight_register(struct drm_device *dev ) ; void intel_backlight_unregister(struct drm_device *dev ) ; void intel_psr_init(struct drm_device *dev ) ; bool intel_sdvo_init(struct drm_device *dev , uint32_t sdvo_reg , bool is_sdvob ) ; int intel_plane_init(struct drm_device *dev , enum pipe pipe , int plane ) ; int intel_plane_restore(struct drm_plane *plane ) ; bool intel_pipe_update_start(struct intel_crtc *crtc , uint32_t *start_vbl_count ) ; void intel_pipe_update_end(struct intel_crtc *crtc , u32 start_vbl_count ) ; void intel_tv_init(struct drm_device *dev ) ; int intel_atomic_check(struct drm_device *dev , struct drm_atomic_state *state ) ; int intel_atomic_commit(struct drm_device *dev , struct drm_atomic_state *state , bool async ) ; struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc ) ; void intel_crtc_destroy_state(struct drm_crtc *crtc , struct drm_crtc_state *state ) ; __inline static struct intel_crtc_state *intel_atomic_get_crtc_state(struct 
drm_atomic_state *state , struct intel_crtc *crtc ) { struct drm_crtc_state *crtc_state ; void *tmp ; bool tmp___0 ; struct drm_crtc_state const *__mptr ; { crtc_state = drm_atomic_get_crtc_state(state, & crtc->base); tmp___0 = IS_ERR((void const *)crtc_state); if ((int )tmp___0) { tmp = ERR_CAST((void const *)crtc_state); return ((struct intel_crtc_state *)tmp); } else { } __mptr = (struct drm_crtc_state const *)crtc_state; return ((struct intel_crtc_state *)__mptr); } } int intel_atomic_setup_scalers(struct drm_device *dev , struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) ; struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane ) ; struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane ) ; void intel_plane_destroy_state(struct drm_plane *plane , struct drm_plane_state *state ) ; struct drm_plane_helper_funcs const intel_plane_helper_funcs ; extern int drm_atomic_helper_check_modeset(struct drm_device * , struct drm_atomic_state * ) ; extern int drm_atomic_helper_check_planes(struct drm_device * , struct drm_atomic_state * ) ; extern int drm_atomic_helper_prepare_planes(struct drm_device * , struct drm_atomic_state * ) ; extern void drm_atomic_helper_commit_planes(struct drm_device * , struct drm_atomic_state * ) ; extern void drm_atomic_helper_cleanup_planes(struct drm_device * , struct drm_atomic_state * ) ; extern void drm_atomic_helper_swap_state(struct drm_device * , struct drm_atomic_state * ) ; extern int drm_atomic_helper_update_plane(struct drm_plane * , struct drm_crtc * , struct drm_framebuffer * , int , int , unsigned int , unsigned int , uint32_t , uint32_t , uint32_t , uint32_t ) ; extern int drm_atomic_helper_disable_plane(struct drm_plane * ) ; extern int drm_atomic_helper_plane_set_property(struct drm_plane * , struct drm_property * , uint64_t ) ; __inline static void drm_plane_helper_add(struct drm_plane *plane , struct drm_plane_helper_funcs const *funcs ) { { plane->helper_private = 
(void const *)funcs; return; } } extern int drm_plane_helper_check_update(struct drm_plane * , struct drm_crtc * , struct drm_framebuffer * , struct drm_rect * , struct drm_rect * , struct drm_rect const * , int , int , bool , bool , bool * ) ; extern int drm_plane_helper_disable(struct drm_plane * ) ; static uint32_t const i8xx_primary_formats[4U] = { 538982467U, 909199186U, 892424792U, 875713112U}; static uint32_t const i965_primary_formats[6U] = { 538982467U, 909199186U, 875713112U, 875709016U, 808669784U, 808665688U}; static uint32_t const skl_primary_formats[8U] = { 538982467U, 909199186U, 875713112U, 875709016U, 875713089U, 875708993U, 808669784U, 808665688U}; static uint32_t const intel_cursor_formats[1U] = { 875713089U}; static void intel_crtc_update_cursor(struct drm_crtc *crtc , bool on ) ; static void i9xx_crtc_clock_get(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) ; static void ironlake_pch_clock_get(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) ; static int intel_set_mode(struct drm_crtc *crtc , struct drm_atomic_state *state , bool force_restore ) ; static int intel_framebuffer_init(struct drm_device *dev , struct intel_framebuffer *intel_fb , struct drm_mode_fb_cmd2 *mode_cmd , struct drm_i915_gem_object *obj ) ; static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc ) ; static void intel_set_pipe_timings(struct intel_crtc *intel_crtc ) ; static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc , struct intel_link_m_n *m_n , struct intel_link_m_n *m2_n2 ) ; static void ironlake_set_pipeconf(struct drm_crtc *crtc ) ; static void haswell_set_pipeconf(struct drm_crtc *crtc ) ; static void intel_set_pipe_csc(struct drm_crtc *crtc ) ; static void vlv_prepare_pll(struct intel_crtc *crtc , struct intel_crtc_state const *pipe_config ) ; static void chv_prepare_pll(struct intel_crtc *crtc , struct intel_crtc_state const *pipe_config ) ; static void intel_begin_crtc_commit(struct drm_crtc *crtc ) ; static 
void intel_finish_crtc_commit(struct drm_crtc *crtc ) ; static void skl_init_scalers(struct drm_device *dev , struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) ; static int i9xx_get_refclk(struct intel_crtc_state const *crtc_state , int num_connectors ) ; static void intel_crtc_enable_planes(struct drm_crtc *crtc ) ; static void intel_crtc_disable_planes(struct drm_crtc *crtc ) ;
/* Return the encoder driving @connector: when the connector has no DP-MST
 * port (mst_port == NULL) the connector's plain encoder is used; otherwise
 * the per-pipe MST fake encoder mst_encoders[pipe].
 * NOTE(review): CIL-generated flattening of i915 source; @pipe is assumed
 * to be a valid index into mst_encoders[] — not bounds-checked here. */
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector , int pipe ) { { if ((unsigned long )connector->mst_port == (unsigned long )((struct intel_dp *)0)) { return (connector->encoder); } else { return (& ((connector->mst_port)->mst_encoders[pipe])->base); } } }
/* Read the PCH raw clock frequency field: WARNs once when the device has no
 * PCH split (pch_type == 0U, i.e. WARN_ON(!HAS_PCH_SPLIT(dev))), then reads
 * the MMIO register at offset 811524 (0xC6204 — presumably RAWCLK_FREQ,
 * confirm against the i915 register map) and returns its low 10 bits. */
int intel_pch_rawclk(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); __ret_warn_on = (unsigned int )__p->pch_type == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 145, "WARN_ON(!HAS_PCH_SPLIT(dev))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811524L, 1); return ((int )tmp___0 & 1023); } }
/* FDI link frequency helper: on GEN5 (info.gen == 5, Ironlake) derive it
 * from the MMIO register at 286720 (0x46000 — FDI_PLL_BIOS_0, TODO confirm):
 * low byte plus 2; on every other platform return the fixed value 27. */
__inline static u32 intel_fdi_link_freq(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286720L, 1); return ((tmp & 255U) + 2U); } else { return (27U); } } } static struct intel_limit const
intel_limits_i8xx_dac = {{25000, 350000}, {908000, 1512000}, {2, 16}, {96, 140}, {18, 26}, {6, 16}, {4, 128}, {2, 33}, {165000, 4, 2}}; static struct intel_limit const intel_limits_i8xx_dvo = {{25000, 350000}, {908000, 1512000}, {2, 16}, {96, 140}, {18, 26}, {6, 16}, {4, 128}, {2, 33}, {165000, 4, 4}}; static struct intel_limit const intel_limits_i8xx_lvds = {{25000, 350000}, {908000, 1512000}, {2, 16}, {96, 140}, {18, 26}, {6, 16}, {4, 128}, {1, 6}, {165000, 14, 7}}; static struct intel_limit const intel_limits_i9xx_sdvo = {{20000, 400000}, {1400000, 2800000}, {1, 6}, {70, 120}, {8, 18}, {3, 7}, {5, 80}, {1, 8}, {200000, 10, 5}}; static struct intel_limit const intel_limits_i9xx_lvds = {{20000, 400000}, {1400000, 2800000}, {1, 6}, {70, 120}, {8, 18}, {3, 7}, {7, 98}, {1, 8}, {112000, 14, 7}}; static struct intel_limit const intel_limits_g4x_sdvo = {{25000, 270000}, {1750000, 3500000}, {1, 4}, {104, 138}, {17, 23}, {5, 11}, {10, 30}, {1, 3}, {270000, 10, 10}}; static struct intel_limit const intel_limits_g4x_hdmi = {{22000, 400000}, {1750000, 3500000}, {1, 4}, {104, 138}, {16, 23}, {5, 11}, {5, 80}, {1, 8}, {165000, 10, 5}}; static struct intel_limit const intel_limits_g4x_single_channel_lvds = {{20000, 115000}, {1750000, 3500000}, {1, 3}, {104, 138}, {17, 23}, {5, 11}, {28, 112}, {2, 8}, {0, 14, 14}}; static struct intel_limit const intel_limits_g4x_dual_channel_lvds = {{80000, 224000}, {1750000, 3500000}, {1, 3}, {104, 138}, {17, 23}, {5, 11}, {14, 42}, {2, 6}, {0, 7, 7}}; static struct intel_limit const intel_limits_pineview_sdvo = {{20000, 400000}, {1700000, 3500000}, {3, 6}, {2, 256}, {0, 0}, {0, 254}, {5, 80}, {1, 8}, {200000, 10, 5}}; static struct intel_limit const intel_limits_pineview_lvds = {{20000, 400000}, {1700000, 3500000}, {3, 6}, {2, 256}, {0, 0}, {0, 254}, {7, 112}, {1, 8}, {112000, 14, 14}}; static struct intel_limit const intel_limits_ironlake_dac = {{25000, 350000}, {1760000, 3510000}, {1, 5}, {79, 127}, {12, 22}, {5, 9}, {5, 80}, {1, 8}, 
{225000, 10, 5}};
/* Per-platform PLL limit tables (fields, in declaration order as consumed by
 * intel_PLL_is_valid/i9xx_find_best_dpll below: dot, vco, n, m1, m2(/m2),
 * p1, p, p2{dot_limit,p2_slow,p2_fast} ranges). Values are CIL-flattened
 * from the original intel_display.c — do not edit by hand. */
static struct intel_limit const intel_limits_ironlake_single_lvds = {{25000, 350000}, {1760000, 3510000}, {1, 3}, {79, 118}, {12, 22}, {5, 9}, {28, 112}, {2, 8}, {225000, 14, 14}}; static struct intel_limit const intel_limits_ironlake_dual_lvds = {{25000, 350000}, {1760000, 3510000}, {1, 3}, {79, 127}, {12, 22}, {5, 9}, {14, 56}, {2, 8}, {225000, 7, 7}}; static struct intel_limit const intel_limits_ironlake_single_lvds_100m = {{25000, 350000}, {1760000, 3510000}, {1, 2}, {79, 126}, {12, 22}, {5, 9}, {28, 112}, {2, 8}, {225000, 14, 14}}; static struct intel_limit const intel_limits_ironlake_dual_lvds_100m = {{25000, 350000}, {1760000, 3510000}, {1, 3}, {79, 126}, {12, 22}, {5, 9}, {14, 42}, {2, 6}, {225000, 7, 7}}; static struct intel_limit const intel_limits_vlv = {{125000, 1350000}, {4000000, 6000000}, {1, 7}, {0, 0}, {2, 3}, {11, 156}, {0, 0}, {2, 3}, {0, 2, 20}}; static struct intel_limit const intel_limits_chv = {{125000, 2700000}, {4800000, 6480000}, {1, 1}, {0, 0}, {2, 2}, {100663296, 734003200}, {0, 0}, {2, 4}, {0, 1, 14}}; static struct intel_limit const intel_limits_bxt = {{0, 2147483647}, {4800000, 6480000}, {1, 1}, {0, 0}, {2, 2}, {8388608, 1069547520}, {0, 0}, {2, 4}, {0, 1, 20}};
/* ValleyView PLL readout: derive m = m1*m2 and p = p1*p2, WARN and bail if
 * n or p is zero (division below), then compute
 *   vco = refclk*m / n  and  dot = vco / p
 * using the sign-aware (__d/2 + __x)/__d ternary — the CIL expansion of the
 * kernel's DIV_ROUND_CLOSEST rounded-division macro. */
static void vlv_clock(int refclk , intel_clock_t *clock ) { int __ret_warn_on ; long tmp ; long tmp___0 ; int __x ; int __d ; int __x___0 ; int __d___0 ; { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; __ret_warn_on = clock->n == 0 || clock->p == 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 429, "WARN_ON(clock->n == 0 || clock->p == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } __x = clock->m * refclk; __d = clock->n; clock->vco = __x > 0 ? (__d / 2 + __x) / __d : (__x - __d / 2) / __d; __x___0 = clock->vco; __d___0 = clock->p; clock->dot = __x___0 > 0 ? (__d___0 / 2 + __x___0) / __d___0 : (__x___0 - __d___0 / 2) / __d___0; return; } }
/* Walk dev->mode_config.encoder_list (the goto/label pair ldv_49196/49197 is
 * the flattened list_for_each_entry loop; the -8 byte pointer adjustment,
 * 0xfffffffffffffff8UL, is presumably container_of(..., struct intel_encoder,
 * base.head)) and return 1 iff any encoder currently attached to @crtc has
 * encoder->type == @type, else 0. */
bool intel_pipe_has_type(struct intel_crtc *crtc , enum intel_output_type type ) { struct drm_device *dev ; struct intel_encoder *encoder ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = crtc->base.dev; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_49197; ldv_49196: ; if ((unsigned long )encoder->base.crtc == (unsigned long )(& crtc->base)) { if ((unsigned int )encoder->type == (unsigned int )type) { return (1); } else { } } else { } __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49197: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_49196; } else { } return (0); } }
/* Atomic-state variant of the above: scan the in-flight drm_atomic_state's
 * connector array (loop continues on L1652), count connectors targeting this
 * CRTC, and return 1 as soon as one whose best_encoder has the requested
 * @type is found; WARNs if the CRTC ends up with zero connectors. */
static bool intel_pipe_will_have_type(struct intel_crtc_state const *crtc_state , int type ) { struct drm_atomic_state *state ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; struct intel_encoder *encoder ; int i ; int num_connectors ; struct drm_encoder const *__mptr ; int __ret_warn_on ; long tmp ; { state = crtc_state->base.state; num_connectors = 0; i = 0; goto ldv_49213; ldv_49212: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )((struct drm_crtc *)crtc_state->base.crtc)) { goto ldv_49209; } else { } num_connectors = num_connectors + 1; __mptr = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr; if ((unsigned int )encoder->type == (unsigned int )type) { return (1); } else { } } else { } ldv_49209: i = i + 1; ldv_49213:
; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_49212; } else { } __ret_warn_on = num_connectors == 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 476, "WARN_ON(num_connectors == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (0); } } static intel_limit_t const *intel_ironlake_limit(struct intel_crtc_state *crtc_state , int refclk ) { struct drm_device *dev ; intel_limit_t const *limit ; bool tmp ; bool tmp___0 ; { dev = (crtc_state->base.crtc)->dev; tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___0) { tmp = intel_is_dual_link_lvds(dev); if ((int )tmp) { if (refclk == 100000) { limit = & intel_limits_ironlake_dual_lvds_100m; } else { limit = & intel_limits_ironlake_dual_lvds; } } else if (refclk == 100000) { limit = & intel_limits_ironlake_single_lvds_100m; } else { limit = & intel_limits_ironlake_single_lvds; } } else { limit = & intel_limits_ironlake_dac; } return (limit); } } static intel_limit_t const *intel_g4x_limit(struct intel_crtc_state *crtc_state ) { struct drm_device *dev ; intel_limit_t const *limit ; bool tmp ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; bool tmp___3 ; { dev = (crtc_state->base.crtc)->dev; tmp___3 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___3) { tmp = intel_is_dual_link_lvds(dev); if ((int )tmp) { limit = & intel_limits_g4x_dual_channel_lvds; } else { limit = & intel_limits_g4x_single_channel_lvds; } } else { tmp___1 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 6); if ((int )tmp___1) { limit = & 
intel_limits_g4x_hdmi; } else { tmp___2 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 1); if ((int )tmp___2) { limit = & intel_limits_g4x_hdmi; } else { tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 3); if ((int )tmp___0) { limit = & intel_limits_g4x_sdvo; } else { limit = & intel_limits_i9xx_sdvo; } } } } return (limit); } } static intel_limit_t const *intel_limit(struct intel_crtc_state *crtc_state , int refclk ) { struct drm_device *dev ; intel_limit_t const *limit ; bool tmp ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; { dev = (crtc_state->base.crtc)->dev; __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) == 0U) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) == 9U) { limit = & intel_limits_bxt; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___5->pch_type != 0U) { limit = intel_ironlake_limit(crtc_state, refclk); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 44UL) != 0U) { limit = intel_g4x_limit(crtc_state); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { tmp = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp) { limit = & intel_limits_pineview_lvds; } else { limit = & intel_limits_pineview_sdvo; } } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { 
__p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { limit = & intel_limits_chv; } else { goto _L; } } else { _L: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { limit = & intel_limits_vlv; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___0) { limit = & intel_limits_i9xx_lvds; } else { limit = & intel_limits_i9xx_sdvo; } } else { tmp___2 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___2) { limit = & intel_limits_i8xx_lvds; } else { tmp___1 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 2); if ((int )tmp___1) { limit = & intel_limits_i8xx_dvo; } else { limit = & intel_limits_i8xx_dac; } } } } } } } } } return (limit); } } static void pineview_clock(int refclk , intel_clock_t *clock ) { int __ret_warn_on ; long tmp ; long tmp___0 ; int __x ; int __d ; int __x___0 ; int __d___0 ; { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; __ret_warn_on = clock->n == 0 || clock->p == 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 569, "WARN_ON(clock->n == 0 || clock->p == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } __x = clock->m * refclk; __d = clock->n; clock->vco = __x > 0 ? (__d / 2 + __x) / __d : (__x - __d / 2) / __d; __x___0 = clock->vco; __d___0 = clock->p; clock->dot = __x___0 > 0 ? 
(__d___0 / 2 + __x___0) / __d___0 : (__x___0 - __d___0 / 2) / __d___0; return; } }
/* i9xx effective PLL multiplier: 5*(m1+2) + (m2+2), written here in the
 * algebraically equal form (m1*5 + 10) + (m2 + 2). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll ) { { return ((uint32_t )((dpll->m1 * 5 + 10) + (dpll->m2 + 2))); } }
/* i9xx PLL readout: m from i9xx_dpll_compute_m, p = p1*p2; WARN and bail
 * when n+2 == 0 (the i9xx divisor is n+2, and n == -2 below is exactly
 * n + 2 == 0) or p == 0, then vco = refclk*m/(n+2) and dot = vco/p using
 * the rounded-division ternary (CIL expansion of DIV_ROUND_CLOSEST). */
static void i9xx_clock(int refclk , intel_clock_t *clock ) { uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; int __x ; int __d ; int __x___0 ; int __d___0 ; { tmp = i9xx_dpll_compute_m(clock); clock->m = (int )tmp; clock->p = clock->p1 * clock->p2; __ret_warn_on = clock->n == -2 || clock->p == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 584, "WARN_ON(clock->n + 2 == 0 || clock->p == 0)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } __x = clock->m * refclk; __d = clock->n + 2; clock->vco = __x > 0 ? (__d / 2 + __x) / __d : (__x - __d / 2) / __d; __x___0 = clock->vco; __d___0 = clock->p; clock->dot = __x___0 > 0 ?
(__d___0 / 2 + __x___0) / __d___0 : (__x___0 - __d___0 / 2) / __d___0; return; } }
/* Cherryview PLL readout: m = m1*m2, p = p1*p2; WARN and bail on zero n or
 * p. The CHV divisor is n << 22 (fractional-N fixed point), so the rounded
 * division refclk*m/(n<<22) is done in unsigned 64-bit (the _tmp/__base/
 * __rem sequence is the flattened do_div-style helper) to avoid 32-bit
 * overflow; dot = vco/p uses the plain 32-bit rounded-division ternary. */
static void chv_clock(int refclk , intel_clock_t *clock ) { int __ret_warn_on ; long tmp ; long tmp___0 ; int __d ; unsigned long long _tmp ; uint32_t __base ; uint32_t __rem ; int __x ; int __d___0 ; { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; __ret_warn_on = clock->n == 0 || clock->p == 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 594, "WARN_ON(clock->n == 0 || clock->p == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } __d = clock->n << 22; _tmp = (unsigned long long )refclk * (unsigned long long )clock->m + (unsigned long long )(__d / 2); __base = (uint32_t )__d; __rem = (uint32_t )(_tmp % (unsigned long long )__base); _tmp = _tmp / (unsigned long long )__base; clock->vco = (int )_tmp; __x = clock->vco; __d___0 = clock->p; clock->dot = __x > 0 ?
(__d___0 / 2 + __x) / __d___0 : (__x - __d___0 / 2) / __d___0; return; } } static bool intel_PLL_is_valid(struct drm_device *dev , intel_limit_t const *limit , intel_clock_t const *clock ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { if ((int )clock->n < (int )limit->n.min || (int )limit->n.max < (int )clock->n) { return (0); } else { } if ((int )clock->p1 < (int )limit->p1.min || (int )limit->p1.max < (int )clock->p1) { return (0); } else { } if ((int )clock->m2 < (int )limit->m2.min || (int )limit->m2.max < (int )clock->m2) { return (0); } else { } if ((int )clock->m1 < (int )limit->m1.min || (int )limit->m1.max < (int )clock->m1) { return (0); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 9U) { _L: /* CIL Label */ if ((int )clock->m1 <= (int )clock->m2) { return (0); } else { } } else { } } } else { } } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { goto _L___0; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) != 9U) { _L___0: /* CIL Label */ if ((int )clock->p < (int )limit->p.min || (int )limit->p.max < (int )clock->p) { return (0); } else { } if ((int )clock->m < (int )limit->m.min || (int 
)limit->m.max < (int )clock->m) { return (0); } else { } } else { } } } else { } if ((int )clock->vco < (int )limit->vco.min || (int )limit->vco.max < (int )clock->vco) { return (0); } else { } if ((int )clock->dot < (int )limit->dot.min || (int )limit->dot.max < (int )clock->dot) { return (0); } else { } return (1); } } static bool i9xx_find_best_dpll(intel_limit_t const *limit , struct intel_crtc_state *crtc_state , int target , int refclk , intel_clock_t *match_clock , intel_clock_t *best_clock ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; intel_clock_t clock ; int err ; bool tmp ; bool tmp___0 ; int this_err ; bool tmp___1 ; int tmp___2 ; long ret ; int __x___0 ; { __mptr = (struct drm_crtc const *)crtc_state->base.crtc; crtc = (struct intel_crtc *)__mptr; dev = crtc->base.dev; err = target; tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___0) { tmp = intel_is_dual_link_lvds(dev); if ((int )tmp) { clock.p2 = limit->p2.p2_fast; } else { clock.p2 = limit->p2.p2_slow; } } else if ((int )limit->p2.dot_limit > target) { clock.p2 = limit->p2.p2_slow; } else { clock.p2 = limit->p2.p2_fast; } memset((void *)best_clock, 0, 36UL); clock.m1 = limit->m1.min; goto ldv_49407; ldv_49406: clock.m2 = limit->m2.min; goto ldv_49405; ldv_49404: ; if (clock.m2 >= clock.m1) { goto ldv_49391; } else { } clock.n = limit->n.min; goto ldv_49402; ldv_49401: clock.p1 = limit->p1.min; goto ldv_49399; ldv_49398: i9xx_clock(refclk, & clock); tmp___1 = intel_PLL_is_valid(dev, limit, (intel_clock_t const *)(& clock)); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_49393; } else { } if ((unsigned long )match_clock != (unsigned long )((intel_clock_t *)0) && clock.p != match_clock->p) { goto ldv_49393; } else { } __x___0 = clock.dot - target; ret = (long )(__x___0 < 0 ? 
- __x___0 : __x___0); this_err = (int )ret; if (this_err < err) { *best_clock = clock; err = this_err; } else { } ldv_49393: clock.p1 = clock.p1 + 1; ldv_49399: ; if (clock.p1 <= (int )limit->p1.max) { goto ldv_49398; } else { } clock.n = clock.n + 1; ldv_49402: ; if (clock.n <= (int )limit->n.max) { goto ldv_49401; } else { } clock.m2 = clock.m2 + 1; ldv_49405: ; if (clock.m2 <= (int )limit->m2.max) { goto ldv_49404; } else { } ldv_49391: clock.m1 = clock.m1 + 1; ldv_49407: ; if (clock.m1 <= (int )limit->m1.max) { goto ldv_49406; } else { } return (err != target); } } static bool pnv_find_best_dpll(intel_limit_t const *limit , struct intel_crtc_state *crtc_state , int target , int refclk , intel_clock_t *match_clock , intel_clock_t *best_clock ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; intel_clock_t clock ; int err ; bool tmp ; bool tmp___0 ; int this_err ; bool tmp___1 ; int tmp___2 ; long ret ; int __x___0 ; { __mptr = (struct drm_crtc const *)crtc_state->base.crtc; crtc = (struct intel_crtc *)__mptr; dev = crtc->base.dev; err = target; tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___0) { tmp = intel_is_dual_link_lvds(dev); if ((int )tmp) { clock.p2 = limit->p2.p2_fast; } else { clock.p2 = limit->p2.p2_slow; } } else if ((int )limit->p2.dot_limit > target) { clock.p2 = limit->p2.p2_slow; } else { clock.p2 = limit->p2.p2_fast; } memset((void *)best_clock, 0, 36UL); clock.m1 = limit->m1.min; goto ldv_49439; ldv_49438: clock.m2 = limit->m2.min; goto ldv_49436; ldv_49435: clock.n = limit->n.min; goto ldv_49433; ldv_49432: clock.p1 = limit->p1.min; goto ldv_49430; ldv_49429: pineview_clock(refclk, & clock); tmp___1 = intel_PLL_is_valid(dev, limit, (intel_clock_t const *)(& clock)); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_49424; } else { } if ((unsigned long )match_clock != (unsigned long )((intel_clock_t *)0) && clock.p != 
match_clock->p) { goto ldv_49424; } else { } __x___0 = clock.dot - target; ret = (long )(__x___0 < 0 ? - __x___0 : __x___0); this_err = (int )ret; if (this_err < err) { *best_clock = clock; err = this_err; } else { } ldv_49424: clock.p1 = clock.p1 + 1; ldv_49430: ; if (clock.p1 <= (int )limit->p1.max) { goto ldv_49429; } else { } clock.n = clock.n + 1; ldv_49433: ; if (clock.n <= (int )limit->n.max) { goto ldv_49432; } else { } clock.m2 = clock.m2 + 1; ldv_49436: ; if (clock.m2 <= (int )limit->m2.max) { goto ldv_49435; } else { } clock.m1 = clock.m1 + 1; ldv_49439: ; if (clock.m1 <= (int )limit->m1.max) { goto ldv_49438; } else { } return (err != target); } } static bool g4x_find_best_dpll(intel_limit_t const *limit , struct intel_crtc_state *crtc_state , int target , int refclk , intel_clock_t *match_clock , intel_clock_t *best_clock ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; intel_clock_t clock ; int max_n ; bool found ; int err_most ; bool tmp ; bool tmp___0 ; int this_err ; bool tmp___1 ; int tmp___2 ; long ret ; int __x___0 ; { __mptr = (struct drm_crtc const *)crtc_state->base.crtc; crtc = (struct intel_crtc *)__mptr; dev = crtc->base.dev; err_most = (target >> 8) + (target >> 9); found = 0; tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___0) { tmp = intel_is_dual_link_lvds(dev); if ((int )tmp) { clock.p2 = limit->p2.p2_fast; } else { clock.p2 = limit->p2.p2_slow; } } else if ((int )limit->p2.dot_limit > target) { clock.p2 = limit->p2.p2_slow; } else { clock.p2 = limit->p2.p2_fast; } memset((void *)best_clock, 0, 36UL); max_n = limit->n.max; clock.n = limit->n.min; goto ldv_49473; ldv_49472: clock.m1 = limit->m1.max; goto ldv_49470; ldv_49469: clock.m2 = limit->m2.max; goto ldv_49467; ldv_49466: clock.p1 = limit->p1.max; goto ldv_49464; ldv_49463: i9xx_clock(refclk, & clock); tmp___1 = intel_PLL_is_valid(dev, limit, (intel_clock_t const *)(& clock)); if (tmp___1) { 
tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_49458; } else { } __x___0 = clock.dot - target; ret = (long )(__x___0 < 0 ? - __x___0 : __x___0); this_err = (int )ret; if (this_err < err_most) { *best_clock = clock; err_most = this_err; max_n = clock.n; found = 1; } else { } ldv_49458: clock.p1 = clock.p1 - 1; ldv_49464: ; if (clock.p1 >= (int )limit->p1.min) { goto ldv_49463; } else { } clock.m2 = clock.m2 - 1; ldv_49467: ; if (clock.m2 >= (int )limit->m2.min) { goto ldv_49466; } else { } clock.m1 = clock.m1 - 1; ldv_49470: ; if (clock.m1 >= (int )limit->m1.min) { goto ldv_49469; } else { } clock.n = clock.n + 1; ldv_49473: ; if (clock.n <= max_n) { goto ldv_49472; } else { } return (found); } } static bool vlv_PLL_is_optimal(struct drm_device *dev , int target_freq , intel_clock_t const *calculated_clock , intel_clock_t const *best_clock , unsigned int best_error_ppm , unsigned int *error_ppm ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long ret ; int __x___0 ; u64 tmp___3 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { *error_ppm = 0U; return ((int )calculated_clock->p > (int )best_clock->p); } else { } } else { } __ret_warn_once = target_freq == 0; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 845, "WARN_ON_ONCE(!target_freq)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { return (0); } else { } __x___0 = target_freq - (int )calculated_clock->dot; ret = (long )(__x___0 < 0 ? - __x___0 : __x___0); tmp___3 = div_u64((unsigned long long )ret * 1000000ULL, (u32 )target_freq); *error_ppm = (unsigned int )tmp___3; if (*error_ppm <= 99U && (int )calculated_clock->p > (int )best_clock->p) { *error_ppm = 0U; return (1); } else { } return (*error_ppm + 10U < best_error_ppm); } } static bool vlv_find_best_dpll(intel_limit_t const *limit , struct intel_crtc_state *crtc_state , int target , int refclk , intel_clock_t *match_clock , intel_clock_t *best_clock ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; intel_clock_t clock ; unsigned int bestppm ; int max_n ; int _min1 ; int _min2 ; bool found ; unsigned int ppm ; int __x ; int __d ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct drm_crtc const *)crtc_state->base.crtc; crtc = (struct intel_crtc *)__mptr; dev = crtc->base.dev; bestppm = 1000000U; _min1 = limit->n.max; _min2 = refclk / 19200; max_n = _min1 < (int )((int const )_min2) ? 
_min1 : (int const )_min2; found = 0; target = target * 5; memset((void *)best_clock, 0, 36UL); clock.n = limit->n.min; goto ldv_49538; ldv_49537: clock.p1 = limit->p1.max; goto ldv_49535; ldv_49534: clock.p2 = limit->p2.p2_fast; goto ldv_49532; ldv_49531: clock.p = clock.p1 * clock.p2; clock.m1 = limit->m1.min; goto ldv_49529; ldv_49528: __x = (clock.p * target) * clock.n; __d = clock.m1 * refclk; clock.m2 = __x > 0 ? (__d / 2 + __x) / __d : (__x - __d / 2) / __d; vlv_clock(refclk, & clock); tmp = intel_PLL_is_valid(dev, limit, (intel_clock_t const *)(& clock)); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_49527; } else { } tmp___1 = vlv_PLL_is_optimal(dev, target, (intel_clock_t const *)(& clock), (intel_clock_t const *)best_clock, bestppm, & ppm); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_49527; } else { } *best_clock = clock; bestppm = ppm; found = 1; ldv_49527: clock.m1 = clock.m1 + 1; ldv_49529: ; if (clock.m1 <= (int )limit->m1.max) { goto ldv_49528; } else { } clock.p2 = clock.p2 - (clock.p2 > 10 ? 
2 : 1); ldv_49532: ; if (clock.p2 >= (int )limit->p2.p2_slow) { goto ldv_49531; } else { } clock.p1 = clock.p1 - 1; ldv_49535: ; if (clock.p1 >= (int )limit->p1.min) { goto ldv_49534; } else { } clock.n = clock.n + 1; ldv_49538: ; if (clock.n <= max_n) { goto ldv_49537; } else { } return (found); } } static bool chv_find_best_dpll(intel_limit_t const *limit , struct intel_crtc_state *crtc_state , int target , int refclk , intel_clock_t *match_clock , intel_clock_t *best_clock ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; unsigned int best_error_ppm ; intel_clock_t clock ; uint64_t m2 ; int found ; unsigned int error_ppm ; int __d ; unsigned long long _tmp ; uint32_t __base ; uint32_t __rem ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct drm_crtc const *)crtc_state->base.crtc; crtc = (struct intel_crtc *)__mptr; dev = crtc->base.dev; found = 0; memset((void *)best_clock, 0, 36UL); best_error_ppm = 1000000U; clock.n = 1; clock.m1 = 2; target = target * 5; clock.p1 = limit->p1.max; goto ldv_49568; ldv_49567: clock.p2 = limit->p2.p2_fast; goto ldv_49565; ldv_49564: clock.p = clock.p1 * clock.p2; __d = clock.m1 * refclk; _tmp = (((unsigned long long )target * (unsigned long long )clock.p) * (unsigned long long )clock.n << 22) + (unsigned long long )(__d / 2); __base = (uint32_t )__d; __rem = (uint32_t )(_tmp % (unsigned long long )__base); _tmp = _tmp / (unsigned long long )__base; m2 = _tmp; if ((uint64_t )(2147483647 / clock.m1) < m2) { goto ldv_49563; } else { } clock.m2 = (int )m2; chv_clock(refclk, & clock); tmp = intel_PLL_is_valid(dev, limit, (intel_clock_t const *)(& clock)); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_49563; } else { } tmp___1 = vlv_PLL_is_optimal(dev, target, (intel_clock_t const *)(& clock), (intel_clock_t const *)best_clock, best_error_ppm, & error_ppm); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_49563; } else { } 
/* --- tail of chv_find_best_dpll() (CIL-lowered; body begins on the previous
 * generated line) ---
 * A candidate clock beat the current best: record it and keep searching.
 * The ldv_* labels/gotos encode the original nested loops: p2 steps down
 * (by 2 while > 10, else by 1) to p2_slow, p1 steps down to p1.min. */
*best_clock = clock; best_error_ppm = error_ppm; found = 1; ldv_49563: clock.p2 = clock.p2 - (clock.p2 > 10 ? 2 : 1); ldv_49565: ; if (clock.p2 >= (int )limit->p2.p2_slow) { goto ldv_49564; } else { } clock.p1 = clock.p1 - 1; ldv_49568: ; if (clock.p1 >= (int )limit->p1.min) { goto ldv_49567; } else { } return (found != 0); } }
/* Find a DPLL configuration for Broxton: look up the reference clock and
 * PLL limits for this CRTC state, then delegate to chv_find_best_dpll()
 * with no match_clock constraint (NULL).  Returns true iff a valid clock
 * was found; the result is written to *best_clock. */
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state , int target_clock , intel_clock_t *best_clock ) { int refclk ; int tmp ; intel_limit_t const *tmp___0 ; bool tmp___1 ; { tmp = i9xx_get_refclk((struct intel_crtc_state const *)crtc_state, 0); refclk = tmp; tmp___0 = intel_limit(crtc_state, refclk); tmp___1 = chv_find_best_dpll(tmp___0, crtc_state, target_clock, refclk, (intel_clock_t *)0, best_clock); return (tmp___1); } }
/* True when the CRTC is active, its primary plane has a framebuffer
 * attached, and the configured adjusted-mode pixel clock is non-zero. */
bool intel_crtc_active(struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; return ((bool )(((int )intel_crtc->active && (unsigned long )((crtc->primary)->state)->fb != (unsigned long )((struct drm_framebuffer *)0)) && (intel_crtc->config)->base.adjusted_mode.crtc_clock != 0)); } }
/* Map a hardware pipe to the CPU transcoder recorded in that pipe's
 * current CRTC configuration (via dev_priv->pipe_to_crtc_mapping). */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv , enum pipe pipe ) { struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { crtc = dev_priv->pipe_to_crtc_mapping[(int )pipe]; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; return ((intel_crtc->config)->cpu_transcoder); } }
/* --- start of pipe_dsl_stopped(); body continues on the next generated
 * line.  Begins computing the per-pipe MMIO register offset from the
 * pipe_offsets table (the register read twice below is presumably the
 * pipe scanline/DSL register — TODO confirm against the register map). */
static bool pipe_dsl_stopped(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; u32 reg ; u32 line1 ; u32 line2 ; u32 line_mask ; struct drm_i915_private *__p ; uint32_t tmp ; unsigned long __ms ; unsigned long tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; reg = ((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int 
)dev_priv->info.display_mmio_offset) + 458752U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { line_mask = 4095U; } else { line_mask = 8191U; } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); line1 = tmp & line_mask; if (1) { __const_udelay(21475000UL); } else { __ms = 5UL; goto ldv_49607; ldv_49606: __const_udelay(4295000UL); ldv_49607: tmp___0 = __ms; __ms = __ms - 1UL; if (tmp___0 != 0UL) { goto ldv_49606; } else { } } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); line2 = tmp___1 & line_mask; return (line1 == line2); } } static void intel_wait_for_pipe_off(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum transcoder cpu_transcoder ; enum pipe pipe ; int reg ; int __ret_warn_on ; long tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; int __ret_warn_on___0 ; long tmp___4 ; unsigned long timeout_____0 ; unsigned long tmp___5 ; int ret_____0 ; bool tmp___6 ; int tmp___7 ; bool tmp___8 ; bool tmp___9 ; int tmp___10 ; struct drm_i915_private *__p ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; cpu_transcoder = (crtc->config)->cpu_transcoder; pipe = crtc->pipe; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U); tmp___0 = msecs_to_jiffies(100U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_49633; ldv_49632: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___1 & 1073741824U) != 0U) { ret__ = -110; } else { } goto ldv_49631; } else { } tmp___2 = 
drm_can_sleep___5(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49633: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___3 & 1073741824U) != 0U) { goto ldv_49632; } else { } ldv_49631: ; if (ret__ != 0) { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1064, "pipe_off wait timed out\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } } else { tmp___5 = msecs_to_jiffies(100U); timeout_____0 = (tmp___5 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_49647; ldv_49646: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___6 = pipe_dsl_stopped(dev, pipe); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { ret_____0 = -110; } else { } goto ldv_49645; } else { } tmp___8 = drm_can_sleep___5(); if ((int )tmp___8) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_49647: tmp___9 = pipe_dsl_stopped(dev, pipe); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { goto ldv_49646; } else { } ldv_49645: ; if (ret_____0 != 0) { __ret_warn_on___0 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1068, "pipe_off wait timed out\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { } } return; } } bool ibx_digital_port_connected(struct drm_i915_private *dev_priv , struct intel_digital_port *port ) { u32 bit ; struct drm_i915_private *__p ; uint32_t tmp ; { __p = 
to_i915((struct drm_device const *)dev_priv->dev);
/* --- body of ibx_digital_port_connected() (header on the previous
 * generated line) ---
 * Select the hotplug live-status bit for the given digital port.
 * pch_type == 1U uses the low bit group (0x100/0x200/0x400 for ports
 * 1..3); any other PCH uses the high group (bits 21..23).  Ports outside
 * 1..3 are conservatively reported as connected (return 1). */
if ((unsigned int )__p->pch_type == 1U) { switch ((unsigned int )port->port) { case 1U: bit = 256U; goto ldv_49663; case 2U: bit = 512U; goto ldv_49663; case 3U: bit = 1024U; goto ldv_49663; default: ; return (1); } ldv_49663: ; } else { switch ((unsigned int )port->port) { case 1U: bit = 2097152U; goto ldv_49668; case 2U: bit = 4194304U; goto ldv_49668; case 3U: bit = 8388608U; goto ldv_49668; default: ; return (1); } ldv_49668: ; }
/* Read the hotplug status register at MMIO offset 802816 (0xC4000 —
 * presumably the south display hotplug/ISR register; TODO confirm) and
 * test the selected live-status bit. */
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802816L, 1); return ((tmp & bit) != 0U); } }
/* Human-readable on/off label used by the assert_* state-check messages. */
static char const *state_string(bool enabled ) { { return ((int )enabled ? "on" : "off"); } }
/* --- start of assert_pll(); body continues on the next generated line.
 * Computes the per-pipe DPLL control register address (display_mmio_offset
 * + 24596U/24600U/24624U for pipes 0/1/other), then (below) reads it and
 * compares the enable state — taken from the register's sign bit, i.e.
 * bit 31 — against the expected 'state', WARNing on mismatch. */
void assert_pll(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) { int reg ; u32 val ; bool cur_state ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp ; char const *tmp___0 ; long tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; long tmp___4 ; { reg = (int )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ?
(unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_state = (int )val < 0; __ret_warn_on = (int )cur_state != (int )state; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { tmp = state_string((int )cur_state); tmp___0 = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1135, "PLL state assertion failure (expected %s, current %s)\n", tmp___0, tmp); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___2 = state_string((int )cur_state); tmp___3 = state_string((int )state); drm_err("PLL state assertion failure (expected %s, current %s)\n", tmp___3, tmp___2); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void assert_dsi_pll(struct drm_i915_private *dev_priv , bool state ) { u32 val ; bool cur_state ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp ; char const *tmp___0 ; long tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; long tmp___4 ; { mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_cck_read(dev_priv, 72U); mutex_unlock(& dev_priv->sb_lock); cur_state = (val & 2147483648U) != 0U; __ret_warn_on = (int )cur_state != (int )state; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { tmp = state_string((int )cur_state); tmp___0 = state_string((int )state); 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1151, "DSI PLL state assertion failure (expected %s, current %s)\n", tmp___0, tmp); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___2 = state_string((int )cur_state); tmp___3 = state_string((int )state); drm_err("DSI PLL state assertion failure (expected %s, current %s)\n", tmp___3, tmp___2); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)(crtc->base.dev)->dev_private; if ((int )(crtc->config)->shared_dpll < 0) { return ((struct intel_shared_dpll *)0); } else { } return ((struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )(crtc->config)->shared_dpll); } } void assert_shared_dpll(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll , bool state ) { bool cur_state ; struct intel_dpll_hw_state hw_state ; int __ret_warn_on ; char const *tmp ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; int __ret_warn_on___1 ; char const *tmp___2 ; char const *tmp___3 ; long tmp___4 ; char const *tmp___5 ; char const *tmp___6 ; long tmp___7 ; { __ret_warn_on = (unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { tmp = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1176, "asserting DPLL %s with no DPLL\n", tmp); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { 
return; } else { } cur_state = (*(pll->get_hw_state))(dev_priv, pll, & hw_state); __ret_warn_on___0 = (int )cur_state != (int )state; tmp___7 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___7 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___1 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { tmp___2 = state_string((int )cur_state); tmp___3 = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1182, "%s assertion failure (expected %s, current %s)\n", pll->name, tmp___3, tmp___2); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } else { tmp___5 = state_string((int )cur_state); tmp___6 = state_string((int )state); drm_err("%s assertion failure (expected %s, current %s)\n", pll->name, tmp___6, tmp___5); } } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return; } } static void assert_fdi_tx(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) { int reg ; u32 val ; bool cur_state ; enum transcoder cpu_transcoder ; enum transcoder tmp ; struct drm_i915_private *__p ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp___0 ; char const *tmp___1 ; long tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; long tmp___5 ; { tmp = intel_pipe_to_cpu_transcoder(dev_priv, pipe); cpu_transcoder = tmp; __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { reg = (int )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_state = (int )val < 0; } else { reg = (int )pipe * 4096 + 393472; val = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_state = (int )val < 0; } __ret_warn_on = (int )cur_state != (int )state; tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { tmp___0 = state_string((int )cur_state); tmp___1 = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1206, "FDI TX state assertion failure (expected %s, current %s)\n", tmp___1, tmp___0); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___3 = state_string((int )cur_state); tmp___4 = state_string((int )state); drm_err("FDI TX state assertion failure (expected %s, current %s)\n", tmp___4, tmp___3); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void assert_fdi_rx(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) { int reg ; u32 val ; bool cur_state ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp ; char const *tmp___0 ; long tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; long tmp___4 ; { reg = (int )pipe * 4096 + 983052; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_state = (int )val < 0; __ret_warn_on = (int )cur_state != (int )state; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { tmp = state_string((int )cur_state); tmp___0 = state_string((int )state); 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1223, "FDI RX state assertion failure (expected %s, current %s)\n", tmp___0, tmp); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___2 = state_string((int )cur_state); tmp___3 = state_string((int )state); drm_err("FDI RX state assertion failure (expected %s, current %s)\n", tmp___3, tmp___2); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv , enum pipe pipe ) { int reg ; u32 val ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp ; long tmp___0 ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { return; } else { } __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) != 0U) { return; } else { } reg = (int )pipe * 4096 + 393472; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __ret_warn_on = (val & 16384U) == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1244, "FDI TX PLL assertion failure, should be active but is disabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("FDI TX PLL assertion failure, should be active but is disabled\n"); } } else { } 
ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } void assert_fdi_rx_pll(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) { int reg ; u32 val ; bool cur_state ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp ; char const *tmp___0 ; long tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; long tmp___4 ; { reg = (int )pipe * 4096 + 983052; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_state = (val & 8192U) != 0U; __ret_warn_on = (int )cur_state != (int )state; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { tmp = state_string((int )cur_state); tmp___0 = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1259, "FDI RX PLL assertion failure (expected %s, current %s)\n", tmp___0, tmp); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___2 = state_string((int )cur_state); tmp___3 = state_string((int )state); drm_err("FDI RX PLL assertion failure (expected %s, current %s)\n", tmp___3, tmp___2); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } void assert_panel_unlocked(struct drm_i915_private *dev_priv , enum pipe pipe ) { struct drm_device *dev ; int pp_reg ; u32 val ; enum pipe panel_pipe ; bool locked ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; long tmp___0 ; u32 port_sel ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int __ret_warn_on___0 ; int __ret_warn_on___1 ; long tmp___4 ; long tmp___5 ; { dev = dev_priv->dev; panel_pipe = 0; locked = 1; __p = to_i915((struct drm_device const 
*)dev); __ret_warn_on = (unsigned int )*((unsigned char *)__p + 46UL) != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1271, "WARN_ON(HAS_DDI(dev))"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type != 0U) { pp_reg = 815620; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815624L, 1); port_sel = tmp___1 & 3221225472U; if (port_sel == 0U) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 921984L, 1); if ((tmp___2 & 1073741824U) != 0U) { panel_pipe = 1; } else { } } else { } } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { pp_reg = (int )pipe * 256 + 1970692; panel_pipe = pipe; } else { pp_reg = 397828; tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397696L, 1); if ((tmp___3 & 1073741824U) != 0U) { panel_pipe = 1; } else { } } } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_reg, 1); if ((val & 1U) == 0U || (val & 4294901760U) == 2882338816U) { locked = 0; } else { } __ret_warn_on___0 = (int )panel_pipe == (int )pipe && (int )locked; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___1 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1301, "panel assertion failure, pipe %c regs locked\n", 
(int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } else { drm_err("panel assertion failure, pipe %c regs locked\n", (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return; } } static void assert_cursor(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) { struct drm_device *dev ; bool cur_state ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp___1 ; char const *tmp___2 ; long tmp___3 ; char const *tmp___4 ; char const *tmp___5 ; long tmp___6 ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 9570U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 458880L, 1); cur_state = (tmp & 2147483648U) != 0U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 9586U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 458880L, 1); cur_state = (tmp & 2147483648U) != 0U; } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[(int )pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458880U), 1); cur_state = (tmp___0 & 39U) != 0U; } } __ret_warn_on = (int )cur_state != (int )state; tmp___6 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___6 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { tmp___1 = state_string((int )cur_state); tmp___2 = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1317, "cursor 
on pipe %c assertion failure (expected %s, current %s)\n", (int )pipe + 65, tmp___2, tmp___1); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___4 = state_string((int )cur_state); tmp___5 = state_string((int )state); drm_err("cursor on pipe %c assertion failure (expected %s, current %s)\n", (int )pipe + 65, tmp___5, tmp___4); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } }
/* Assert that the given pipe is in the expected on/off state.  Quirk bits in
 * dev_priv->quirks (bit 0 for pipe 0, bit 4 for pipe 1) force the expected
 * state to enabled.  If the transcoder's display power domain is off, the
 * pipe is treated as disabled without an MMIO read; otherwise the pipe
 * control register is read via the uncore mmio_readl hook and the enable
 * bit is the sign bit of the readback.  On mismatch, warns via
 * warn_slowpath_fmt when i915.verbose_state_checks is set, else drm_err. */
void assert_pipe(struct drm_i915_private *dev_priv , enum pipe pipe , bool state ) { int reg ; u32 val ; bool cur_state ; enum transcoder cpu_transcoder ; enum transcoder tmp ; bool tmp___0 ; int tmp___1 ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp___2 ; char const *tmp___3 ; long tmp___4 ; char const *tmp___5 ; char const *tmp___6 ; long tmp___7 ; { tmp = intel_pipe_to_cpu_transcoder(dev_priv, pipe); cpu_transcoder = tmp; if (((int )pipe == 0 && (int )dev_priv->quirks & 1) || ((int )pipe == 1 && (dev_priv->quirks & 16UL) != 0UL)) { state = 1; } else { } tmp___0 = intel_display_power_is_enabled(dev_priv, (unsigned int )cpu_transcoder != 3U ? (enum intel_display_power_domain )((unsigned int )cpu_transcoder + 6U) : 9); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { cur_state = 0; } else { reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_state = (int )val < 0; } __ret_warn_on = (int )cur_state != (int )state; tmp___7 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___7 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { tmp___2 = state_string((int )cur_state); tmp___3 = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1347, "pipe %c assertion failure (expected %s, current %s)\n", (int )pipe + 65, tmp___3, tmp___2); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___5 = state_string((int )cur_state); tmp___6 = state_string((int )state); drm_err("pipe %c assertion failure (expected %s, current %s)\n", (int )pipe + 65, tmp___6, tmp___5); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } }
/* Assert that the primary plane's control register matches the expected
 * enabled/disabled state; the enable bit is the sign bit of the readback
 * ((int)val < 0).  Mismatches are reported the same way as in assert_pipe. */
static void assert_plane(struct drm_i915_private *dev_priv , enum plane plane , bool state ) { int reg ; u32 val ; bool cur_state ; int __ret_warn_on ; int __ret_warn_on___0 ; char const *tmp ; char const *tmp___0 ; long tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; long tmp___4 ; { reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_state = (int )val < 0; __ret_warn_on = (int )cur_state != (int )state; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { tmp = state_string((int )cur_state); tmp___0 = state_string((int )state); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1362, "plane %c assertion failure (expected %s, current %s)\n", (unsigned int )plane + 65U, tmp___0, tmp); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___2 = state_string((int )cur_state); tmp___3 = state_string((int )state); drm_err("plane %c assertion failure (expected %s, current %s)\n", (unsigned int )plane + 65U, tmp___3, tmp___2); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } }
/* Assert that no primary plane is enabled on the given pipe.  On gen > 3
 * only that pipe's own plane control register is checked; on older gens
 * every plane (0 .. num_pipes-1) is scanned and flagged when it is enabled
 * and its pipe-select field (bits 25:24) targets this pipe. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv , enum pipe pipe ) { struct drm_device *dev ; int reg ; int i ; u32 val ; int cur_pipe ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp ; long tmp___0 ; struct drm_i915_private *__p ; int __ret_warn_on___1 ; int __ret_warn_on___2 ; long tmp___1 ; long tmp___2 ; struct drm_i915_private *__p___0 ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __ret_warn_on = (int )val < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1382, "plane %c assertion failure, should be disabled but not\n", (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("plane %c assertion failure, should be disabled but not\n", (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } else { } i = 0; goto ldv_49891; ldv_49890: reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); cur_pipe = (int )((val & 50331648U) >> 24); __ret_warn_on___1 = (int )val < 0 && (int )pipe == cur_pipe; tmp___2 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___2 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1394, "plane %c assertion failure, should be off on pipe %c but is still active\n", i + 65, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("plane %c assertion failure, should be off on pipe %c but is still active\n", i + 65, (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); i = i + 1; ldv_49891: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > i) { goto ldv_49890; } else { } return; } }
/* Assert that all sprite planes on the given pipe are disabled.  The
 * register layout checked depends on hardware generation: gen > 8 uses the
 * per-pipe PLANE_CTL block, the byte-offset-45 platform flag (presumably
 * VLV/CHV -- TODO confirm) uses SP control registers, gen > 6 uses a single
 * SPRITE control register, and gen > 4 uses the DVS control register. */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv , enum pipe pipe ) { struct drm_device *dev ; 
/* (continuation of assert_sprites_disabled: local declarations and body) */
int reg ; int sprite ; u32 val ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp ; long tmp___0 ; struct drm_i915_private *__p ; int __ret_warn_on___1 ; int __ret_warn_on___2 ; struct drm_i915_private *__p___0 ; long tmp___1 ; struct drm_i915_private *__p___1 ; long tmp___2 ; struct drm_i915_private *__p___2 ; int __ret_warn_on___3 ; int __ret_warn_on___4 ; long tmp___3 ; long tmp___4 ; int __ret_warn_on___5 ; int __ret_warn_on___6 ; long tmp___5 ; long tmp___6 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { dev = dev_priv->dev; __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 8U) { sprite = 0; goto ldv_49918; ldv_49917: val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe * 4096 + (((int )pipe * 4096 + 459392) + ((int )pipe * -4096 + -459136)) * sprite) + 459136), 1); __ret_warn_on = (int )val < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1410, "plane %d assertion failure, should be off on pipe %c but is still active\n", sprite, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("plane %d assertion failure, should be off on pipe %c but is still active\n", sprite, (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); sprite = sprite + 1; ldv_49918: __p = dev_priv; if ((int )__p->info.num_sprites[(int )pipe] > sprite) { goto ldv_49917; } else { } } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { sprite = 0; goto ldv_49949; ldv_49948: reg = ((int )pipe * 2 + sprite) * 256 + 2040192; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __ret_warn_on___1 = (int )val < 0; tmp___2 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___2 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___1 != 0L) { __p___0 = to_i915((struct drm_device const *)dev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1418, "sprite %c assertion failure, should be off on pipe %c but is still active\n", ((int )__p___0->info.num_sprites[(int )pipe] * (int )pipe + sprite) + 65, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { __p___1 = to_i915((struct drm_device const *)dev); drm_err("sprite %c assertion failure, should be off on pipe %c but is still active\n", ((int )__p___1->info.num_sprites[(int )pipe] * (int )pipe + sprite) + 65, (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); sprite = sprite + 1; ldv_49949: __p___2 = dev_priv; if ((int )__p___2->info.num_sprites[(int )pipe] > sprite) { goto ldv_49948; } else { } } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 6U) { reg = (int )pipe * 4096 + 459392; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __ret_warn_on___3 = (int )val < 0; tmp___4 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___4 = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1425, "sprite %c assertion failure, should be off on pipe %c but is still active\n", (int )pipe + 65, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); } else { drm_err("sprite %c assertion failure, should be off on pipe %c but is still active\n", (int )pipe + 65, (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 4U) { reg = (int )pipe * 4096 + 467328; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __ret_warn_on___5 = (int )val < 0; tmp___6 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___6 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___6 = 1; tmp___5 = ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1431, "sprite %c assertion failure, should be off on pipe %c but is still active\n", (int )pipe + 65, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); } else { drm_err("sprite %c assertion failure, should be off on pipe %c but is still active\n", (int )pipe + 65, (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); } else { } } } } return; } }
/* Assert that vblank interrupts are disabled on the crtc: a return of 0
 * from drm_crtc_vblank_get() means vblank was off and could be taken,
 * which triggers the warning; the reference is released again below via
 * drm_crtc_vblank_put(). */
static void assert_vblank_disabled(struct drm_crtc *crtc ) { int __ret_warn_on ; int tmp ; int __ret_warn_on___0 ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = drm_crtc_vblank_get(crtc); __ret_warn_on = tmp == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on 
/* (continuation of assert_vblank_disabled: report and drop the reference) */
!= 0, 0L); if (tmp___1 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1437, "WARN_ON(drm_crtc_vblank_get(crtc) == 0)\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("WARN_ON(drm_crtc_vblank_get(crtc) == 0)\n"); } } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { drm_crtc_vblank_put(crtc); } else { } return; } }
/* Assert that the PCH reference clock is running: reads MMIO offset 811520
 * and requires some of bits 0x1F80 (8064U) to be set.  Also warns first if
 * the platform's pch_type is neither 1 (IBX) nor 2 (CPT), per the embedded
 * WARN_ON message text. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv ) { u32 val ; bool enabled ; int __ret_warn_on ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; int __ret_warn_on___0 ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___1 ; int __ret_warn_on___2 ; long tmp___2 ; long tmp___3 ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p->pch_type != 1U) { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___0->pch_type != 2U) { tmp = 1; } else { tmp = 0; } } else { tmp = 0; } __ret_warn_on = tmp; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1446, "WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)))\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)))\n"); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 1); enabled = (val & 8064U) != 0U; __ret_warn_on___1 = ! enabled; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1451, "PCH refclk assertion failure, should be active but is disabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("PCH refclk assertion failure, should be active but is disabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); return; } }
/* Assert that the PCH transcoder for the given pipe is disabled; the enable
 * bit is the sign bit of the register readback at pipe*4096 + 983048. */
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv , enum pipe pipe ) { int reg ; u32 val ; bool enabled ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp ; long tmp___0 ; { reg = (int )pipe * 4096 + 983048; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); enabled = (int )val < 0; __ret_warn_on = (int )enabled; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1466, "transcoder assertion failed, should be off on pipe %c but is still active\n", (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("transcoder assertion failed, should be off on pipe %c but is still active\n", (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } }
/* Return nonzero when the DP port register value 'val' shows the port
 * enabled (sign bit set) and routed to the given pipe.  The pipe-select
 * field checked depends on platform: pch_type 2 (CPT) reads TRANS_DP_CTL
 * (bits 30:29), the byte-offset-45 flag with gen == 8 (presumably CHV --
 * TODO confirm) checks bits 17:16, otherwise bit 30. */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv , enum pipe pipe , u32 port_sel , u32 val ) { u32 trans_dp_ctl_reg ; u32 trans_dp_ctl ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { if ((int )val >= 0) { return (0); } else { } __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___1->pch_type == 2U) { trans_dp_ctl_reg = (u32 )((int )pipe * 4096 + 918272); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )trans_dp_ctl_reg, 1); trans_dp_ctl = tmp; if ((trans_dp_ctl & 1610612736U) != port_sel) { return (0); } else { } } else { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { if ((val & 196608U) != (u32 )((int )pipe << 16)) { return (0); } else { } } else { goto _L; } } else _L: /* CIL Label */ if ((val & 1073741824U) != (u32 )((int )pipe << 30)) { return (0); } else { } } return (1); } }
/* Return nonzero when the HDMI port register value 'val' shows the port
 * enabled and routed to the given pipe; pipe-select field location varies
 * by platform just as in dp_pipe_enabled. */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv , enum pipe pipe , u32 val ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { if ((int )val >= 0) { return (0); } else { } __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___1->pch_type == 2U) { if ((val & 1610612736U) != (u32 )((int )pipe << 29)) { return (0); } else { } } else { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { if ((val & 50331648U) != (u32 )((int )pipe << 24)) { return (0); } 
/* (continuation of hdmi_pipe_enabled: non-CPT, non-gen8 fallback checks bit 30) */
else { } } else { goto _L; } } else _L: /* CIL Label */ if ((val & 1073741824U) != (u32 )((int )pipe << 30)) { return (0); } else { } } return (1); } }
/* Return nonzero when the LVDS port register value 'val' shows the port
 * enabled and routed to the given pipe: CPT (pch_type 2) uses bits 30:29,
 * everything else uses bit 30. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv , enum pipe pipe , u32 val ) { struct drm_i915_private *__p ; { if ((int )val >= 0) { return (0); } else { } __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p->pch_type == 2U) { if ((val & 1610612736U) != (u32 )((int )pipe << 29)) { return (0); } else { } } else if ((val & 1073741824U) != (u32 )((int )pipe << 30)) { return (0); } else { } return (1); } }
/* Return nonzero when the analog VGA (ADPA) port register value 'val'
 * shows the port enabled and routed to the given pipe; same pipe-select
 * rules as lvds_pipe_enabled. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv , enum pipe pipe , u32 val ) { struct drm_i915_private *__p ; { if ((int )val >= 0) { return (0); } else { } __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p->pch_type == 2U) { if ((val & 1610612736U) != (u32 )((int )pipe << 29)) { return (0); } else { } } else if ((val & 1073741824U) != (u32 )((int )pipe << 30)) { return (0); } else { } return (1); } }
/* Assert that the PCH DP port at register 'reg' is not enabled on the
 * given transcoder (via dp_pipe_enabled); additionally warns when an IBX
 * PCH (pch_type 1) DP port is disabled but still has its transcoder-B
 * select bit (bit 30) set. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv , enum pipe pipe , int reg , u32 port_sel ) { u32 val ; uint32_t tmp ; int __ret_warn_on ; bool tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on___1 ; struct drm_i915_private *__p ; int __ret_warn_on___2 ; long tmp___3 ; long tmp___4 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; tmp___0 = dp_pipe_enabled(dev_priv, pipe, port_sel, val); __ret_warn_on = (int )tmp___0; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1546, "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p = to_i915((struct drm_device const *)dev_priv->dev); __ret_warn_on___1 = ((unsigned int )__p->pch_type == 1U && (int )val >= 0) && (val & 1073741824U) != 0U; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1550, "IBX PCH dp port still using transcoder B\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("IBX PCH dp port still using transcoder B\n"); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); return; } }
/* Assert that the PCH HDMI port at register 'reg' is not enabled on the
 * given transcoder (via hdmi_pipe_enabled), with the same extra IBX
 * transcoder-B check as assert_pch_dp_disabled. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv , enum pipe pipe , int reg ) { u32 val ; uint32_t tmp ; int __ret_warn_on ; bool tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on___1 ; struct drm_i915_private *__p ; int __ret_warn_on___2 ; long tmp___3 ; long tmp___4 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; tmp___0 = hdmi_pipe_enabled(dev_priv, pipe, val); __ret_warn_on = (int )tmp___0; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { if 
/* (continuation of assert_pch_hdmi_disabled: report paths and IBX check) */
((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1559, "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p = to_i915((struct drm_device const *)dev_priv->dev); __ret_warn_on___1 = ((unsigned int )__p->pch_type == 1U && (int )val >= 0) && (val & 1073741824U) != 0U; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1563, "IBX PCH hdmi port still using transcoder B\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("IBX PCH hdmi port still using transcoder B\n"); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); return; } }
/* Assert that every PCH output port is disabled on the given pipe: the
 * three DP ports (registers 934144/934400/934656) via
 * assert_pch_dp_disabled, the analog VGA port (921856) via
 * adpa_pipe_enabled, LVDS (921984) via lvds_pipe_enabled, and the three
 * HDMI ports (921920/921936/921952) via assert_pch_hdmi_disabled. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv , enum pipe pipe ) { int reg ; u32 val ; int __ret_warn_on ; bool tmp ; int __ret_warn_on___0 ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___1 ; bool tmp___2 ; int __ret_warn_on___2 ; long tmp___3 ; long tmp___4 ; { assert_pch_dp_disabled(dev_priv, pipe, 934144, 0U); assert_pch_dp_disabled(dev_priv, pipe, 934400, 536870912U); assert_pch_dp_disabled(dev_priv, pipe, 934656, 1073741824U); reg = 921856; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp = adpa_pipe_enabled(dev_priv, pipe, val); __ret_warn_on = (int )tmp; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1580, "PCH VGA enabled on transcoder %c, should be disabled\n", (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("PCH VGA enabled on transcoder %c, should be disabled\n", (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); reg = 921984; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp___2 = lvds_pipe_enabled(dev_priv, pipe, val); __ret_warn_on___1 = (int )tmp___2; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1586, "PCH LVDS enabled on transcoder %c, should be disabled\n", (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("PCH LVDS enabled on transcoder %c, should be disabled\n", (int )pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); assert_pch_hdmi_disabled(dev_priv, pipe, 921920); assert_pch_hdmi_disabled(dev_priv, pipe, 921936); assert_pch_hdmi_disabled(dev_priv, pipe, 921952); return; } }
/* Record the IOSF sideband port numbers for the DPIO PHYs in
 * dev_priv->dpio_phy_iosf_port: a no-op when the byte-offset-45 platform
 * flag is clear; two ports (26 and 18) on the gen-8 variant of that
 * platform, a single port (18) otherwise. */
static void intel_init_dpio(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { dev_priv->dpio_phy_iosf_port[0] = 26; dev_priv->dpio_phy_iosf_port[1] = 18; } else { dev_priv->dpio_phy_iosf_port[0] = 18; } } else { dev_priv->dpio_phy_iosf_port[0] = 18; } return; } }
/* Enable the DPLL for the crtc's pipe on the VLV-style platform: assert
 * the pipe is off, BUG if the byte-offset-45 platform flag is clear,
 * unlock the panel registers when the byte-offset-44 flag is set, write
 * DPLL, poll up to 1 ms for the lock bit (0x8000), then program DPLL_MD
 * and rewrite DPLL three more times, each write followed by a fixed
 * __const_udelay. */
static void vlv_enable_pll(struct intel_crtc *crtc , struct intel_crtc_state const *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int reg ; u32 dpll ; struct drm_i915_private *__p ; long tmp ; struct drm_i915_private *__p___0 ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; reg = (int )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 1 ? 
/* (continuation of vlv_enable_pll: DPLL write, lock poll, DPLL_MD and
 * triple rewrite sequence) */
(unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)); dpll = pipe_config->dpll_hw_state.dpll; assert_pipe(dev_priv, crtc->pipe, 0); __p = to_i915((struct drm_device const *)dev_priv->dev); tmp = ldv__builtin_expect((unsigned int )*((unsigned char *)__p + 45UL) == 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (1624), "i" (12UL)); ldv_50176: ; goto ldv_50176; } else { } __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { assert_panel_unlocked(dev_priv, crtc->pipe); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); tmp___0 = msecs_to_jiffies(1U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50193; ldv_50192: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___1 & 32768U) == 0U) { ret__ = -110; } else { } goto ldv_50191; } else { } tmp___2 = drm_can_sleep___5(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50193: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___3 & 32768U) == 0U) { goto ldv_50192; } else { } ldv_50191: ; if (ret__ != 0) { drm_err("DPLL %d failed to lock\n", (int )crtc->pipe); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24604U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24608U : (unsigned int )dev_priv->info.display_mmio_offset + 24636U)), pipe_config->dpll_hw_state.dpll_md, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24604U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24608U : (unsigned int )dev_priv->info.display_mmio_offset + 24636U)), 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); return; } }
/* Enable the PLL on the gen-8 variant of the byte-offset-45 platform
 * (presumably CHV -- TODO confirm): assert the pipe is off, BUG on any
 * other platform, set bit 13 in the DPIO PHY control word over the
 * sideband bus (under sb_lock), write DPLL, poll up to 1 ms for the lock
 * bit (0x8000), then program DPLL_MD. */
static void chv_enable_pll(struct intel_crtc *crtc , struct intel_crtc_state const *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; enum dpio_channel port ; int tmp ; u32 tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp___1 ; long tmp___2 ; unsigned long timeout__ ; unsigned long tmp___3 ; int ret__ ; uint32_t tmp___4 ; bool tmp___5 ; uint32_t tmp___6 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; tmp = vlv_pipe_to_channel((enum pipe )pipe); port = (enum dpio_channel )tmp; assert_pipe(dev_priv, crtc->pipe, 0); __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { tmp___1 = 1; } else { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 8U) { tmp___1 = 1; } else { tmp___1 = 0; } } tmp___2 = ldv__builtin_expect((long )tmp___1, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (1663), "i" (12UL)); ldv_50216: ; goto ldv_50216; } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); tmp___0 = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 4294967116U + 33080U)); tmp___0 = tmp___0 | 8192U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 4294967116U + 33080U), tmp___0); mutex_unlock(& dev_priv->sb_lock); __const_udelay(4295UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : (pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), pipe_config->dpll_hw_state.dpll, 1); tmp___3 = msecs_to_jiffies(1U); timeout__ = (tmp___3 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50227; ldv_50226: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : (pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); if ((tmp___4 & 32768U) == 0U) { ret__ = -110; } else { } goto ldv_50225; } else { } tmp___5 = drm_can_sleep___5(); if ((int )tmp___5) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50227: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); if ((tmp___6 & 32768U) == 0U) { goto ldv_50226; } else { } ldv_50225: ; if (ret__ != 0) { drm_err("PLL %d failed to lock\n", pipe); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24604U : (pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24608U : (unsigned int )dev_priv->info.display_mmio_offset + 24636U)), pipe_config->dpll_hw_state.dpll_md, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24604U : (pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24608U : (unsigned int )dev_priv->info.display_mmio_offset + 24636U)), 0); return; } }
/* Count active crtcs driving a DVO output: walks dev->mode_config.crtc_list
 * and sums those with crtc->active set and intel_pipe_has_type(crtc, 2). */
static int intel_num_dvo_pipes(struct drm_device *dev ) { struct intel_crtc *crtc ; int count ; struct list_head const *__mptr ; bool tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { count = 0; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_50239; ldv_50238: ; if ((int )crtc->active) { tmp = intel_pipe_has_type(crtc, 2); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } count = tmp___0 + count; __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_50239: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50238; } else { } return (count); } }
/* Enable the DPLL on gen2-4 hardware: assert the pipe is off, BUG on
 * gen > 4, unlock panel registers when the byte-offset-44 flag is set
 * (except for device id 13687), apply the dual-DVO workaround for device
 * id 13687 (also enable the other pipe's DPLL DVO bit when any DVO pipe is
 * active), then run the write/delay enable sequence for DPLL and, on
 * gen > 3, DPLL_MD. */
static void i9xx_enable_pll(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int reg ; u32 dpll ; struct drm_i915_private *__p ; long tmp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; uint32_t tmp___0 ; struct drm_i915_private *__p___2 ; int tmp___1 ; struct 
drm_i915_private *__p___3 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; reg = (int )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)); dpll = (crtc->config)->dpll_hw_state.dpll; assert_pipe(dev_priv, crtc->pipe, 0); __p = to_i915((struct drm_device const *)dev); tmp = ldv__builtin_expect((unsigned int )((unsigned char )__p->info.gen) > 4U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (1713), "i" (12UL)); ldv_50254: ; goto ldv_50254; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) != 13687U) { assert_panel_unlocked(dev_priv, crtc->pipe); } else { } } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 13687U) { tmp___1 = intel_num_dvo_pipes(dev); if (tmp___1 > 0) { dpll = dpll | 1073741824U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe != 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe != 0 ? 
(unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), tmp___0 | 1073741824U, 1); } else { } } else { } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 3U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24604U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24608U : (unsigned int )dev_priv->info.display_mmio_offset + 24636U)), (crtc->config)->dpll_hw_state.dpll_md, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); return; } } static void i9xx_disable_pll(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; bool tmp___1 ; int tmp___2 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 13687U) { tmp___1 = intel_pipe_has_type(crtc, 2); if ((int )tmp___1) { tmp___2 = intel_num_dvo_pipes(dev); if (tmp___2 == 1) { tmp = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24600U), tmp & 3221225471U, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24596U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 24596U), tmp___0 & 3221225471U, 1); } else { } } else { } } else { } if (((int )pipe == 0 && (int )dev_priv->quirks & 1) || ((int )pipe == 1 && (dev_priv->quirks & 16UL) != 0UL)) { return; } else { } assert_pipe(dev_priv, pipe, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 0); return; } } static void vlv_disable_pll(struct drm_i915_private *dev_priv , enum pipe pipe ) { u32 val ; { val = 0U; assert_pipe(dev_priv, pipe, 0); if ((int )pipe == 1) { val = 536887296U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? 
(unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 0); return; } } static void chv_disable_pll(struct drm_i915_private *dev_priv , enum pipe pipe ) { enum dpio_channel port ; int tmp ; u32 val ; { tmp = vlv_pipe_to_channel(pipe); port = (enum dpio_channel )tmp; assert_pipe(dev_priv, pipe, 0); val = 536879104U; if ((int )pipe != 0) { val = val | 16384U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 0); mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )port * 4294967116U + 33080U)); val = val & 4294959103U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )port * 4294967116U + 33080U), val); if ((int )pipe != 1) { val = vlv_dpio_read(dev_priv, pipe, 33044); val = val & 4279238655U; vlv_dpio_write(dev_priv, pipe, 33044, val); } else { val = vlv_dpio_read(dev_priv, pipe, 32900); val = val & 4293001215U; vlv_dpio_write(dev_priv, pipe, 32900, val); } mutex_unlock(& dev_priv->sb_lock); return; } } void vlv_wait_port_ready(struct drm_i915_private *dev_priv , struct intel_digital_port *dport , unsigned int expected_mask ) { u32 port_mask ; int dpll_reg ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; { switch ((unsigned int )dport->port) { case 1U: port_mask = 15U; dpll_reg = (int )((unsigned int 
)dev_priv->info.display_mmio_offset + 24596U); goto ldv_50310; case 2U: port_mask = 240U; dpll_reg = (int )((unsigned int )dev_priv->info.display_mmio_offset + 24596U); expected_mask = expected_mask << 4; goto ldv_50310; case 3U: port_mask = 15U; dpll_reg = 1598016; goto ldv_50310; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (1873), "i" (12UL)); ldv_50314: ; goto ldv_50314; } ldv_50310: tmp___1 = msecs_to_jiffies(1000U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50325; ldv_50324: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dpll_reg, 1); if ((tmp___2 & port_mask) != expected_mask) { ret__ = -110; } else { } goto ldv_50323; } else { } tmp___3 = drm_can_sleep___5(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50325: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dpll_reg, 1); if ((tmp___4 & port_mask) != expected_mask) { goto ldv_50324; } else { } ldv_50323: ; if (ret__ != 0) { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dpll_reg, 1); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1878, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", (unsigned int )dport->port + 65U, tmp & port_mask, expected_mask); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); 
} else { } return; } } static void intel_prepare_shared_dpll(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_shared_dpll *pll ; struct intel_shared_dpll *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; int __ret_warn_on___1 ; long tmp___4 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_crtc_to_shared_dpll(crtc); pll = tmp; __ret_warn_on = (unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1887, "WARN_ON(pll == NULL)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } __ret_warn_on___0 = pll->config.crtc_mask == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1890, "WARN_ON(!pll->config.crtc_mask)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (pll->active == 0) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_prepare_shared_dpll", "setting up %s\n", pll->name); } else { } __ret_warn_on___1 = (int )pll->on; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___4 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1893, "WARN_ON(pll->on)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); assert_shared_dpll(dev_priv, pll, 0); (*(pll->mode_set))(dev_priv, pll); } else { } return; } } static void intel_enable_shared_dpll(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_shared_dpll *pll ; struct intel_shared_dpll *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; int __ret_warn_on___1 ; long tmp___5 ; int tmp___6 ; int __ret_warn_on___2 ; long tmp___7 ; long tmp___8 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_crtc_to_shared_dpll(crtc); pll = tmp; __ret_warn_on = (unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1914, "WARN_ON(pll == NULL)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } __ret_warn_on___0 = pll->config.crtc_mask == 0U; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1917, "WARN_ON(pll->config.crtc_mask == 0)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 
!= 0L) { return; } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_enable_shared_dpll", "enable %s (active %d, on? %d) for crtc %d\n", pll->name, pll->active, (int )pll->on, crtc->base.base.id); } else { } tmp___6 = pll->active; pll->active = pll->active + 1; if (tmp___6 != 0) { __ret_warn_on___1 = ! pll->on; tmp___5 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1925, "WARN_ON(!pll->on)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); assert_shared_dpll(dev_priv, pll, 1); return; } else { } __ret_warn_on___2 = (int )pll->on; tmp___7 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1929, "WARN_ON(pll->on)"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); intel_display_power_get(dev_priv, 23); tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_enable_shared_dpll", "enabling %s\n", pll->name); } else { } (*(pll->enable))(dev_priv, pll); pll->on = 1; return; } } static void intel_disable_shared_dpll(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_shared_dpll *pll ; struct intel_shared_dpll *tmp ; struct drm_i915_private *__p ; long tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; int __ret_warn_on___0 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; int __ret_warn_on___1 ; long tmp___6 ; long tmp___7 ; int __ret_warn_on___2 ; long tmp___8 ; long 
tmp___9 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_crtc_to_shared_dpll(crtc); pll = tmp; __p = to_i915((struct drm_device const *)dev); tmp___0 = ldv__builtin_expect((unsigned int )((unsigned char )__p->info.gen) <= 4U, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (1945), "i" (12UL)); ldv_50369: ; goto ldv_50369; } else { } __ret_warn_on = (unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0); tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1946, "WARN_ON(pll == NULL)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return; } else { } __ret_warn_on___0 = pll->config.crtc_mask == 0U; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1949, "WARN_ON(pll->config.crtc_mask == 0)"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { return; } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_disable_shared_dpll", "disable %s (active %d, on? 
%d) for crtc %d\n", pll->name, pll->active, (int )pll->on, crtc->base.base.id); } else { } __ret_warn_on___1 = pll->active == 0; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1956, "WARN_ON(pll->active == 0)"); } else { } tmp___7 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___7 != 0L) { assert_shared_dpll(dev_priv, pll, 0); return; } else { } assert_shared_dpll(dev_priv, pll, 1); __ret_warn_on___2 = ! pll->on; tmp___8 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 1962, "WARN_ON(!pll->on)"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); pll->active = pll->active - 1; if (pll->active != 0) { return; } else { } tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("intel_disable_shared_dpll", "disabling %s\n", pll->name); } else { } (*(pll->disable))(dev_priv, pll); pll->on = 0; intel_display_power_put(dev_priv, 23); return; } } static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv , enum pipe pipe ) { struct drm_device *dev ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; uint32_t reg ; uint32_t val ; uint32_t pipeconf_val ; struct drm_i915_private *__p ; long tmp ; struct intel_shared_dpll *tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; bool tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool 
tmp___4 ; uint32_t tmp___5 ; { dev = dev_priv->dev; crtc = dev_priv->pipe_to_crtc_mapping[(int )pipe]; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; __p = to_i915((struct drm_device const *)dev); tmp = ldv__builtin_expect((unsigned int )__p->pch_type == 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (1982), "i" (12UL)); ldv_50397: ; goto ldv_50397; } else { } tmp___0 = intel_crtc_to_shared_dpll(intel_crtc); assert_shared_dpll(dev_priv, tmp___0, 1); assert_fdi_tx(dev_priv, pipe, 1); assert_fdi_rx(dev_priv, pipe, 1); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { reg = (uint32_t )((int )pipe * 4096 + 983140); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = val | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); } else { } reg = (uint32_t )((int )pipe * 4096 + 983048); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); pipeconf_val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___1->pch_type == 1U) { val = val & 4294967071U; val = (pipeconf_val & 224U) | val; } else { } val = val & 4280287231U; if ((pipeconf_val & 14680064U) == 6291456U) { __p___2 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___2->pch_type == 1U) { tmp___1 = intel_pipe_has_type(intel_crtc, 3); if ((int 
)tmp___1) { val = val | 4194304U; } else { val = val | 6291456U; } } else { val = val | 6291456U; } } else { val = val; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val | 2147483648U, 1); tmp___2 = msecs_to_jiffies(100U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50426; ldv_50425: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___3 & 1073741824U) == 0U) { ret__ = -110; } else { } goto ldv_50424; } else { } tmp___4 = drm_can_sleep___5(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50426: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___5 & 1073741824U) == 0U) { goto ldv_50425; } else { } ldv_50424: ; if (ret__ != 0) { drm_err("failed to enable transcoder %c\n", (int )pipe + 65); } else { } return; } } static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv , enum transcoder cpu_transcoder ) { u32 val ; u32 pipeconf_val ; struct drm_i915_private *__p ; long tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; { __p = to_i915((struct drm_device const *)dev_priv->dev); tmp = ldv__builtin_expect((unsigned int )__p->pch_type == 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (2035), "i" (12UL)); ldv_50440: ; goto ldv_50440; } else { } assert_fdi_tx(dev_priv, (enum pipe )cpu_transcoder, 1); assert_fdi_rx(dev_priv, 0, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983140L, 1); val = val | 2147483648U; 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983140L, val, 1); val = 2147483648U; pipeconf_val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); if ((pipeconf_val & 6291456U) == 6291456U) { val = val | 6291456U; } else { val = val; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983048L, val, 1); tmp___0 = msecs_to_jiffies(100U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50451; ldv_50450: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983048L, 1); if ((tmp___1 & 1073741824U) == 0U) { ret__ = -110; } else { } goto ldv_50449; } else { } tmp___2 = drm_can_sleep___5(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50451: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983048L, 1); if ((tmp___3 & 1073741824U) == 0U) { goto ldv_50450; } else { } ldv_50449: ; if (ret__ != 0) { drm_err("Failed to enable PCH transcoder\n"); } else { } return; } } static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv , enum pipe pipe ) { struct drm_device *dev ; uint32_t reg ; uint32_t val ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p ; { dev = dev_priv->dev; assert_fdi_tx(dev_priv, pipe, 0); assert_fdi_rx(dev_priv, pipe, 0); assert_pch_ports_disabled(dev_priv, pipe); reg = (uint32_t )((int )pipe * 4096 + 983048); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); tmp = msecs_to_jiffies(50U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50470; ldv_50469: ; if ((long )(timeout__ - (unsigned 
long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___0 & 1073741824U) != 0U) { ret__ = -110; } else { } goto ldv_50468; } else { } tmp___1 = drm_can_sleep___5(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50470: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___2 & 1073741824U) != 0U) { goto ldv_50469; } else { } ldv_50468: ; if (ret__ != 0) { drm_err("failed to disable transcoder %c\n", (int )pipe + 65); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 1U) { reg = (uint32_t )((int )pipe * 4096 + 983140); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); } else { } return; } } static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv ) { u32 val ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983048L, 1); val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983048L, val, 1); tmp = msecs_to_jiffies(50U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50492; ldv_50491: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983048L, 1); if ((tmp___0 & 1073741824U) != 0U) { ret__ = -110; } else { } goto ldv_50490; } else { } tmp___1 = drm_can_sleep___5(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50492: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983048L, 1); if ((tmp___2 & 1073741824U) != 0U) { goto ldv_50491; } else { } ldv_50490: ; if (ret__ != 0) { drm_err("Failed to disable PCH transcoder\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983140L, 1); val = val & 
2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983140L, val, 1); return; } } static void intel_enable_pipe(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; enum transcoder cpu_transcoder ; enum transcoder tmp ; enum pipe pch_transcoder ; int reg ; u32 val ; struct drm_i915_private *__p ; bool tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int __ret_warn_on ; long tmp___1 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; tmp = intel_pipe_to_cpu_transcoder(dev_priv, pipe); cpu_transcoder = tmp; assert_planes_disabled(dev_priv, pipe); assert_cursor(dev_priv, pipe, 0); assert_sprites_disabled(dev_priv, pipe); __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p->pch_type == 3U) { pch_transcoder = 0; } else { pch_transcoder = pipe; } __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 4U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { _L: /* CIL Label */ tmp___0 = intel_pipe_has_type(crtc, 9); if ((int )tmp___0) { assert_dsi_pll(dev_priv, 1); } else { assert_pll(dev_priv, pipe, 1); } } else if ((int )(crtc->config)->has_pch_encoder) { assert_fdi_rx_pll(dev_priv, pch_transcoder, 1); assert_fdi_tx_pll_enabled(dev_priv, (enum pipe )cpu_transcoder); } else { } } reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((int )val < 0) { __ret_warn_on = ((int )pipe != 0 || (dev_priv->quirks & 1UL) == 0UL) && ((int )pipe != 1 || (dev_priv->quirks & 16UL) == 0UL); tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 
0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2158, "WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } static void intel_disable_pipe(struct intel_crtc *crtc ) { struct drm_i915_private *dev_priv ; enum transcoder cpu_transcoder ; enum pipe pipe ; int reg ; u32 val ; { dev_priv = (struct drm_i915_private *)(crtc->base.dev)->dev_private; cpu_transcoder = (crtc->config)->cpu_transcoder; pipe = crtc->pipe; assert_planes_disabled(dev_priv, pipe); assert_cursor(dev_priv, pipe, 0); assert_sprites_disabled(dev_priv, pipe); reg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((int )val >= 0) { return; } else { } if ((int )(crtc->config)->double_wide) { val = val & 3221225471U; } else { } if (((int )pipe != 0 || (dev_priv->quirks & 1UL) == 0UL) && ((int )pipe != 1 || (dev_priv->quirks & 16UL) == 0UL)) { val = val & 2147483647U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); if ((int )val >= 0) { intel_wait_for_pipe_off(crtc); } else { } return; } } static void intel_enable_primary_hw_plane(struct drm_plane *plane , struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_plane_state const *__mptr___0 ; { dev = 
plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; assert_pipe(dev_priv, intel_crtc->pipe, 1); __mptr___0 = (struct drm_plane_state const *)plane->state; ((struct intel_plane_state *)__mptr___0)->visible = 1; (*(dev_priv->display.update_primary_plane))(crtc, plane->fb, crtc->x, crtc->y); return; } } static bool need_vtd_wa(struct drm_device *dev ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U && intel_iommu_gfx_mapped != 0) { return (1); } else { } return (0); } } unsigned int intel_tile_height(struct drm_device *dev , uint32_t pixel_format , uint64_t fb_format_modifier ) { unsigned int tile_height ; uint32_t pixel_bytes ; struct drm_i915_private *__p ; int tmp ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on___0 ; long tmp___3 ; { switch (fb_format_modifier) { case 0ULL: tile_height = 1U; goto ldv_50560; case 1ULL: __p = to_i915((struct drm_device const *)dev); tile_height = (unsigned int )((unsigned char )__p->info.gen) == 2U ? 16U : 8U; goto ldv_50560; case 2ULL: tile_height = 32U; goto ldv_50560; case 3ULL: tmp = drm_format_plane_cpp(pixel_format, 0); pixel_bytes = (uint32_t )tmp; switch (pixel_bytes) { default: ; case 1U: tile_height = 64U; goto ldv_50572; case 2U: ; case 4U: tile_height = 32U; goto ldv_50572; case 8U: tile_height = 16U; goto ldv_50572; case 16U: __ret_warn_once = 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
/* tail of intel_tile_height(): the WARN_ON_ONCE splat for 128-bit pixels
   (intel_display.c:2278, falls back to 16 rows) and the "Missing switch
   case" WARN for unknown modifiers (:2284, falls back to 1 row). */
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2278, "128-bit pixels are not supported for display!"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); tile_height = 16U; goto ldv_50572; } ldv_50572: ; goto ldv_50560; default: __ret_warn_on___0 = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2284, "Missing switch case (%lu) in %s\n", (long )fb_format_modifier, "intel_tile_height"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tile_height = 1U; goto ldv_50560; } ldv_50560: ; return (tile_height); } }
/* intel_fb_align_height(): round `height` up to a multiple of the tile
   height: (height + th - 1) & -th is the usual ALIGN() idiom, with the tile
   height fetched twice by CIL temporaries (tmp == tmp___0, same arguments). */
unsigned int intel_fb_align_height(struct drm_device *dev , unsigned int height , uint32_t pixel_format , uint64_t fb_format_modifier ) { unsigned int tmp ; unsigned int tmp___0 ; { tmp = intel_tile_height(dev, pixel_format, fb_format_modifier); tmp___0 = intel_tile_height(dev, pixel_format, fb_format_modifier); return (((tmp + height) - 1U) & - tmp___0); } }
/* intel_fill_fb_ggtt_view(): head only on this physical line -- defaults the
   view to i915_ggtt_view_normal and returns 0 early when there is no plane
   state; the rotated-view branch continues on the next line of the file. */
static int intel_fill_fb_ggtt_view(struct i915_ggtt_view *view , struct drm_framebuffer *fb , struct drm_plane_state const *plane_state ) { struct intel_rotation_info *info ; bool tmp ; int tmp___0 ; { info = & view->__annonCompField78.rotation_info; *view = i915_ggtt_view_normal; if ((unsigned long )plane_state == (unsigned long )((struct drm_plane_state const *)0)) { return (0); } else { } tmp = 
intel_rotation_90_or_270(plane_state->rotation); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } *view = i915_ggtt_view_rotated; info->height = fb->height; info->pixel_format = fb->pixel_format; info->pitch = fb->pitches[0]; info->fb_modifier = fb->modifier[0]; return (0); } } int intel_pin_and_fence_fb_obj(struct drm_plane *plane , struct drm_framebuffer *fb , struct drm_plane_state const *plane_state , struct intel_engine_cs *pipelined ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct i915_ggtt_view view ; u32 alignment ; int ret ; int __ret_warn_on ; int tmp___0 ; long tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; bool __warned ; int __ret_warn_once ; struct drm_i915_private *__p___4 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; int __ret_warn_on___1 ; long tmp___6 ; bool tmp___7 ; { dev = fb->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; tmp___0 = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp___0 == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2337, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); switch (fb->modifier[0]) { case 0ULL: __p___2 = 
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 8U) { alignment = 262144U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { alignment = 131072U; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { alignment = 131072U; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { alignment = 4096U; } else { alignment = 65536U; } } } } goto ldv_50639; case 1ULL: __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 8U) { alignment = 262144U; } else { alignment = 0U; } goto ldv_50639; case 2ULL: ; case 3ULL: __p___4 = to_i915((struct drm_device const *)dev); __ret_warn_once = (unsigned int )((unsigned char )__p___4->info.gen) <= 8U; tmp___4 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___4 != 0L) { __ret_warn_on___0 = ! 
__warned; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2361, "Y tiling bo slipped through, driver bug!\n"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { __warned = 1; } else { } } else { } tmp___5 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___5 != 0L) { return (-22); } else { } alignment = 1048576U; goto ldv_50639; default: __ret_warn_on___1 = 1; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2366, "Missing switch case (%lu) in %s\n", (long )fb->modifier[0], "intel_pin_and_fence_fb_obj"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); return (-22); } ldv_50639: ret = intel_fill_fb_ggtt_view(& view, fb, plane_state); if (ret != 0) { return (ret); } else { } tmp___7 = need_vtd_wa(dev); if ((int )tmp___7 && alignment <= 262143U) { alignment = 262144U; } else { } intel_runtime_pm_get(dev_priv); dev_priv->mm.interruptible = 0; ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, (struct i915_ggtt_view const *)(& view)); if (ret != 0) { goto err_interruptible; } else { } ret = i915_gem_object_get_fence(obj); if (ret != 0) { goto err_unpin; } else { } i915_gem_object_pin_fence(obj); dev_priv->mm.interruptible = 1; intel_runtime_pm_put(dev_priv); return (0); err_unpin: i915_gem_object_unpin_from_display_plane(obj, (struct i915_ggtt_view const *)(& view)); err_interruptible: dev_priv->mm.interruptible = 1; intel_runtime_pm_put(dev_priv); return (ret); } } 
static void intel_unpin_fb_obj(struct drm_framebuffer *fb , struct drm_plane_state const *plane_state ) { struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct i915_ggtt_view view ; int ret ; int __ret_warn_on ; int tmp___0 ; long tmp___1 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; tmp___0 = mutex_is_locked(& (obj->base.dev)->struct_mutex); __ret_warn_on = tmp___0 == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2427, "WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = intel_fill_fb_ggtt_view(& view, fb, plane_state); __ret_warn_once = ret != 0; tmp___4 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___4 != 0L) { __ret_warn_on___0 = ! 
/* tail of intel_unpin_fb_obj(): WARN_ON_ONCE splat (intel_display.c:2430)
   when the ggtt view could not be derived, then unpin the fence and unpin
   the object from the display plane. */
__warned; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2430, "Couldn\'t get view from plane state!"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); i915_gem_object_unpin_fence(obj); i915_gem_object_unpin_from_display_plane(obj, (struct i915_ggtt_view const *)(& view)); return; } }
/* intel_gen4_compute_page_offset(): split a plane (x, y) position into an
   aligned base offset (returned) plus residual coordinates (written back
   through *x / *y).
   - tiled path (tiling_mode != 0): assumes 512-byte-wide, 8-row tiles (the
     X-tile geometry); returns the byte offset of the containing tile and
     leaves the within-tile remainders in *x / *y.
   - linear path: aligns down to 4 KiB (4294963200UL == ~4095UL), zeroes *y
     and folds the sub-page remainder into *x in units of cpp. */
unsigned long intel_gen4_compute_page_offset(int *x , int *y , unsigned int tiling_mode , unsigned int cpp , unsigned int pitch ) { unsigned int tile_rows ; unsigned int tiles ; unsigned int offset ; { if (tiling_mode != 0U) { tile_rows = (unsigned int )(*y / 8); *y = *y % 8; tiles = (unsigned int )*x / (512U / cpp); *x = (int )((unsigned int )*x % (512U / cpp)); return ((unsigned long )((tile_rows * pitch + tiles * 512U) * 8U)); } else { offset = (unsigned int )*y * pitch + (unsigned int )*x * cpp; *y = 0; *x = (int )((offset & 4095U) / cpp); return ((unsigned long )offset & 4294963200UL); } } }
/* i9xx_format_to_fourcc(): map a hardware plane-control pixel-format field
   (high bits of DSPCNTR, presumably) to a DRM fourcc code.  Decoded values:
   0x08000000 -> 0x20203843 'C8  '; 0x10000000 -> 0x35315258 'XR15';
   0x14000000 -> 0x36314752 'RG16'; default/0x18000000 -> 0x34325258 'XR24';
   0x38000000 -> 0x34324258 'XB24'; 0x28000000 -> 0x30335258 'XR30';
   0x20000000 -> 0x30334258 'XB30' (names per drm_fourcc.h convention). */
static int i9xx_format_to_fourcc(int format ) { { switch (format) { case 134217728: ; return (538982467); case 268435456: ; return (892424792); case 335544320: ; return (909199186); default: ; case 402653184: ; return (875713112); case 939524096: ; return (875709016); case 671088640: ; return (808669784); case 536870912: ; return (808665688); } } }
/* skl_format_to_fourcc(): SKL variant honoring channel order and alpha:
   0x0E000000 -> 'RG16'; default/0x04000000 -> one of 'AB24'/'XB24' (rgb_order)
   or 'AR24'/'XR24' (875708993/875709016/875713089/875713112); the 0x02000000
   case continues on the next physical line. */
static int skl_format_to_fourcc(int format , bool rgb_order , bool alpha ) { { switch (format) { case 234881024: ; return (909199186); default: ; case 67108864: ; if ((int )rgb_order) { if ((int )alpha) { return (875708993); } else { return (875709016); } } else if ((int )alpha) { return (875713089); } else { return (875713112); } case 33554432: ; if ((int 
)rgb_order) { return (808665688); } else { return (808669784); } } } } static bool intel_alloc_initial_plane_obj(struct intel_crtc *crtc , struct intel_initial_plane_config *plane_config ) { struct drm_device *dev ; struct drm_i915_gem_object *obj ; struct drm_mode_fb_cmd2 mode_cmd ; struct drm_framebuffer *fb ; u32 base_aligned ; u32 size_aligned ; long tmp ; struct drm_framebuffer const *__mptr ; int tmp___0 ; long tmp___1 ; { dev = crtc->base.dev; obj = (struct drm_i915_gem_object *)0; mode_cmd.fb_id = 0U; mode_cmd.width = 0U; mode_cmd.height = 0U; mode_cmd.pixel_format = 0U; mode_cmd.flags = 0U; mode_cmd.handles[0] = 0U; mode_cmd.handles[1] = 0U; mode_cmd.handles[2] = 0U; mode_cmd.handles[3] = 0U; mode_cmd.pitches[0] = 0U; mode_cmd.pitches[1] = 0U; mode_cmd.pitches[2] = 0U; mode_cmd.pitches[3] = 0U; mode_cmd.offsets[0] = 0U; mode_cmd.offsets[1] = 0U; mode_cmd.offsets[2] = 0U; mode_cmd.offsets[3] = 0U; mode_cmd.modifier[0] = 0ULL; mode_cmd.modifier[1] = 0ULL; mode_cmd.modifier[2] = 0ULL; mode_cmd.modifier[3] = 0ULL; fb = & (plane_config->fb)->base; base_aligned = plane_config->base & 4294963200U; size_aligned = (((plane_config->base + (u32 )plane_config->size) - 1U) | 4095U) + 1U; size_aligned = size_aligned - base_aligned; if (plane_config->size == 0) { return (0); } else { } obj = i915_gem_object_create_stolen_for_preallocated(dev, base_aligned, base_aligned, size_aligned); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return (0); } else { } obj->tiling_mode = (unsigned char )plane_config->tiling; if ((unsigned int )*((unsigned char *)obj + 409UL) == 64U) { obj->stride = fb->pitches[0]; } else { } mode_cmd.pixel_format = fb->pixel_format; mode_cmd.width = fb->width; mode_cmd.height = fb->height; mode_cmd.pitches[0] = fb->pitches[0]; mode_cmd.modifier[0] = fb->modifier[0]; mode_cmd.flags = 2U; mutex_lock_nested(& dev->struct_mutex, 0U); __mptr = (struct drm_framebuffer const *)fb; tmp___0 = intel_framebuffer_init(dev, (struct 
intel_framebuffer *)__mptr, & mode_cmd, obj); if (tmp___0 != 0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_alloc_initial_plane_obj", "intel fb init failed\n"); } else { } goto out_unref_obj; } else { } mutex_unlock(& dev->struct_mutex); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_alloc_initial_plane_obj", "initial plane fb obj %p\n", obj); } else { } return (1); out_unref_obj: drm_gem_object_unreference___12(& obj->base); mutex_unlock(& dev->struct_mutex); return (0); } } static void update_state_fb(struct drm_plane *plane ) { { if ((unsigned long )plane->fb == (unsigned long )(plane->state)->fb) { return; } else { } if ((unsigned long )(plane->state)->fb != (unsigned long )((struct drm_framebuffer *)0)) { drm_framebuffer_unreference((plane->state)->fb); } else { } (plane->state)->fb = plane->fb; if ((unsigned long )(plane->state)->fb != (unsigned long )((struct drm_framebuffer *)0)) { drm_framebuffer_reference((plane->state)->fb); } else { } return; } } static void intel_find_initial_plane_obj(struct intel_crtc *intel_crtc , struct intel_initial_plane_config *plane_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *c ; struct intel_crtc *i ; struct drm_i915_gem_object *obj ; struct drm_plane *primary ; struct drm_framebuffer *fb ; bool tmp ; struct list_head const *__mptr ; struct drm_crtc const *__mptr___0 ; struct drm_framebuffer const *__mptr___1 ; unsigned long tmp___0 ; struct list_head const *__mptr___2 ; struct drm_framebuffer const *__mptr___3 ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; primary = intel_crtc->base.primary; if ((unsigned long )plane_config->fb == (unsigned long )((struct intel_framebuffer *)0)) { return; } else { } tmp = intel_alloc_initial_plane_obj(intel_crtc, plane_config); if ((int )tmp) { fb = & (plane_config->fb)->base; goto valid_fb; } 
else { } kfree((void const *)plane_config->fb); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; c = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_50751; ldv_50750: __mptr___0 = (struct drm_crtc const *)c; i = (struct intel_crtc *)__mptr___0; if ((unsigned long )(& intel_crtc->base) == (unsigned long )c) { goto ldv_50747; } else { } if (! i->active) { goto ldv_50747; } else { } fb = (c->primary)->fb; if ((unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { goto ldv_50747; } else { } if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___1 = (struct drm_framebuffer const *)fb; obj = ((struct intel_framebuffer *)__mptr___1)->obj; } else { obj = (struct drm_i915_gem_object *)0; } tmp___0 = i915_gem_obj_ggtt_offset(obj); if (tmp___0 == (unsigned long )plane_config->base) { drm_framebuffer_reference(fb); goto valid_fb; } else { } ldv_50747: __mptr___2 = (struct list_head const *)c->head.next; c = (struct drm_crtc *)__mptr___2 + 0xfffffffffffffff0UL; ldv_50751: ; if ((unsigned long )(& c->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_50750; } else { } return; valid_fb: ; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___3 = (struct drm_framebuffer const *)fb; obj = ((struct intel_framebuffer *)__mptr___3)->obj; } else { obj = (struct drm_i915_gem_object *)0; } if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { dev_priv->preserve_bios_swizzle = 1; } else { } primary->fb = fb; (primary->state)->crtc = & intel_crtc->base; primary->crtc = & intel_crtc->base; update_state_fb(primary); obj->frontbuffer_bits = (unsigned short )((int )((short )obj->frontbuffer_bits) | (int )((short )(1 << (int )intel_crtc->pipe * 4))); return; } } static void i9xx_update_primary_plane(struct drm_crtc *crtc , struct drm_framebuffer *fb , int x , int y ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct 
drm_crtc const *__mptr ; struct drm_plane *primary ; bool visible ; struct drm_plane_state const *__mptr___0 ; struct drm_i915_gem_object *obj ; int plane ; unsigned long linear_offset ; u32 dspcntr ; u32 reg ; int pixel_size ; struct drm_i915_private *__p ; struct drm_framebuffer const *__mptr___1 ; int __ret_warn_on ; long tmp ; long tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; unsigned long tmp___1 ; unsigned long tmp___2 ; struct drm_i915_private *__p___6 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; primary = crtc->primary; __mptr___0 = (struct drm_plane_state const *)primary->state; visible = ((struct intel_plane_state *)__mptr___0)->visible; plane = (int )intel_crtc->plane; reg = ((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U; if (! 
visible || (unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, 0U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 0U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), 0U, 1); } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } else { } if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___1 = (struct drm_framebuffer const *)fb; obj = ((struct intel_framebuffer *)__mptr___1)->obj; } else { obj = (struct drm_i915_gem_object *)0; } __ret_warn_on = (unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2663, "WARN_ON(obj == NULL)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); dspcntr = 1073741824U; dspcntr = dspcntr | 2147483648U; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 3U) { if ((int )intel_crtc->pipe == 1) { dspcntr = dspcntr | 16777216U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) 
+ (unsigned int )dev_priv->info.display_mmio_offset) + 459152U), (uint32_t )((((intel_crtc->config)->pipe_src_h + -1) << 16) | ((intel_crtc->config)->pipe_src_w + -1)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459148U), 0U, 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { if (plane == 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[plane] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 395788U), (uint32_t )((((intel_crtc->config)->pipe_src_h + -1) << 16) | ((intel_crtc->config)->pipe_src_w + -1)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[plane] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 395784U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[plane] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 395792U), 0U, 1); } else { } } else { } } else { } } switch (fb->pixel_format) { case 538982467U: dspcntr = dspcntr | 134217728U; goto ldv_50805; case 892424792U: dspcntr = dspcntr | 268435456U; goto ldv_50805; case 909199186U: dspcntr = dspcntr | 335544320U; goto ldv_50805; case 875713112U: dspcntr = dspcntr | 402653184U; goto ldv_50805; case 875709016U: dspcntr = dspcntr | 939524096U; goto ldv_50805; case 808669784U: dspcntr = dspcntr | 671088640U; goto ldv_50805; case 808665688U: dspcntr = dspcntr | 536870912U; goto ldv_50805; default: __asm__ volatile ("1:\tud2\n.pushsection 
__bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (2714), "i" (12UL)); ldv_50813: ; goto ldv_50813; } ldv_50805: __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 3U && (unsigned int )*((unsigned char *)obj + 409UL) != 0U) { dspcntr = dspcntr | 1024U; } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 44UL) != 0U) { dspcntr = dspcntr | 16384U; } else { } linear_offset = (unsigned long )(fb->pitches[0] * (unsigned int )y + (unsigned int )(x * pixel_size)); __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 3U) { intel_crtc->dspaddr_offset = intel_gen4_compute_page_offset(& x, & y, (unsigned int )obj->tiling_mode, (unsigned int )pixel_size, fb->pitches[0]); linear_offset = linear_offset - intel_crtc->dspaddr_offset; } else { intel_crtc->dspaddr_offset = linear_offset; } if (((crtc->primary)->state)->rotation == 4U) { dspcntr = dspcntr | 32768U; x = ((intel_crtc->config)->pipe_src_w + -1) + x; y = ((intel_crtc->config)->pipe_src_h + -1) + y; linear_offset = (unsigned long )((unsigned int )((intel_crtc->config)->pipe_src_h + -1) * fb->pitches[0] + (unsigned int )(((intel_crtc->config)->pipe_src_w + -1) * pixel_size)) + linear_offset; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dspcntr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459144U), fb->pitches[0], 1); __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int 
)((unsigned char )__p___6->info.gen) > 3U) { tmp___1 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), (uint32_t )tmp___1 + (uint32_t )intel_crtc->dspaddr_offset, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), (uint32_t )((y << 16) | x), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), (uint32_t )linear_offset, 1); } else { tmp___2 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), (uint32_t )tmp___2 + (uint32_t )linear_offset, 1); } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } static void ironlake_update_primary_plane(struct drm_crtc *crtc , struct drm_framebuffer *fb , int x , int y ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_plane *primary ; bool visible ; struct drm_plane_state const *__mptr___0 ; struct drm_i915_gem_object *obj ; int plane ; unsigned long linear_offset ; u32 dspcntr ; u32 reg ; int pixel_size ; struct drm_framebuffer const *__mptr___1 ; int __ret_warn_on ; long tmp ; long tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private 
*__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; unsigned long tmp___1 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; primary = crtc->primary; __mptr___0 = (struct drm_plane_state const *)primary->state; visible = ((struct intel_plane_state *)__mptr___0)->visible; plane = (int )intel_crtc->plane; reg = ((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U; if (! visible || (unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } else { } if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___1 = (struct drm_framebuffer const *)fb; obj = ((struct intel_framebuffer *)__mptr___1)->obj; } else { obj = (struct drm_i915_gem_object *)0; } __ret_warn_on = (unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2786, "WARN_ON(obj == NULL)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); dspcntr = 
1073741824U; dspcntr = dspcntr | 2147483648U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { dspcntr = dspcntr | 16777216U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { dspcntr = dspcntr | 16777216U; } else { } } else { } } switch (fb->pixel_format) { case 538982467U: dspcntr = dspcntr | 134217728U; goto ldv_50882; case 909199186U: dspcntr = dspcntr | 335544320U; goto ldv_50882; case 875713112U: dspcntr = dspcntr | 402653184U; goto ldv_50882; case 875709016U: dspcntr = dspcntr | 939524096U; goto ldv_50882; case 808669784U: dspcntr = dspcntr | 671088640U; goto ldv_50882; case 808665688U: dspcntr = dspcntr | 536870912U; goto ldv_50882; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (2818), "i" (12UL)); ldv_50889: ; goto ldv_50889; } ldv_50882: ; if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { dspcntr = dspcntr | 1024U; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { dspcntr = dspcntr | 16384U; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) != 8U) { dspcntr = dspcntr | 16384U; } else { } } } else { } linear_offset = (unsigned long )(fb->pitches[0] * (unsigned int )y + (unsigned int )(x * pixel_size)); 
intel_crtc->dspaddr_offset = intel_gen4_compute_page_offset(& x, & y, (unsigned int )obj->tiling_mode, (unsigned int )pixel_size, fb->pitches[0]); linear_offset = linear_offset - intel_crtc->dspaddr_offset; if (((crtc->primary)->state)->rotation == 4U) { dspcntr = dspcntr | 32768U; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { x = ((intel_crtc->config)->pipe_src_w + -1) + x; y = ((intel_crtc->config)->pipe_src_h + -1) + y; linear_offset = (unsigned long )((unsigned int )((intel_crtc->config)->pipe_src_h + -1) * fb->pitches[0] + (unsigned int )(((intel_crtc->config)->pipe_src_w + -1) * pixel_size)) + linear_offset; } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) != 8U) { x = ((intel_crtc->config)->pipe_src_w + -1) + x; y = ((intel_crtc->config)->pipe_src_h + -1) + y; linear_offset = (unsigned long )((unsigned int )((intel_crtc->config)->pipe_src_h + -1) * fb->pitches[0] + (unsigned int )(((intel_crtc->config)->pipe_src_w + -1) * pixel_size)) + linear_offset; } else { } } } else { } } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dspcntr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459144U), fb->pitches[0], 1); tmp___1 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), (uint32_t )tmp___1 + (uint32_t )intel_crtc->dspaddr_offset, 1); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { 
/*
 * NOTE(review): CIL-flattened excerpt of drivers/gpu/drm/i915/intel_display.c
 * (linux-4.2-rc1), generated for LDV verification.  Each physical line packs
 * several functions; only comment lines are added here, code is byte-identical.
 *
 * The line below starts with the tail of the preceding (partially visible)
 * plane-update function: per-generation MMIO writes of the plane offset
 * (base + 459172 == 0x701A4) and linear-offset (base + 459140 == 0x70184)
 * registers, followed by a posting read.  Register names are inferred from
 * i915 -- presumably DSPOFFSET/DSPLINOFF; confirm against the PRM.
 * It is followed by intel_fb_stride_alignment(): returns the stride
 * alignment in bytes for a framebuffer modifier -- 64 for linear (case 0),
 * 128 on gen2 else 512 for X-tiled (case 1), 128 for Y-tiled (case 2),
 * 64 or 128 for Yf (case 3) depending on bits-per-pixel; an unknown
 * modifier triggers the WARN at intel_display.c:2891 and falls back to 64.
 */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), (uint32_t )((y << 16) | x), 1); } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) == 0U) { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___10->info.gen) == 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), (uint32_t )((y << 16) | x), 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), (uint32_t )((y << 16) | x), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), (uint32_t )linear_offset, 1); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), (uint32_t )((y << 16) | x), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), (uint32_t )linear_offset, 1); } } (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } u32 intel_fb_stride_alignment(struct drm_device *dev , uint64_t fb_modifier , uint32_t pixel_format ) { u32 bits_per_pixel ; int tmp ; struct drm_i915_private *__p ; int __ret_warn_on ; long tmp___0 ; { tmp = 
/*
 * Continuation: the modifier switch of intel_fb_stride_alignment(), then
 * intel_plane_obj_offset() -- selects the rotated GGTT view when the plane
 * state carries a 90/270-degree rotation and returns the object's GGTT
 * offset for that view -- and the head of skl_detach_scalers(), which
 * zeroes the control/window registers of every scaler whose
 * scaler_state.scalers[i].in_use is 0 (the loop body continues on the
 * next line).
 */
drm_format_plane_cpp(pixel_format, 0); bits_per_pixel = (u32 )(tmp * 8); switch (fb_modifier) { case 0ULL: ; return (64U); case 1ULL: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { return (128U); } else { } return (512U); case 2ULL: ; return (128U); case 3ULL: ; if (bits_per_pixel == 8U) { return (64U); } else { return (128U); } default: __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2891, "Missing switch case (%lu) in %s\n", (long )fb_modifier, "intel_fb_stride_alignment"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (64U); } } } unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane , struct drm_i915_gem_object *obj ) { struct i915_ggtt_view const *view ; bool tmp ; unsigned long tmp___0 ; { view = & i915_ggtt_view_normal; tmp = intel_rotation_90_or_270((intel_plane->base.state)->rotation); if ((int )tmp) { view = & i915_ggtt_view_rotated; } else { } tmp___0 = i915_gem_obj_ggtt_offset_view(obj, view); return (tmp___0); } } void skl_detach_scalers(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc_scaler_state *scaler_state ; int i ; long tmp ; { if ((unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0) || (unsigned long )intel_crtc->config == (unsigned long )((struct intel_crtc_state *)0)) { return; } else { } dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; scaler_state = & (intel_crtc->config)->scaler_state; i = 0; goto ldv_50978; ldv_50977: ; if (scaler_state->scalers[i].in_use == 0) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i * 256 + (int )intel_crtc->pipe 
/*
 * NOTE(review): CIL-flattened code, comments only -- code bytes unchanged.
 * Line below: remainder of the skl_detach_scalers() loop (zero-writes to
 * the per-pipe/per-scaler control and window registers, with a debug
 * print per disabled scaler, looping while i < num_scalers), then
 * skl_plane_ctl_format(): maps a DRM fourcc pixel-format code (the case
 * constants are fourcc values, e.g. 875713112U == 'XR24') to the
 * corresponding PLANE_CTL format bit-field; an unknown format hits the
 * WARN at intel_display.c:2971 and returns 0.  Then the head of
 * skl_plane_ctl_tiling(): fb modifier -> PLANE_CTL tiling bits
 * (0 -> none, 1 -> 1024U, 2 -> 4096U, 3 -> 5120U).
 */
* ((i * 256 + 428416) + (i * -256 + -426368))) + 426368), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i * 256 + (int )intel_crtc->pipe * ((i * 256 + 428400) + (i * -256 + -426352))) + 426352), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i * 256 + (int )intel_crtc->pipe * ((i * 256 + 428404) + (i * -256 + -426356))) + 426356), 0U, 1); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("skl_detach_scalers", "CRTC:%d Disabled scaler id %u.%u\n", intel_crtc->base.base.id, (int )intel_crtc->pipe, i); } else { } } else { } i = i + 1; ldv_50978: ; if (intel_crtc->num_scalers > i) { goto ldv_50977; } else { } return; } } u32 skl_plane_ctl_format(uint32_t pixel_format ) { int __ret_warn_on ; long tmp ; { switch (pixel_format) { case 538982467U: ; return (201326592U); case 909199186U: ; return (234881024U); case 875709016U: ; return (68157440U); case 875713112U: ; return (67108864U); case 875708993U: ; return (68157472U); case 875713089U: ; return (67108896U); case 808669784U: ; return (33554432U); case 808665688U: ; return (34603008U); case 1448695129U: ; return (0U); case 1431918169U: ; return (131072U); case 1498831189U: ; return (65536U); case 1498765654U: ; return (196608U); default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2971, "Missing switch case (%lu) in %s\n", (long )pixel_format, "skl_plane_ctl_format"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } return (0U); } } u32 skl_plane_ctl_tiling(uint64_t fb_modifier ) { int __ret_warn_on ; long tmp ; { switch (fb_modifier) { case 0ULL: ; goto ldv_51003; case 1ULL: ; return (1024U); case 2ULL: ; return (4096U); case 3ULL: ; return (5120U); 
/*
 * Continuation: default (WARN, intel_display.c:2989) arm of
 * skl_plane_ctl_tiling(), then skl_plane_ctl_rotation(): DRM rotation
 * bitmask -> PLANE_CTL rotation field (1U/no-rotation -> 0, 2U -> 3U,
 * 4U -> 2U, 8U -> 1U; WARN at intel_display.c:3011 otherwise), then the
 * local-variable prologue of skylake_update_primary_plane(), whose body
 * spans the following lines.
 */
default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 2989, "Missing switch case (%lu) in %s\n", (long )fb_modifier, "skl_plane_ctl_tiling"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } ldv_51003: ; return (0U); } } u32 skl_plane_ctl_rotation(unsigned int rotation ) { int __ret_warn_on ; long tmp ; { switch (rotation) { case 1U: ; goto ldv_51015; case 2U: ; return (3U); case 4U: ; return (2U); case 8U: ; return (1U); default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3011, "Missing switch case (%lu) in %s\n", (long )rotation, "skl_plane_ctl_rotation"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } ldv_51015: ; return (0U); } } static void skylake_update_primary_plane(struct drm_crtc *crtc , struct drm_framebuffer *fb , int x , int y ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_plane *plane ; bool visible ; struct drm_plane_state const *__mptr___0 ; struct drm_i915_gem_object *obj ; int pipe ; u32 plane_ctl ; u32 stride_div ; u32 stride ; u32 tile_height ; u32 plane_offset ; u32 plane_size ; unsigned int rotation ; int x_offset ; int y_offset ; unsigned long surf_addr ; struct intel_crtc_state *crtc_state ; struct intel_plane_state *plane_state ; int src_x ; int src_y ; int src_w ; int src_h ; int dst_x ; int dst_y ; int dst_w ; int dst_h ; int scaler_id ; struct drm_plane_state const *__mptr___1 
/*
 * NOTE(review): CIL-flattened code, comments only -- code bytes unchanged.
 * skylake_update_primary_plane() body.  Line below: if the plane is not
 * visible or fb is NULL, the plane control (pipe*4096 + 459136 == 0x70180)
 * and surface (pipe*4096 + 459164 == 0x7019C) registers are zeroed and the
 * function returns after a posting read -- presumably PLANE_CTL/PLANE_SURF;
 * confirm against the PRM.  Otherwise plane_ctl is composed from a fixed
 * enable mask (3229614080U) OR'd with the format, tiling and rotation
 * fields from the three skl_plane_ctl_* helpers, the stride alignment
 * divisor is fetched, and the surface GGTT address resolved via
 * intel_plane_obj_offset().
 */
; u32 tmp ; u32 tmp___0 ; u32 tmp___1 ; struct drm_framebuffer const *__mptr___2 ; struct drm_plane const *__mptr___3 ; int tmp___2 ; int tmp___3 ; int __ret_warn_on ; long tmp___4 ; int tmp___5 ; bool tmp___6 ; uint32_t ps_ctrl ; int __ret_warn_on___0 ; long tmp___7 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; plane = crtc->primary; __mptr___0 = (struct drm_plane_state const *)plane->state; visible = ((struct intel_plane_state *)__mptr___0)->visible; pipe = intel_crtc->pipe; crtc_state = intel_crtc->config; src_x = 0; src_y = 0; src_w = 0; src_h = 0; dst_x = 0; dst_y = 0; dst_w = 0; dst_h = 0; scaler_id = -1; __mptr___1 = (struct drm_plane_state const *)plane->state; plane_state = (struct intel_plane_state *)__mptr___1; if (! visible || (unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459136), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459164), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459136), 0); return; } else { } plane_ctl = 3229614080U; tmp = skl_plane_ctl_format(fb->pixel_format); plane_ctl = tmp | plane_ctl; tmp___0 = skl_plane_ctl_tiling(fb->modifier[0]); plane_ctl = tmp___0 | plane_ctl; plane_ctl = plane_ctl | 8192U; rotation = (plane->state)->rotation; tmp___1 = skl_plane_ctl_rotation(rotation); plane_ctl = tmp___1 | plane_ctl; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___2 = (struct drm_framebuffer const *)fb; obj = ((struct intel_framebuffer *)__mptr___2)->obj; } else { obj = (struct drm_i915_gem_object *)0; } stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], fb->pixel_format); __mptr___3 = (struct drm_plane const *)plane; surf_addr = intel_plane_obj_offset((struct intel_plane *)__mptr___3, obj); tmp___5 = 
/*
 * Continuation: when the plane-state src rect has non-zero width, the
 * src/dst rectangles and scaler_id are taken from plane_state (16.16
 * fixed-point src coordinates, shifted down by 16) with a WARN
 * (intel_display.c:3080) if the passed x/y disagree with the src origin;
 * otherwise src dimensions fall back to the pipe source size.  For
 * 90/270 rotation the stride is computed in tile rows from the tile
 * height and x/y are swapped; otherwise stride = pitch / stride_div.
 * Then plane control/offset/size registers are written.
 */
drm_rect_width((struct drm_rect const *)(& plane_state->src)); if (tmp___5 != 0) { scaler_id = plane_state->scaler_id; src_x = plane_state->src.x1 >> 16; src_y = plane_state->src.y1 >> 16; tmp___2 = drm_rect_width((struct drm_rect const *)(& plane_state->src)); src_w = tmp___2 >> 16; tmp___3 = drm_rect_height((struct drm_rect const *)(& plane_state->src)); src_h = tmp___3 >> 16; dst_x = plane_state->dst.x1; dst_y = plane_state->dst.y1; dst_w = drm_rect_width((struct drm_rect const *)(& plane_state->dst)); dst_h = drm_rect_height((struct drm_rect const *)(& plane_state->dst)); __ret_warn_on = x != src_x || y != src_y; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3080, "WARN_ON(x != src_x || y != src_y)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { src_w = (intel_crtc->config)->pipe_src_w; src_h = (intel_crtc->config)->pipe_src_h; } tmp___6 = intel_rotation_90_or_270(rotation); if ((int )tmp___6) { tile_height = intel_tile_height(dev, fb->pixel_format, fb->modifier[0]); stride = ((fb->height + tile_height) - 1U) / tile_height; x_offset = (int )((stride * tile_height - (u32 )y) - (u32 )src_h); y_offset = x; plane_size = (u32 )(((src_w + -1) << 16) | (src_h + -1)); } else { stride = fb->pitches[0] / stride_div; x_offset = x; y_offset = y; plane_size = (u32 )(((src_h + -1) << 16) | (src_w + -1)); } plane_offset = (u32 )((y_offset << 16) | x_offset); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459136), plane_ctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459172), plane_offset, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459152), plane_size, 1); 
/*
 * Continuation: stride write, then either the scaler path (scaler_id >= 0:
 * WARN at intel_display.c:3110 if dst_w/dst_h is 0, program the scaler
 * control/window registers and zero the plane-position register) or the
 * non-scaled path (write dst position directly).  Finally the surface
 * address is written and posted.  The tail starts
 * intel_pipe_set_base_atomic(), completed on the next line.
 */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459144), stride, 1); if (scaler_id >= 0) { ps_ctrl = 0U; __ret_warn_on___0 = dst_w == 0 || dst_h == 0; tmp___7 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3110, "WARN_ON(!dst_w || !dst_h)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); ps_ctrl = crtc_state->scaler_state.scalers[scaler_id].mode | 2181038080U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428416) + (scaler_id * -256 + -426368)) * pipe) + 426368), ps_ctrl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428384) + (scaler_id * -256 + -426336)) * pipe) + 426336), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428400) + (scaler_id * -256 + -426352)) * pipe) + 426352), (uint32_t )((dst_x << 16) | dst_y), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428404) + (scaler_id * -256 + -426356)) * pipe) + 426356), (uint32_t )((dst_w << 16) | dst_h), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459148), 0U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459148), (uint32_t )((dst_y << 16) | dst_x), 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459164), (uint32_t )surf_addr, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459164), 0); return; } } static int intel_pipe_set_base_atomic(struct drm_crtc *crtc , struct drm_framebuffer *fb , int x , int y , enum mode_set_atomic state ) { struct drm_device *dev ; struct 
/*
 * NOTE(review): CIL-flattened code, comments only -- code bytes unchanged.
 * Line below: intel_pipe_set_base_atomic() (disables FBC if a hook is
 * installed, then calls the per-platform update_primary_plane hook and
 * returns 0; the `state` parameter is unused here),
 * intel_complete_page_flips() (walks dev->mode_config.crtc_list via the
 * CIL-expanded list_for_each_entry and calls intel_prepare_page_flip /
 * intel_finish_page_flip_plane for each crtc's plane), and the head of
 * intel_update_primary_planes().
 */
drm_i915_private *dev_priv ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->display.disable_fbc != (unsigned long )((void (*)(struct drm_device * ))0)) { (*(dev_priv->display.disable_fbc))(dev); } else { } (*(dev_priv->display.update_primary_plane))(crtc, fb, x, y); return (0); } } static void intel_complete_page_flips(struct drm_device *dev ) { struct drm_crtc *crtc ; struct list_head const *__mptr ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; enum plane plane ; struct list_head const *__mptr___1 ; { __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_51094; ldv_51093: __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; plane = intel_crtc->plane; intel_prepare_page_flip(dev, (int )plane); intel_finish_page_flip_plane(dev, (int )plane); __mptr___1 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___1 + 0xfffffffffffffff0UL; ldv_51094: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_51093; } else { } return; } } static void intel_update_primary_planes(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct list_head const *__mptr ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; struct list_head const *__mptr___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_51109; ldv_51108: __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; drm_modeset_lock(& crtc->mutex, (struct drm_modeset_acquire_ctx *)0); if ((int )intel_crtc->active && (unsigned long )(crtc->primary)->fb != (unsigned long )((struct drm_framebuffer *)0)) { 
/*
 * Continuation: body of intel_update_primary_planes() -- under each
 * crtc's modeset mutex, re-programs the primary plane of every active
 * crtc that has a framebuffer.  Then intel_crtc_reset(): if the crtc is
 * active, disables planes and the crtc, then re-enables both.  Then the
 * head of intel_prepare_reset() with its per-generation early returns
 * (gen2, gen>4, and a byte-offset flag at __p+44 -- presumably a
 * platform flag; confirm against struct intel_device_info).
 */
(*(dev_priv->display.update_primary_plane))(crtc, (crtc->primary)->fb, crtc->x, crtc->y); } else { } drm_modeset_unlock(& crtc->mutex); __mptr___1 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___1 + 0xfffffffffffffff0UL; ldv_51109: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_51108; } else { } return; } } void intel_crtc_reset(struct intel_crtc *crtc ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; { tmp = to_i915((struct drm_device const *)crtc->base.dev); dev_priv = tmp; if (! crtc->active) { return; } else { } intel_crtc_disable_planes(& crtc->base); (*(dev_priv->display.crtc_disable))(& crtc->base); (*(dev_priv->display.crtc_enable))(& crtc->base); intel_crtc_enable_planes(& crtc->base); return; } } void intel_prepare_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct intel_crtc *crtc ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { return; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 4U) { return; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { return; } else { } } drm_modeset_lock_all(dev); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_51144; ldv_51143: ; if (! 
/*
 * NOTE(review): CIL-flattened code, comments only -- code bytes unchanged.
 * Line below: tail of intel_prepare_reset() -- under the global modeset
 * lock, disables planes and the crtc for every active crtc (loop over
 * crtc_list).  Then intel_finish_reset(): completes pending page flips,
 * and for gen2 / gen>4 / the same +44-byte platform flag either returns
 * or only re-programs primary planes; otherwise it cycles runtime-PM
 * interrupts, re-inits display hw, re-runs hpd_irq_setup under irq_lock,
 * restores hw state and re-inits hotplug, then drops the modeset lock.
 * The tail begins intel_finish_fb().
 */
crtc->active) { goto ldv_51142; } else { } intel_crtc_disable_planes(& crtc->base); (*(dev_priv->display.crtc_disable))(& crtc->base); ldv_51142: __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_51144: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_51143; } else { } return; } } void intel_finish_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; intel_complete_page_flips(dev); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { return; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 4U) { intel_update_primary_planes(dev); return; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { intel_update_primary_planes(dev); return; } else { } } intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); intel_modeset_init_hw(dev); spin_lock_irq(& dev_priv->irq_lock); if ((unsigned long )dev_priv->display.hpd_irq_setup != (unsigned long )((void (*)(struct drm_device * ))0)) { (*(dev_priv->display.hpd_irq_setup))(dev); } else { } spin_unlock_irq(& dev_priv->irq_lock); intel_modeset_setup_hw_state(dev, 1); intel_hpd_init(dev_priv); drm_modeset_unlock_all(dev); return; } } static void intel_finish_fb(struct drm_framebuffer *old_fb ) { struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp___0 ; bool was_interruptible ; int ret ; int __ret_warn_on ; long tmp___1 ; { if ((unsigned long )old_fb != 
/*
 * Continuation: body of intel_finish_fb() -- waits for rendering on the
 * old framebuffer's GEM object with mm.interruptible forced off
 * (restored afterwards), WARNing (intel_display.c:3291) if the wait
 * failed.  NOTE(review): obj is dereferenced even when old_fb is NULL
 * (obj==NULL path) -- appears to rely on callers never passing NULL;
 * verify against the original intel_display.c.  Then
 * intel_crtc_has_pending_flip(): returns false during a GPU reset or on
 * a stale reset counter, otherwise checks unpin_work under
 * dev->event_lock.  Tail starts intel_update_pipe_size().
 */
(unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)old_fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; tmp___0 = to_i915((struct drm_device const *)obj->base.dev); dev_priv = tmp___0; was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = 0; ret = i915_gem_object_wait_rendering(obj, 1); dev_priv->mm.interruptible = was_interruptible; __ret_warn_on = ret != 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3291, "WARN_ON(ret)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; bool pending ; bool tmp ; int tmp___0 ; struct drm_crtc const *__mptr___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = i915_reset_in_progress(& dev_priv->gpu_error); if ((int )tmp) { return (0); } else { tmp___0 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); if (intel_crtc->reset_counter != (unsigned int )tmp___0) { return (0); } else { } } spin_lock_irq(& dev->event_lock); __mptr___0 = (struct drm_crtc const *)crtc; pending = (unsigned long )((struct intel_crtc *)__mptr___0)->unpin_work != (unsigned long )((struct intel_unpin_work *)0); spin_unlock_irq(& dev->event_lock); return (pending); } } static void intel_update_pipe_size(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_display_mode const *adjusted_mode 
/*
 * Continuation: intel_update_pipe_size() -- no-op unless i915.fastboot;
 * writes the pipe source-size register (trans_offset + 393244U ==
 * 0x6001C) from the adjusted mode, and when the PCH panel fitter is not
 * enabled and the pipe drives type 4 or type 8 (presumably eDP / DSI --
 * confirm against INTEL_OUTPUT_* values) zeroes the three per-pipe
 * panel-fitter registers, then updates pipe_src_w/h in the crtc config.
 * The tail is the prologue of intel_fdi_normal_train().
 */
; bool tmp ; bool tmp___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (! i915.fastboot) { return; } else { } adjusted_mode = (struct drm_display_mode const *)(& (crtc->config)->base.adjusted_mode); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )crtc->pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), (uint32_t )((((int )adjusted_mode->crtc_hdisplay + -1) << 16) | ((int )adjusted_mode->crtc_vdisplay + -1)), 1); if (! (crtc->config)->pch_pfit.enabled) { tmp = intel_pipe_has_type(crtc, 4); if ((int )tmp) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426112), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426096), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426100), 0U, 1); } else { tmp___0 = intel_pipe_has_type(crtc, 8); if ((int )tmp___0) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426112), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426096), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426100), 0U, 1); } else { } } } else { } (crtc->config)->pipe_src_w = adjusted_mode->crtc_hdisplay; (crtc->config)->pipe_src_h = adjusted_mode->crtc_vdisplay; return; } } static void intel_fdi_normal_train(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; u32 reg ; u32 temp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp ; struct drm_i915_private *__p___1 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; 
/*
 * NOTE(review): CIL-flattened code, comments only -- code bytes unchanged.
 * Line below: body of intel_fdi_normal_train() -- programs the FDI TX
 * control register (pipe*4096 + 393472 == 0x60100) and FDI RX control
 * (pipe*4096 + 983052 == 0xF000C) into "normal" link-training mode,
 * with a platform-dependent bit layout (the +45-byte flag and pch_type
 * == 2U select the alternative encodings), posts the write, waits, and
 * on the flagged platforms additionally ORs 201326592U (0xC000000) into
 * the TX control.  Then the head of ironlake_fdi_link_train(): asserts
 * the pipe is enabled and disables the FDI TX PLL-bypass bits before
 * training (continues below).
 */
pipe = intel_crtc->pipe; reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { temp = temp & 4294966527U; temp = temp | 262912U; } else { temp = temp & 3489660927U; temp = temp | 805568512U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { temp = temp & 4294966527U; temp = temp | 768U; } else { temp = temp & 3489660927U; temp = temp | 805306368U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 64U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(4295000UL); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, tmp | 201326592U, 1); } else { } return; } } static void ironlake_fdi_link_train(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; u32 reg ; u32 temp ; u32 tries ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; assert_pipe(dev_priv, (enum pipe )pipe, 1); reg = (u32 )(pipe * 4096 + 983064); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4294966783U; temp = temp & 4294967039U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); 
/*
 * Continuation: ironlake training pattern 1 -- enables FDI TX (lane
 * count from config->fdi_lanes) and FDI RX with pattern-1 bits (the
 * CIL artifact `temp = temp;` is an OR with the zero-valued pattern-1
 * constant), then polls FDI_RX_IIR (pipe*4096 + 983060 == 0xF0014) up
 * to 5 times for the bit-lock bit (0x100), acking it when seen and
 * logging "FDI train 1 fail!" on timeout.  Afterwards both TX and RX
 * are switched to training pattern 2 (bit 268435456U == 0x10000000).
 */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __const_udelay(644250UL); reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4291297279U; temp = (u32 )(((intel_crtc->config)->fdi_lanes + -1) << 19) | temp; temp = temp & 3489660927U; temp = temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 2147483648U, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 3489660927U; temp = temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe + 198659) * 4), 2U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe + 198659) * 4), 3U, 1); reg = (u32 )(pipe * 4096 + 983060); tries = 0U; goto ldv_51240; ldv_51239: temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ironlake_fdi_link_train", "FDI_RX_IIR 0x%x\n", temp); } else { } if ((temp & 256U) != 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ironlake_fdi_link_train", "FDI train 1 done.\n"); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 256U, 1); goto ldv_51238; } else { } tries = tries + 1U; ldv_51240: ; if (tries <= 4U) { goto ldv_51239; } else { } ldv_51238: ; if (tries == 5U) { drm_err("FDI train 1 fail!\n"); } else { } reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 3489660927U; temp = temp | 268435456U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); reg = (u32 )(pipe * 4096 + 983052); temp = 
/*
 * Continuation: pattern-2 polling loop of ironlake_fdi_link_train()
 * (waits for symbol-lock bit 0x200, same 5-try structure), followed by
 * the static snb_b_fdi_train_param[] voltage-swing/emphasis table used
 * by the SNB/IVB trainers, and the start of gen6_fdi_link_train()
 * (pattern-1 setup; continues on the following lines).
 */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 3489660927U; temp = temp | 268435456U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); reg = (u32 )(pipe * 4096 + 983060); tries = 0U; goto ldv_51243; ldv_51242: temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ironlake_fdi_link_train", "FDI_RX_IIR 0x%x\n", temp); } else { } if ((temp & 512U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 512U, 1); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ironlake_fdi_link_train", "FDI train 2 done.\n"); } else { } goto ldv_51241; } else { } tries = tries + 1U; ldv_51243: ; if (tries <= 4U) { goto ldv_51242; } else { } ldv_51241: ; if (tries == 5U) { drm_err("FDI train 2 fail!\n"); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("ironlake_fdi_link_train", "FDI train done\n"); } else { } return; } } static int const snb_b_fdi_train_param[4U] = { 0, 243269632, 239075328, 234881024}; static void gen6_fdi_link_train(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; u32 reg ; u32 temp ; u32 i ; u32 retry ; struct drm_i915_private *__p ; long tmp ; long tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; reg = (u32 )(pipe * 4096 + 983064); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 
/*
 * NOTE(review): CIL-flattened code, comments only -- code bytes unchanged.
 * Line below: gen6_fdi_link_train() pattern-1 setup -- clears the
 * training-enable bits, programs FDI TX with lane count and clears the
 * voltage-swing field, enables TX and RX (RX pattern encoding differs
 * when pch_type == 2U, i.e. the CPT-style layout), then enters the outer
 * loop over snb_b_fdi_train_param[] voltage/emphasis values with an
 * inner 5-try poll of FDI_RX_IIR for bit-lock (0x100).
 */
1); temp = temp & 4294966783U; temp = temp & 4294967039U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4291297279U; temp = (u32 )(((intel_crtc->config)->fdi_lanes + -1) << 19) | temp; temp = temp & 3489660927U; temp = temp; temp = temp & 4030726143U; temp = temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 983056), 2097296U, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 2U) { temp = temp & 4294966527U; temp = temp; } else { temp = temp & 3489660927U; temp = temp; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); i = 0U; goto ldv_51270; ldv_51269: reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4030726143U; temp = (u32 )snb_b_fdi_train_param[i] | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(2147500UL); retry = 0U; goto ldv_51267; ldv_51266: reg = (u32 )(pipe * 4096 + 983060); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("gen6_fdi_link_train", "FDI_RX_IIR 0x%x\n", temp); } else { } if ((temp & 256U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 256U, 1); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 
/*
 * Continuation: pattern-1 retry/exit bookkeeping ("FDI train 1 fail!"
 * after 4 vswing levels), then the switch to training pattern 2 -- on
 * gen 6 the vswing field is additionally cleared on TX, and the RX
 * pattern encoding again depends on pch_type == 2U -- followed by the
 * pattern-2 outer loop over snb_b_fdi_train_param[] with the same
 * 5-try FDI_RX_IIR poll, now for symbol lock (0x200).
 */
0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("gen6_fdi_link_train", "FDI train 1 done.\n"); } else { } goto ldv_51265; } else { } __const_udelay(214750UL); retry = retry + 1U; ldv_51267: ; if (retry <= 4U) { goto ldv_51266; } else { } ldv_51265: ; if (retry <= 4U) { goto ldv_51268; } else { } i = i + 1U; ldv_51270: ; if (i <= 3U) { goto ldv_51269; } else { } ldv_51268: ; if (i == 4U) { drm_err("FDI train 1 fail!\n"); } else { } reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 3489660927U; temp = temp | 268435456U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { temp = temp & 4030726143U; temp = temp; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type == 2U) { temp = temp & 4294966527U; temp = temp | 256U; } else { temp = temp & 3489660927U; temp = temp | 268435456U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); i = 0U; goto ldv_51288; ldv_51287: reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4030726143U; temp = (u32 )snb_b_fdi_train_param[i] | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(2147500UL); retry = 0U; goto ldv_51285; ldv_51284: reg = (u32 )(pipe * 4096 + 983060); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("gen6_fdi_link_train", "FDI_RX_IIR 0x%x\n", 
/*
 * Continuation: completion of gen6_fdi_link_train() ("FDI train 2
 * done." / "FDI train 2 fail!" / "FDI train done."), then the prologue
 * of ivb_manual_fdi_link_train(), which disables the auto-train bits
 * and logs FDI_RX_IIR before entering its per-vswing loop.
 */
temp); } else { } if ((temp & 512U) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 512U, 1); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("gen6_fdi_link_train", "FDI train 2 done.\n"); } else { } goto ldv_51283; } else { } __const_udelay(214750UL); retry = retry + 1U; ldv_51285: ; if (retry <= 4U) { goto ldv_51284; } else { } ldv_51283: ; if (retry <= 4U) { goto ldv_51286; } else { } i = i + 1U; ldv_51288: ; if (i <= 3U) { goto ldv_51287; } else { } ldv_51286: ; if (i == 4U) { drm_err("FDI train 2 fail!\n"); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("gen6_fdi_link_train", "FDI train done.\n"); } else { } return; } } static void ivb_manual_fdi_link_train(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; u32 reg ; u32 temp ; u32 i ; u32 j ; uint32_t tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; uint32_t tmp___7 ; long tmp___8 ; long tmp___9 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; reg = (u32 )(pipe * 4096 + 983064); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4294966783U; temp = temp & 4294967039U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(644250UL); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 983060), 1); drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI_RX_IIR before link train 0x%x\n", tmp); } else { } j = 0U; goto ldv_51314; 
/*
 * Continuation: ivb_manual_fdi_link_train() per-vswing (j) loop --
 * disables TX/RX training, re-programs pattern 1 with
 * snb_b_fdi_train_param[j/2] and the manual-train bit (2048U), enables
 * TX and RX, then polls FDI_RX_IIR for bit-lock.  The function runs
 * past the end of this excerpt and is left untouched here.
 */
ldv_51313: reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4294965503U; temp = temp & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4294966271U; temp = temp & 4294966527U; temp = temp & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4291297279U; temp = (u32 )(((intel_crtc->config)->fdi_lanes + -1) << 19) | temp; temp = temp; temp = temp & 4030726143U; temp = (u32 )snb_b_fdi_train_param[j / 2U] | temp; temp = temp | 2048U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 983056), 2097296U, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp; temp = temp | 2048U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(4295UL); i = 0U; goto ldv_51307; ldv_51306: reg = (u32 )(pipe * 4096 + 983060); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI_RX_IIR 0x%x\n", temp); } else { } if ((temp & 256U) != 0U) { goto _L; } else { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___3 & 256U) != 0U) { _L: /* CIL Label */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 256U, 1); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { 
drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI train 1 done, level %i.\n", i); } else { } goto ldv_51305; } else { } } __const_udelay(4295UL); i = i + 1U; ldv_51307: ; if (i <= 3U) { goto ldv_51306; } else { } ldv_51305: ; if (i == 4U) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI train 1 fail on vswing %d\n", j / 2U); } else { } goto ldv_51308; } else { } reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4294966527U; temp = temp | 256U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4294966527U; temp = temp | 256U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(8590UL); i = 0U; goto ldv_51311; ldv_51310: reg = (u32 )(pipe * 4096 + 983060); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI_RX_IIR 0x%x\n", temp); } else { } if ((temp & 512U) != 0U) { goto _L___0; } else { tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp___7 & 512U) != 0U) { _L___0: /* CIL Label */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 512U, 1); tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI train 2 done, level %i.\n", i); } else { } goto train_done; } else { } } __const_udelay(8590UL); i = i + 1U; ldv_51311: ; if (i <= 3U) { goto ldv_51310; } else { } if (i == 4U) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { 
drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI train 2 fail on vswing %d\n", j / 2U); } else { } } else { } ldv_51308: j = j + 1U; ldv_51314: ; if (j <= 7U) { goto ldv_51313; } else { } train_done: tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("ivb_manual_fdi_link_train", "FDI train done.\n"); } else { } return; } } static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; u32 reg ; u32 temp ; uint32_t tmp ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = intel_crtc->pipe; reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4290838527U; temp = (u32 )(((intel_crtc->config)->fdi_lanes + -1) << 19) | temp; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); temp = ((tmp & 224U) << 11) | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 8192U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(859000UL); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 16U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(859000UL); reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((temp & 16384U) == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp | 16384U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(429500UL); } else { } return; } } static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private 
/*
 * Continuation of ironlake_fdi_pll_disable: clear the per-pipe PLL enable
 * bits in the FDI RX (pipe*4096+983052, bits 16U and 8192U) and TX
 * (pipe*4096+393472, bit 16384U) control registers, with posting reads and
 * ~1ms settle delays between steps.  Then ironlake_fdi_disable begins: it
 * clears the TX enable bit (top bit, mask 2147483647U), masks the RX lane
 * bits (4294508543U) while re-merging the 224U<<11 field read from the CPU
 * pipe register (pipe_offset+458760U), and disables RX.
 */
*dev_priv ; int pipe ; u32 reg ; u32 temp ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = intel_crtc->pipe; reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp & 4294967279U, 1); reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp & 4294950911U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(429500UL); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp & 4294959103U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(429500UL); return; } } static void ironlake_fdi_disable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; u32 reg ; u32 temp ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 4294508543U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int
/*
 * Ibex Peak (pch_type == 1U): write 2U to the per-pipe register at
 * (pipe+198659)*4 -- presumably an FDI RX workaround register for IBX;
 * confirm against i915 headers.  Then restore training-pattern bits in the
 * TX/RX control registers, with the pattern mask differing for CougarPoint
 * (pch_type == 2U) vs older PCHs, and settle ~1ms.  The line's trailing
 * code starts intel_has_pending_fb_unpin: a scan of mode_config.crtc_list
 * (the +0xfffffffffffffff0UL is CIL's expansion of container_of).
 */
)dev_priv->info.display_mmio_offset) + 458760U), 1); temp = ((tmp & 224U) << 11) | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(429500UL); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe + 198659) * 4), 2U, 1); } else { } reg = (u32 )(pipe * 4096 + 393472); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 3489660927U; temp = temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); reg = (u32 )(pipe * 4096 + 983052); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { temp = temp & 4294966527U; temp = temp; } else { temp = temp & 3489660927U; temp = temp; } temp = temp & 4294508543U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); temp = ((tmp___0 & 224U) << 11) | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); __const_udelay(429500UL); return; } } bool intel_has_pending_fb_unpin(struct drm_device *dev ) { struct intel_crtc *crtc ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_51365; ldv_51364: tmp = atomic_read((atomic_t const *)(& crtc->unpin_work_count)); if (tmp == 0) { goto ldv_51363; } else { } if ((unsigned long )crtc->unpin_work != (unsigned long )((struct intel_unpin_work *)0)) { intel_wait_for_vblank(dev, (int
/*
 * Tail of intel_has_pending_fb_unpin: for a crtc with nonzero
 * unpin_work_count, optionally wait one vblank and return 1; return 0 once
 * the whole crtc_list has been scanned.  Also on this line:
 * page_flip_completed -- clears intel_crtc->unpin_work (with a compiler
 * barrier before the store), sends the pending vblank event if any, drops
 * the vblank reference, wakes dev_priv->pending_flip_queue, queues the
 * unpin work on dev_priv->wq and traces completion -- and the opening of
 * intel_crtc_wait_for_pending_flips with its CIL-expanded WARN/wait locals.
 */
)crtc->pipe); } else { } return (1); ldv_51363: __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_51365: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_51364; } else { } return (0); } } static void page_flip_completed(struct intel_crtc *intel_crtc ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct intel_unpin_work *work ; { tmp = to_i915((struct drm_device const *)intel_crtc->base.dev); dev_priv = tmp; work = intel_crtc->unpin_work; __asm__ volatile ("": : : "memory"); intel_crtc->unpin_work = (struct intel_unpin_work *)0; if ((unsigned long )work->event != (unsigned long )((struct drm_pending_vblank_event *)0)) { drm_send_vblank_event(intel_crtc->base.dev, (int )intel_crtc->pipe, work->event); } else { } drm_crtc_vblank_put(& intel_crtc->base); __wake_up(& dev_priv->pending_flip_queue, 3U, 0, (void *)0); queue_work___2(dev_priv->wq, & work->work); trace_i915_flip_complete((int )intel_crtc->plane, work->pending_flip_obj); return; } } void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp ; long tmp___0 ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; bool __warned ; int __ret_warn_once ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; int __ret_warn_on___1 ; long __ret ; wait_queue_t __wait ; long __ret___0 ; long __int ; long tmp___4 ; bool __cond ; bool tmp___5 ; int tmp___6 ; bool __cond___0 ; bool tmp___7 ; int tmp___8 ; long tmp___9 ; long tmp___10 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = waitqueue_active(& dev_priv->pending_flip_queue); __ret_warn_on = tmp != 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) {
/*
 * Expanded WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)) plus
 * the inlined wait_event_timeout(pending_flip_queue,
 * !intel_crtc_has_pending_flip(crtc), 60*HZ) loop (__ret starts at 15000
 * jiffies here) -- the original expressions are preserved verbatim in the
 * warn_slowpath_fmt format strings below.
 */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3913, "WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret = 15000L; __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3916, 0); tmp___7 = intel_crtc_has_pending_flip(crtc); if ((int )tmp___7 != 0) { tmp___8 = 0; } else { tmp___8 = 1; } __cond___0 = (bool )tmp___8; if ((int )__cond___0 && __ret == 0L) { __ret = 1L; } else { } if (((int )__cond___0 || __ret == 0L) == 0) { __ret___0 = 15000L; INIT_LIST_HEAD(& __wait.task_list); __wait.flags = 0U; ldv_51390: tmp___4 = prepare_to_wait_event(& dev_priv->pending_flip_queue, & __wait, 2); __int = tmp___4; tmp___5 = intel_crtc_has_pending_flip(crtc); if ((int )tmp___5 != 0) { tmp___6 = 0; } else { tmp___6 = 1; } __cond = (bool )tmp___6; if ((int )__cond && __ret___0 == 0L) { __ret___0 = 1L; } else { } if (((int )__cond || __ret___0 == 0L) != 0) { goto ldv_51389; } else { } __ret___0 = schedule_timeout(__ret___0); goto ldv_51390; ldv_51389: finish_wait(& dev_priv->pending_flip_queue, & __wait); __ret = __ret___0; } else { } __ret_warn_on___1 = __ret == 0L; tmp___9 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3916, "WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, !intel_crtc_has_pending_flip(crtc), 60*HZ) == 0)"); } else { } tmp___10 = ldv__builtin_expect(__ret_warn_on___1 !=
/*
 * Timeout path: expanded WARN_ONCE("Removing stuck page flip") and a forced
 * page_flip_completed() under dev->event_lock; afterwards, if the primary
 * plane has a framebuffer, intel_finish_fb() is called under struct_mutex.
 * The trailing code begins lpt_program_iclkip (Lynx Point iCLKIP clock
 * programming over the sideband interface, under dev_priv->sb_lock; the
 * write of 0U to 811040L appears to gate the pixel clock -- confirm).
 */
0, 0L); if (tmp___10 != 0L) { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; spin_lock_irq(& dev->event_lock); if ((unsigned long )intel_crtc->unpin_work != (unsigned long )((struct intel_unpin_work *)0)) { __ret_warn_once = 1; tmp___3 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___3 != 0L) { __ret_warn_on___0 = ! __warned; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3921, "Removing stuck page flip\n"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); page_flip_completed(intel_crtc); } else { } spin_unlock_irq(& dev->event_lock); } else { } if ((unsigned long )(crtc->primary)->fb != (unsigned long )((struct drm_framebuffer *)0)) { mutex_lock_nested(& dev->struct_mutex, 0U); intel_finish_fb((crtc->primary)->fb); mutex_unlock(& dev->struct_mutex); } else { } return; } } static void lpt_program_iclkip(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int clock ; struct drm_crtc const *__mptr ; u32 divsel ; u32 phaseinc ; u32 auxdiv ; u32 phasedir ; u32 temp ; u32 tmp ; u32 iclk_virtual_root_freq ; u32 iclk_pi_range ; u32 desired_divisor ; u32 msb_divisor_value ; u32 pi_value ; int __ret_warn_on ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; clock = (((struct intel_crtc *)__mptr)->config)->base.adjusted_mode.crtc_clock; phasedir = 0U; mutex_lock_nested(& dev_priv->sb_lock, 0U); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811040L, 0U, 1); tmp =
/*
 * lpt_program_iclkip body: disable SSC control via SBI offset 1548 (set bit
 * 1U), then compute the divider settings -- fixed values for clock==20000
 * (kHz), otherwise derived from a 172.8 MHz virtual root clock with a
 * 64-step phase-increment range (divsel = desired_divisor/64 - 2, phaseinc
 * = desired_divisor % 64).  The WARN_ONs mirror the original
 * SBI_SSCDIVINTPHASE mask checks, as preserved in the warn format strings.
 */
intel_sbi_read(dev_priv, 1548, 0); intel_sbi_write(dev_priv, 1548, tmp | 1U, 0); if (clock == 20000) { auxdiv = 1U; divsel = 65U; phaseinc = 32U; } else { iclk_virtual_root_freq = 172800000U; iclk_pi_range = 64U; desired_divisor = iclk_virtual_root_freq / (u32 )clock; msb_divisor_value = desired_divisor / iclk_pi_range; pi_value = desired_divisor % iclk_pi_range; auxdiv = 0U; divsel = msb_divisor_value - 2U; phaseinc = pi_value; } __ret_warn_on = ((divsel << 1) & 4294967041U) != 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3983, "WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & ~SBI_SSCDIVINTPHASE_DIVSEL_MASK)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = phasedir << 15 != 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 3985, "WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & ~SBI_SSCDIVINTPHASE_INCVAL_MASK)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("lpt_program_iclkip", "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", clock, auxdiv, divsel, phasedir, phaseinc); } else { } temp = intel_sbi_read(dev_priv, 1536, 0); temp = temp & 4294967041U; temp = (divsel << 1) | temp; temp = temp & 4294934783U; temp = (phaseinc << 8) | temp; temp = (phasedir << 15) | temp; temp = temp | 1U; intel_sbi_write(dev_priv, 1536, temp, 0); temp =
/*
 * Program the aux divider at SBI offset 1552, re-enable SSC (clear bit 1U
 * at offset 1548), wait ~24us, then write 1U to 811040L (ungating the clock
 * programmed above -- confirm register identity) and drop sb_lock.  The
 * trailing code is ironlake_pch_transcoder_set_timings: it copies the CPU
 * transcoder timing registers (trans_offset + 393216U..393236U, 393256U)
 * one-for-one into the PCH transcoder block at pch_transcoder*4096+9175xx.
 */
intel_sbi_read(dev_priv, 1552, 0); temp = temp & 4294967279U; temp = (auxdiv << 4) | temp; intel_sbi_write(dev_priv, 1552, temp, 0); temp = intel_sbi_read(dev_priv, 1548, 0); temp = temp & 4294967294U; intel_sbi_write(dev_priv, 1548, temp, 0); __const_udelay(103080UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811040L, 1U, 1); mutex_unlock(& dev_priv->sb_lock); return; } } static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc , enum pipe pch_transcoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum transcoder cpu_transcoder ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; cpu_transcoder = (crtc->config)->cpu_transcoder; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393216U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )pch_transcoder + 224) * 4096), tmp, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393220U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pch_transcoder * 4096 + 917508), tmp___0, 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393224U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pch_transcoder * 4096 + 917512), tmp___1, 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int
/*
 * Continuation of ironlake_pch_transcoder_set_timings: copies the remaining
 * CPU transcoder timing registers (offsets +393228U, +393232U, +393236U and
 * the vsyncshift at +393256U) to the PCH transcoder.  Then
 * cpt_set_fdi_bc_bifurcation begins: it early-returns if bit 4096U of the
 * register at 794624L already matches 'enable' (the FDI B/C bifurcation
 * bit, per the "fdi C rx" debug string and the WARN texts below).
 */
)(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pch_transcoder * 4096 + 917516), tmp___2, 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393232U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pch_transcoder * 4096 + 917520), tmp___3, 1); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393236U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pch_transcoder * 4096 + 917524), tmp___4, 1); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393256U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pch_transcoder * 4096 + 917544), tmp___5, 1); return; } } static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev , bool enable ) { struct drm_i915_private *dev_priv ; uint32_t temp ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; int __ret_warn_on___0 ; uint32_t tmp___1 ; long tmp___2 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794624L, 1); if (((temp & 4096U) != 0U) == (int )enable) { return; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 987148L, 1); __ret_warn_on = (int )tmp < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) {
/*
 * Expanded WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B/PIPE_C)) & FDI_RX_ENABLE)
 * (registers 987148L and 991244L; "(int) < 0" tests the top enable bit):
 * the bifurcation bit must not be flipped while FDI RX B or C is enabled.
 */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4056, "WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 991244L, 1); __ret_warn_on___0 = (int )tmp___1 < 0; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4057, "WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); temp = temp & 4294963199U; if ((int )enable) { temp = temp | 4096U; } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("cpt_set_fdi_bc_bifurcation", "%sabling fdi C rx\n", (int )enable ?
/*
 * "%sabling fdi C rx" debug message and the bifurcation-register write with
 * posting read.  Also on this line: ivybridge_update_fdi_bc_bifurcation --
 * pipe 0: nothing; pipe 1: bifurcate only when fdi_lanes <= 2; pipe 2:
 * always bifurcate; default: inline BUG() (the ud2 asm) -- and the start of
 * ironlake_pch_enable, which asserts the PCH transcoder is off and, when
 * the IVB flag byte at offset 45UL is set, updates the B/C bifurcation.
 */
(char *)"en" : (char *)"dis"); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794624L, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794624L, 0); return; } } static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; { dev = intel_crtc->base.dev; switch ((int )intel_crtc->pipe) { case 0: ; goto ldv_51448; case 1: ; if ((intel_crtc->config)->fdi_lanes > 2) { cpt_set_fdi_bc_bifurcation(dev, 0); } else { cpt_set_fdi_bc_bifurcation(dev, 1); } goto ldv_51448; case 2: cpt_set_fdi_bc_bifurcation(dev, 1); goto ldv_51448; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (4087), "i" (12UL)); ldv_51452: ; goto ldv_51452; } ldv_51448: ; return; } } static void ironlake_pch_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; u32 reg ; u32 temp ; struct drm_i915_private *__p ; uint32_t tmp ; u32 sel ; struct drm_i915_private *__p___0 ; u32 bpc ; uint32_t tmp___0 ; int tmp___1 ; struct drm_i915_private *__p___1 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; assert_pch_transcoder_disabled(dev_priv, (enum pipe )pipe); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ivybridge_update_fdi_bc_bifurcation(intel_crtc); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[pipe] - dev_priv->info.trans_offsets[0]) +
/*
 * ironlake_pch_enable continuation: copy a masked value from the CPU
 * transcoder register at trans_offset+393264U into pipe*4096+983088, run
 * the platform FDI training hook, and on CougarPoint (pch_type == 2U)
 * program the DPLL-select register 815104L (enable bit 1<<(pipe*4+3),
 * select bit 1<<(pipe*4) chosen by shared_dpll == 1).  Then enable the
 * shared DPLL, assert the panel is unlocked, copy transcoder timings,
 * switch FDI to normal training, and -- for CPT with a DP encoder --
 * program the PCH transcoder DP control register at pipe*4096+918272:
 * bpc from the pipe register's 224U field, sync polarity from mode.flags,
 * and the port-select field from intel_trans_dp_port_sel().
 */
(unsigned int )dev_priv->info.display_mmio_offset) + 393264U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 983088), tmp & 2113929216U, 1); (*(dev_priv->display.fdi_link_train))(crtc); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815104L, 1); temp = (u32 )(1 << (pipe * 4 + 3)) | temp; sel = (u32 )(1 << pipe * 4); if ((int )(intel_crtc->config)->shared_dpll == 1) { temp = temp | sel; } else { temp = ~ sel & temp; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 815104L, temp, 1); } else { } intel_enable_shared_dpll(intel_crtc); assert_panel_unlocked(dev_priv, (enum pipe )pipe); ironlake_pch_transcoder_set_timings(intel_crtc, (enum pipe )pipe); intel_fdi_normal_train(crtc); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type == 2U && (int )(intel_crtc->config)->has_dp_encoder) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); bpc = (tmp___0 & 224U) >> 5; reg = (u32 )(pipe * 4096 + 918272); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 2684352999U; temp = temp | 2147483648U; temp = (bpc << 9) | temp; if ((int )crtc->mode.flags & 1) { temp = temp | 8U; } else { } if ((crtc->mode.flags & 4U) != 0U) { temp = temp | 16U; } else { } tmp___1 = intel_trans_dp_port_sel(crtc); switch (tmp___1) { case 934144: temp = temp; goto ldv_51485; case 934400: temp = temp | 536870912U; goto ldv_51485; case 934656: temp = temp | 1073741824U; goto ldv_51485; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char
/*
 * Port-select BUG() fallback (inline ud2), the final transcoder DP control
 * write, and ironlake_enable_pch_transcoder().  Also on this line: all of
 * lpt_pch_enable (assert transcoder A disabled, program iCLKIP, copy
 * timings to transcoder A, enable the LPT PCH transcoder) and the start of
 * intel_put_shared_dpll, which warns "bad %s crtc mask" if this crtc is
 * not actually in pll->config.crtc_mask.
 */
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (4177), "i" (12UL)); ldv_51489: ; goto ldv_51489; } ldv_51485: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); } else { } ironlake_enable_pch_transcoder(dev_priv, (enum pipe )pipe); return; } } static void lpt_pch_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum transcoder cpu_transcoder ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; assert_pch_transcoder_disabled(dev_priv, 0); lpt_program_iclkip(crtc); ironlake_pch_transcoder_set_timings(intel_crtc, 0); lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); return; } } void intel_put_shared_dpll(struct intel_crtc *crtc ) { struct intel_shared_dpll *pll ; struct intel_shared_dpll *tmp ; int __ret_warn_on ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; int __ret_warn_on___1 ; long tmp___2 ; { tmp = intel_crtc_to_shared_dpll(crtc); pll = tmp; if ((unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0)) { return; } else { } if ((pll->config.crtc_mask & (unsigned int )(1 << (int )crtc->pipe)) == 0U) { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4211, "bad %s crtc mask\n", pll->name); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } else { } pll->config.crtc_mask =
/*
 * intel_put_shared_dpll tail: drop this crtc from pll->config.crtc_mask;
 * once the mask empties, WARN_ON(pll->on) and WARN_ON(pll->active) (the
 * pll should already have been disabled), then reset
 * crtc->config->shared_dpll to -1.  The trailing code begins
 * intel_get_shared_dpll, which continues past this excerpt (IBX path:
 * a per-pipe pre-allocated DPLL, per its debug string).
 */
pll->config.crtc_mask & (unsigned int )(~ (1 << (int )crtc->pipe)); if (pll->config.crtc_mask == 0U) { __ret_warn_on___0 = (int )pll->on; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4217, "WARN_ON(pll->on)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __ret_warn_on___1 = pll->active != 0; tmp___2 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4218, "WARN_ON(pll->active)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } else { } (crtc->config)->shared_dpll = -1; return; } } struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state ) { struct drm_i915_private *dev_priv ; struct intel_shared_dpll *pll ; enum intel_dpll_id i ; long tmp ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p ; struct intel_encoder *encoder ; struct intel_digital_port *intel_dig_port ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; int __ret_warn_on___1 ; long tmp___4 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp___5 ; int tmp___6 ; long tmp___7 ; long tmp___8 ; { dev_priv = (struct drm_i915_private *)(crtc->base.dev)->dev_private; __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p->pch_type == 1U) { i = (enum intel_dpll_id )crtc->pipe; pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L)
{ drm_ut_debug_printk("intel_get_shared_dpll", "CRTC:%d using pre-allocated %s\n", crtc->base.base.id, pll->name); } else { } __ret_warn_on = (pll->new_config)->crtc_mask != 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4239, "WARN_ON(pll->new_config->crtc_mask)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto found; } else { } __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 9U) { encoder = intel_ddi_get_crtc_new_encoder(crtc_state); __ret_warn_on___0 = (unsigned long )encoder == (unsigned long )((struct intel_encoder *)0); tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4250, "WARN_ON(!encoder)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { return ((struct intel_shared_dpll *)0); } else { } intel_dig_port = enc_to_dig_port(& encoder->base); i = (enum intel_dpll_id )intel_dig_port->port; pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_get_shared_dpll", "CRTC:%d using pre-allocated %s\n", crtc->base.base.id, pll->name); } else { } __ret_warn_on___1 = (pll->new_config)->crtc_mask != 0U; tmp___4 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if 
(tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4259, "WARN_ON(pll->new_config->crtc_mask)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); goto found; } else { } } else { } i = 0; goto ldv_51546; ldv_51545: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; if ((pll->new_config)->crtc_mask == 0U) { goto ldv_51544; } else { } tmp___6 = memcmp((void const *)(& crtc_state->dpll_hw_state), (void const *)(& (pll->new_config)->hw_state), 68UL); if (tmp___6 == 0) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_get_shared_dpll", "CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", crtc->base.base.id, pll->name, (pll->new_config)->crtc_mask, pll->active); } else { } goto found; } else { } ldv_51544: i = (enum intel_dpll_id )((int )i + 1); ldv_51546: ; if (dev_priv->num_shared_dpll > (int )i) { goto ldv_51545; } else { } i = 0; goto ldv_51549; ldv_51548: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; if ((pll->new_config)->crtc_mask == 0U) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_get_shared_dpll", "CRTC:%d allocated %s\n", crtc->base.base.id, pll->name); } else { } goto found; } else { } i = (enum intel_dpll_id )((int )i + 1); ldv_51549: ; if (dev_priv->num_shared_dpll > (int )i) { goto ldv_51548; } else { } return ((struct intel_shared_dpll *)0); found: ; if ((pll->new_config)->crtc_mask == 0U) { (pll->new_config)->hw_state = crtc_state->dpll_hw_state; } else { } crtc_state->shared_dpll = i; tmp___8 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_get_shared_dpll", "using %s for pipe %c\n", pll->name, (int 
)crtc->pipe + 65); } else { } (pll->new_config)->crtc_mask = (pll->new_config)->crtc_mask | (unsigned int )(1 << (int )crtc->pipe); return (pll); } } static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv , unsigned int clear_pipes ) { struct intel_shared_dpll *pll ; enum intel_dpll_id i ; void *tmp ; { i = 0; goto ldv_51559; ldv_51558: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; tmp = kmemdup((void const *)(& pll->config), 72UL, 208U); pll->new_config = (struct intel_shared_dpll_config *)tmp; if ((unsigned long )pll->new_config == (unsigned long )((struct intel_shared_dpll_config *)0)) { goto cleanup; } else { } (pll->new_config)->crtc_mask = (pll->new_config)->crtc_mask & ~ clear_pipes; i = (enum intel_dpll_id )((int )i + 1); ldv_51559: ; if (dev_priv->num_shared_dpll > (int )i) { goto ldv_51558; } else { } return (0); cleanup: ; goto ldv_51562; ldv_51561: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; kfree((void const *)pll->new_config); pll->new_config = (struct intel_shared_dpll_config *)0; ldv_51562: i = (enum intel_dpll_id )((int )i - 1); if ((int )i >= 0) { goto ldv_51561; } else { } return (-12); } } static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv ) { struct intel_shared_dpll *pll ; enum intel_dpll_id i ; int __ret_warn_on ; long tmp ; { i = 0; goto ldv_51572; ldv_51571: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; __ret_warn_on = (unsigned long )pll->new_config == (unsigned long )(& pll->config); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4352, "WARN_ON(pll->new_config == &pll->config)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); 
pll->config = *(pll->new_config); kfree((void const *)pll->new_config); pll->new_config = (struct intel_shared_dpll_config *)0; i = (enum intel_dpll_id )((int )i + 1); ldv_51572: ; if (dev_priv->num_shared_dpll > (int )i) { goto ldv_51571; } else { } return; } } static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv ) { struct intel_shared_dpll *pll ; enum intel_dpll_id i ; int __ret_warn_on ; long tmp ; { i = 0; goto ldv_51582; ldv_51581: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; __ret_warn_on = (unsigned long )pll->new_config == (unsigned long )(& pll->config); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4368, "WARN_ON(pll->new_config == &pll->config)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); kfree((void const *)pll->new_config); pll->new_config = (struct intel_shared_dpll_config *)0; i = (enum intel_dpll_id )((int )i + 1); ldv_51582: ; if (dev_priv->num_shared_dpll > (int )i) { goto ldv_51581; } else { } return; } } static void cpt_verify_modeset(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; int dslreg ; u32 temp ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; unsigned long timeout_____0 ; unsigned long tmp___3 ; int ret_____0 ; uint32_t tmp___4 ; bool tmp___5 ; uint32_t tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dslreg = (int )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458752U); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dslreg, 1); __const_udelay(2147500UL); tmp___3 = msecs_to_jiffies(5U); 
timeout_____0 = (tmp___3 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_51601; ldv_51600: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dslreg, 1); if (tmp___4 == temp) { ret_____0 = -110; } else { } goto ldv_51599; } else { } tmp___5 = drm_can_sleep___5(); if ((int )tmp___5) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_51601: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dslreg, 1); if (tmp___6 == temp) { goto ldv_51600; } else { } ldv_51599: ; if (ret_____0 != 0) { tmp = msecs_to_jiffies(5U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_51613; ldv_51612: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dslreg, 1); if (tmp___0 == temp) { ret__ = -110; } else { } goto ldv_51611; } else { } tmp___1 = drm_can_sleep___5(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_51613: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dslreg, 1); if (tmp___2 == temp) { goto ldv_51612; } else { } ldv_51611: ; if (ret__ != 0) { drm_err("mode set failed: pipe %c stuck\n", pipe + 65); } else { } } else { } return; } } int skl_update_scaler_users(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state , struct intel_plane *intel_plane , struct intel_plane_state *plane_state , int force_detach ) { int need_scaling ; int idx ; int src_w ; int src_h ; int dst_w ; int dst_h ; int *scaler_id ; struct drm_framebuffer *fb ; struct intel_crtc_scaler_state *scaler_state ; unsigned int rotation ; unsigned int tmp ; int tmp___0 ; int tmp___1 ; struct drm_display_mode *adjusted_mode ; bool tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; int __ret_warn_on ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; { if ((unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0) || 
(unsigned long )crtc_state == (unsigned long )((struct intel_crtc_state *)0)) { return (0); } else { } scaler_state = & crtc_state->scaler_state; if ((unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0)) { tmp = drm_plane_index(& intel_plane->base); idx = (int )tmp; } else { idx = 31; } fb = (unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) ? plane_state->base.fb : (struct drm_framebuffer *)0; if ((unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0)) { tmp___0 = drm_rect_width((struct drm_rect const *)(& plane_state->src)); src_w = tmp___0 >> 16; tmp___1 = drm_rect_height((struct drm_rect const *)(& plane_state->src)); src_h = tmp___1 >> 16; dst_w = drm_rect_width((struct drm_rect const *)(& plane_state->dst)); dst_h = drm_rect_height((struct drm_rect const *)(& plane_state->dst)); scaler_id = & plane_state->scaler_id; rotation = plane_state->base.rotation; } else { adjusted_mode = & crtc_state->base.adjusted_mode; src_w = crtc_state->pipe_src_w; src_h = crtc_state->pipe_src_h; dst_w = adjusted_mode->hdisplay; dst_h = adjusted_mode->vdisplay; scaler_id = & scaler_state->scaler_id; rotation = 0U; } tmp___2 = intel_rotation_90_or_270(rotation); need_scaling = (int )tmp___2 ? src_h != dst_w || src_w != dst_h : src_w != dst_w || src_h != dst_h; if ((force_detach != 0 || need_scaling == 0) || ((unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) && ((unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0) || ! 
plane_state->visible))) { if (*scaler_id >= 0) { scaler_state->scaler_users = scaler_state->scaler_users & (unsigned int )(~ (1 << idx)); scaler_state->scalers[*scaler_id].in_use = 0; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("skl_update_scaler_users", "Staged freeing scaler id %d.%d from %s:%d crtc_state = %p scaler_users = 0x%x\n", (int )intel_crtc->pipe, *scaler_id, (unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) ? (char *)"PLANE" : (char *)"CRTC", (unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) ? intel_plane->base.base.id : intel_crtc->base.base.id, crtc_state, scaler_state->scaler_users); } else { } *scaler_id = -1; } else { } return (0); } else { } if (((((((src_w <= 7 || src_h <= 7) || dst_w <= 7) || dst_h <= 7) || src_w > 4096) || src_h > 4096) || dst_w > 4096) || dst_h > 4096) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("skl_update_scaler_users", "%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u size is out of scaler range\n", (unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) ? (char *)"PLANE" : (char *)"CRTC", (unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) ? 
intel_plane->base.base.id : intel_crtc->base.base.id, (int )intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h); } else { } return (-22); } else { } __ret_warn_on = (unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) && intel_plane->ckey.flags != 1U; tmp___6 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4493, "WARN_ON(intel_plane && intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)"); } else { } tmp___7 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___7 != 0L) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("skl_update_scaler_users", "PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey", intel_plane->base.base.id, src_w, src_h, dst_w, dst_h); } else { } return (-22); } else { } if ((unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0)) { switch (fb->pixel_format) { case 909199186U: ; case 875709016U: ; case 875713112U: ; case 875708993U: ; case 875713089U: ; case 808669784U: ; case 808665688U: ; case 1448695129U: ; case 1431918169U: ; case 1498831189U: ; case 1498765654U: ; goto ldv_51647; default: tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("skl_update_scaler_users", "PLANE:%d FB:%d unsupported scaling format 0x%x\n", intel_plane->base.base.id, fb->base.id, fb->pixel_format); } else { } return (-22); } ldv_51647: ; } else { } scaler_state->scaler_users = scaler_state->scaler_users | (unsigned int )(1 << idx); tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("skl_update_scaler_users", "%s:%d staged scaling request for %ux%u->%ux%u crtc_state = %p scaler_users = 0x%x\n", (unsigned long )intel_plane != 
(unsigned long )((struct intel_plane *)0) ? (char *)"PLANE" : (char *)"CRTC", (unsigned long )intel_plane != (unsigned long )((struct intel_plane *)0) ? intel_plane->base.base.id : intel_crtc->base.base.id, src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users); } else { } return (0); } } static void skylake_pfit_update(struct intel_crtc *crtc , int enable ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; struct intel_crtc_scaler_state *scaler_state ; long tmp ; int id ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; scaler_state = & (crtc->config)->scaler_state; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("skylake_pfit_update", "for crtc_state = %p\n", crtc->config); } else { } skl_update_scaler_users(crtc, crtc->config, (struct intel_plane *)0, (struct intel_plane_state *)0, enable == 0); intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config); skl_detach_scalers(crtc); if (enable == 0) { return; } else { } if ((int )(crtc->config)->pch_pfit.enabled) { __ret_warn_on = (crtc->config)->scaler_state.scaler_id < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4551, "WARN_ON(crtc->config->scaler_state.scaler_id < 0)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { drm_err("Requesting pfit without getting a scaler first\n"); return; } else { } id = scaler_state->scaler_id; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((id * 256 + ((id * 256 + 428416) + (id * -256 + -426368)) * pipe) + 426368), scaler_state->scalers[id].mode | 2147483648U, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((id * 256 + ((id * 256 + 428400) + (id * -256 + -426352)) * pipe) + 426352), (crtc->config)->pch_pfit.pos, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((id * 256 + ((id * 256 + 428404) + (id * -256 + -426356)) * pipe) + 426356), (crtc->config)->pch_pfit.size, 1); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("skylake_pfit_update", "for crtc_state = %p scaler_id = %d\n", crtc->config, id); } else { } } else { } return; } } static void ironlake_pfit_enable(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; if ((int )(crtc->config)->pch_pfit.enabled) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426112), (uint32_t )((pipe << 29) | -2139095040), 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426112), (uint32_t )((pipe << 29) | -2139095040), 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426112), 2155872256U, 1); } } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426096), (crtc->config)->pch_pfit.pos, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426100), (crtc->config)->pch_pfit.size, 1); } else { } return; } } static void intel_enable_sprite_planes(struct drm_crtc *crtc ) { struct drm_device *dev ; enum pipe pipe ; struct drm_crtc const *__mptr ; struct drm_plane *plane ; struct intel_plane *intel_plane ; struct list_head const *__mptr___0 ; struct 
drm_plane const *__mptr___1 ; struct list_head const *__mptr___2 ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; __mptr___0 = (struct list_head const *)dev->mode_config.plane_list.next; plane = (struct drm_plane *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_51695; ldv_51694: ; if ((unsigned int )plane->type == 0U) { __mptr___1 = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr___1; if ((int )intel_plane->pipe == (int )pipe) { intel_plane_restore(& intel_plane->base); } else { } } else { } __mptr___2 = (struct list_head const *)plane->head.next; plane = (struct drm_plane *)__mptr___2 + 0xfffffffffffffff8UL; ldv_51695: ; if ((unsigned long )(& plane->head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_51694; } else { } return; } } void hsw_enable_ips(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (! 
(crtc->config)->ips_enabled) { return; } else { } intel_wait_for_vblank(dev, (int )crtc->pipe); assert_plane(dev_priv, crtc->plane, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp = sandybridge_pcode_write(dev_priv, 25U, 3221225472U); __ret_warn_on = tmp != 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4615, "WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); mutex_unlock(& dev_priv->rps.hw_lock); } else { goto _L; } } else { _L: /* CIL Label */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 275464L, 2147483648U, 1); tmp___1 = msecs_to_jiffies(50U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_51726; ldv_51725: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 0); if ((int )tmp___2 >= 0) { ret__ = -110; } else { } goto ldv_51724; } else { } tmp___3 = drm_can_sleep___5(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_51726: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 0); if ((int )tmp___4 >= 0) { goto ldv_51725; } else { } ldv_51724: ; if (ret__ != 0) { drm_err("Timed out waiting for IPS enable\n"); } else { } } return; } } void hsw_disable_ips(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int 
ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (! (crtc->config)->ips_enabled) { return; } else { } assert_plane(dev_priv, crtc->plane, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp = sandybridge_pcode_write(dev_priv, 25U, 0U); __ret_warn_on = tmp != 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4645, "WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); mutex_unlock(& dev_priv->rps.hw_lock); tmp___1 = msecs_to_jiffies(42U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_51757; ldv_51756: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 1); if ((int )tmp___2 < 0) { ret__ = -110; } else { } goto ldv_51755; } else { } tmp___3 = drm_can_sleep___5(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_51757: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 1); if ((int )tmp___4 < 0) { goto ldv_51756; } else { } ldv_51755: ; if (ret__ != 0) { drm_err("Timed out waiting for IPS disable\n"); } else { } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 275464L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 0); } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 
275464L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 0); } intel_wait_for_vblank(dev, (int )crtc->pipe); return; } } static void intel_crtc_load_lut(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; int palreg ; int i ; bool reenable_ips ; bool tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; uint32_t tmp___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; palreg = (int )((unsigned int )dev_priv->info.palette_offsets[(int )pipe] + (unsigned int )dev_priv->info.display_mmio_offset); reenable_ips = 0; if (! (crtc->state)->enable || ! intel_crtc->active) { return; } else { } __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /* CIL Label */ tmp = intel_pipe_has_type(intel_crtc, 9); if ((int )tmp) { assert_dsi_pll(dev_priv, 1); } else { assert_pll(dev_priv, pipe, 1); } } else { } } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 4U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { palreg = ((int )pipe + 148) * 2048; } else { } } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U && (int )(intel_crtc->config)->ips_enabled) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 2048 + 304256), 1); if ((tmp___0 & 3U) == 3U) { 
hsw_disable_ips(intel_crtc); reenable_ips = 1; } else { } } else { } i = 0; goto ldv_51802; ldv_51801: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(i * 4 + palreg), (uint32_t )((((int )intel_crtc->lut_r[i] << 16) | ((int )intel_crtc->lut_g[i] << 8)) | (int )intel_crtc->lut_b[i]), 1); i = i + 1; ldv_51802: ; if (i <= 255) { goto ldv_51801; } else { } if ((int )reenable_ips) { hsw_enable_ips(intel_crtc); } else { } return; } } static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { if ((unsigned long )intel_crtc->overlay != (unsigned long )((struct intel_overlay *)0)) { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev->struct_mutex, 0U); dev_priv->mm.interruptible = 0; intel_overlay_switch_off(intel_crtc->overlay); dev_priv->mm.interruptible = 1; mutex_unlock(& dev->struct_mutex); } else { } return; } } static void intel_post_enable_primary(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { intel_wait_for_vblank(dev, pipe); } else { } } else { } hsw_enable_ips(intel_crtc); mutex_lock_nested(& dev->struct_mutex, 0U); intel_fbc_update(dev); mutex_unlock(& dev->struct_mutex); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int 
)((unsigned char )__p___1->info.gen) == 2U) { intel_set_cpu_fifo_underrun_reporting(dev_priv, (enum pipe )pipe, 1); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 4U) { i9xx_check_fifo_underruns(dev_priv); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { i9xx_check_fifo_underruns(dev_priv); } else { } } return; } } static void intel_pre_disable_primary(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { intel_set_cpu_fifo_underrun_reporting(dev_priv, (enum pipe )pipe, 0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 4U) { intel_set_memory_cxsr(dev_priv, 0); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { intel_set_memory_cxsr(dev_priv, 0); } else { } } mutex_lock_nested(& dev->struct_mutex, 0U); if ((unsigned long )dev_priv->fbc.crtc == (unsigned long )intel_crtc) { intel_fbc_disable(dev); } else { } mutex_unlock(& dev->struct_mutex); hsw_disable_ips(intel_crtc); return; } } static void intel_crtc_enable_planes(struct drm_crtc *crtc ) { struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; 
intel_enable_primary_hw_plane(crtc->primary, crtc); intel_enable_sprite_planes(crtc); intel_crtc_update_cursor(crtc, 1); intel_post_enable_primary(crtc); intel_frontbuffer_flip(dev, (unsigned int )(15 << pipe * 4)); return; } } static void intel_crtc_disable_planes(struct drm_crtc *crtc ) { struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_plane *intel_plane ; int pipe ; struct list_head const *__mptr___0 ; struct drm_crtc *from ; struct list_head const *__mptr___1 ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; intel_crtc_wait_for_pending_flips(crtc); intel_pre_disable_primary(crtc); intel_crtc_dpms_overlay_disable(intel_crtc); __mptr___0 = (struct list_head const *)dev->mode_config.plane_list.next; intel_plane = (struct intel_plane *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_51898; ldv_51897: ; if ((int )intel_plane->pipe == pipe) { from = intel_plane->base.crtc; (*(intel_plane->disable_plane))(& intel_plane->base, (unsigned long )from != (unsigned long )((struct drm_crtc *)0) ? 
(unsigned long )from != (unsigned long )((struct drm_crtc *)0) : crtc, 1); } else { } __mptr___1 = (struct list_head const *)intel_plane->base.head.next; intel_plane = (struct intel_plane *)__mptr___1 + 0xfffffffffffffff8UL; ldv_51898: ; if ((unsigned long )(& intel_plane->base.head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_51897; } else { } intel_frontbuffer_flip(dev, (unsigned int )(15 << pipe * 4)); return; } } static void ironlake_crtc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *encoder ; int pipe ; int __ret_warn_on ; long tmp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct drm_i915_private *__p ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; __ret_warn_on = ! 
(crtc->state)->enable; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4887, "WARN_ON(!crtc->state->enable)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((int )intel_crtc->active) { return; } else { } if ((int )(intel_crtc->config)->has_pch_encoder) { intel_prepare_shared_dpll(intel_crtc); } else { } if ((int )(intel_crtc->config)->has_dp_encoder) { intel_dp_set_m_n(intel_crtc, 0); } else { } intel_set_pipe_timings(intel_crtc); if ((int )(intel_crtc->config)->has_pch_encoder) { intel_cpu_transcoder_set_m_n(intel_crtc, & (intel_crtc->config)->fdi_m_n, (struct intel_link_m_n *)0); } else { } ironlake_set_pipeconf(crtc); intel_crtc->active = 1; intel_set_cpu_fifo_underrun_reporting(dev_priv, (enum pipe )pipe, 1); intel_set_pch_fifo_underrun_reporting(dev_priv, (enum transcoder )pipe, 1); __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_51917; ldv_51916: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->pre_enable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->pre_enable))(encoder); } else { } } else { } __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_51917: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_51916; } else { } if ((int )(intel_crtc->config)->has_pch_encoder) { ironlake_fdi_pll_enable(intel_crtc); } else { assert_fdi_tx(dev_priv, (enum pipe )pipe, 0); assert_fdi_rx(dev_priv, (enum pipe )pipe, 0); } ironlake_pfit_enable(intel_crtc); intel_crtc_load_lut(crtc); 
intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); if ((int )(intel_crtc->config)->has_pch_encoder) { ironlake_pch_enable(crtc); } else { } assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_51924; ldv_51923: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { (*(encoder->enable))(encoder); } else { } __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_51924: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_51923; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 2U) { cpt_verify_modeset(dev, (int )intel_crtc->pipe); } else { } return; } } static bool hsw_crtc_supports_ips(struct intel_crtc *crtc ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp ; { __p = to_i915((struct drm_device const *)crtc->base.dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)crtc->base.dev); if (((int )__p___0->info.device_id & 65280) == 2560) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)crtc->base.dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)crtc->base.dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { _L: /* CIL Label */ if ((int )crtc->pipe == 0) { tmp = 1; } else { tmp = 0; } } else { tmp = 0; } } else { tmp = 0; } } return ((bool )tmp); } } static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc ) { struct drm_device *dev ; struct intel_crtc *crtc_it ; struct intel_crtc *other_active_crtc ; 
struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = crtc->base.dev; other_active_crtc = (struct intel_crtc *)0; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc_it = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_51971; ldv_51970: ; if (! crtc_it->active || (unsigned long )crtc_it == (unsigned long )crtc) { goto ldv_51969; } else { } if ((unsigned long )other_active_crtc != (unsigned long )((struct intel_crtc *)0)) { return; } else { } other_active_crtc = crtc_it; ldv_51969: __mptr___0 = (struct list_head const *)crtc_it->base.head.next; crtc_it = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_51971: ; if ((unsigned long )(& crtc_it->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_51970; } else { } if ((unsigned long )other_active_crtc == (unsigned long )((struct intel_crtc *)0)) { return; } else { } intel_wait_for_vblank(dev, (int )other_active_crtc->pipe); intel_wait_for_vblank(dev, (int )other_active_crtc->pipe); return; } } static void haswell_crtc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *encoder ; int pipe ; int __ret_warn_on ; long tmp ; struct intel_shared_dpll *tmp___0 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int __ret_warn_on___0 ; struct drm_i915_private *__p ; long tmp___1 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; __ret_warn_on = ! 
(crtc->state)->enable; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 4993, "WARN_ON(!crtc->state->enable)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((int )intel_crtc->active) { return; } else { } tmp___0 = intel_crtc_to_shared_dpll(intel_crtc); if ((unsigned long )tmp___0 != (unsigned long )((struct intel_shared_dpll *)0)) { intel_enable_shared_dpll(intel_crtc); } else { } if ((int )(intel_crtc->config)->has_dp_encoder) { intel_dp_set_m_n(intel_crtc, 0); } else { } intel_set_pipe_timings(intel_crtc); if ((unsigned int )(intel_crtc->config)->cpu_transcoder != 3U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )(intel_crtc->config)->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393260U), (intel_crtc->config)->pixel_multiplier - 1U, 1); } else { } if ((int )(intel_crtc->config)->has_pch_encoder) { intel_cpu_transcoder_set_m_n(intel_crtc, & (intel_crtc->config)->fdi_m_n, (struct intel_link_m_n *)0); } else { } haswell_set_pipeconf(crtc); intel_set_pipe_csc(crtc); intel_crtc->active = 1; intel_set_cpu_fifo_underrun_reporting(dev_priv, (enum pipe )pipe, 1); __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_51990; ldv_51989: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->pre_enable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->pre_enable))(encoder); } else { } } else { } __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 
0xfffffffffffffff8UL; ldv_51990: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_51989; } else { } if ((int )(intel_crtc->config)->has_pch_encoder) { intel_set_pch_fifo_underrun_reporting(dev_priv, 0, 1); (*(dev_priv->display.fdi_link_train))(crtc); } else { } intel_ddi_enable_pipe_clock(intel_crtc); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 9U) { skylake_pfit_update(intel_crtc, 1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 8U) { ironlake_pfit_enable(intel_crtc); } else { __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { __p = to_i915((struct drm_device const *)dev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5040, "Missing switch case (%lu) in %s\n", (long )__p->info.gen, "haswell_crtc_enable"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } } intel_crtc_load_lut(crtc); intel_ddi_set_pipe_settings(crtc); intel_ddi_enable_transcoder_func(crtc); intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); if ((int )(intel_crtc->config)->has_pch_encoder) { lpt_pch_enable(crtc); } else { } if ((int )(intel_crtc->config)->dp_encoder_is_mst) { intel_ddi_set_vc_payload_alloc(crtc, 1); } else { } assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_52018; ldv_52017: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { (*(encoder->enable))(encoder); intel_opregion_notify_encoder(encoder, 1); } else { } __mptr___3 = (struct list_head const 
*)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_52018: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52017; } else { } haswell_mode_set_planes_workaround(intel_crtc); return; } }
/*
 * ironlake_pfit_disable() - turn the PCH panel fitter off for one CRTC.
 * If pch_pfit.enabled was recorded in the CRTC config, zero the three
 * per-pipe fitter registers at (pipe * 2048 + 426112 / 426096 / 426100).
 * NOTE(review): those decimals are presumably PF_CTL / PF_WIN_POS /
 * PF_WIN_SZ (0x68080 / 0x68070 / 0x68074) — confirm against i915_reg.h.
 * mmio_writel is the uncore register-write hook of this CIL-flattened
 * harness; the goto/label pairs elsewhere are CIL's rendering of loops.
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; if ((int )(crtc->config)->pch_pfit.enabled) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426112), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426096), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 2048 + 426100), 0U, 1); } else { } return; } }
/*
 * ironlake_crtc_disable() - full modeset disable path for ILK-class
 * hardware. Only the declarations and the leading "already inactive?"
 * guard are on this line; the body continues on the following lines.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *encoder ; int pipe ; u32 reg ; u32 temp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct drm_i915_private *__p ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; if (! 
intel_crtc->active) { return; } else { } __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_52043; ldv_52042: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { (*(encoder->disable))(encoder); } else { } __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_52043: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52042; } else { } drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); if ((int )(intel_crtc->config)->has_pch_encoder) { intel_set_pch_fifo_underrun_reporting(dev_priv, (enum transcoder )pipe, 0); } else { } intel_disable_pipe(intel_crtc); ironlake_pfit_disable(intel_crtc); if ((int )(intel_crtc->config)->has_pch_encoder) { ironlake_fdi_disable(crtc); } else { } __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_52050; ldv_52049: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->post_disable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->post_disable))(encoder); } else { } } else { } __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_52050: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52049; } else { } if ((int )(intel_crtc->config)->has_pch_encoder) { ironlake_disable_pch_transcoder(dev_priv, (enum pipe )pipe); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 2U) { reg = (u32 )(pipe * 4096 + 918272); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); temp = temp & 536870911U; temp = temp | 1610612736U; 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815104L, 1); temp = (u32 )(~ ((1 << (pipe * 4 + 3)) | (1 << pipe * 4))) & temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 815104L, temp, 1); } else { } intel_disable_shared_dpll(intel_crtc); ironlake_fdi_pll_disable(intel_crtc); } else { } intel_crtc->active = 0; intel_update_watermarks(crtc); mutex_lock_nested(& dev->struct_mutex, 0U); intel_fbc_update(dev); mutex_unlock(& dev->struct_mutex); return; } } static void haswell_crtc_disable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *encoder ; enum transcoder cpu_transcoder ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct intel_shared_dpll *tmp___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; if (! 
intel_crtc->active) { return; } else { } __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_52073; ldv_52072: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { intel_opregion_notify_encoder(encoder, 0); (*(encoder->disable))(encoder); } else { } __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_52073: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52072; } else { } drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); if ((int )(intel_crtc->config)->has_pch_encoder) { intel_set_pch_fifo_underrun_reporting(dev_priv, 0, 0); } else { } intel_disable_pipe(intel_crtc); if ((int )(intel_crtc->config)->dp_encoder_is_mst) { intel_ddi_set_vc_payload_alloc(crtc, 0); } else { } intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 9U) { skylake_pfit_update(intel_crtc, 0); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 8U) { ironlake_pfit_disable(intel_crtc); } else { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { __p = to_i915((struct drm_device const *)dev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5186, "Missing switch case (%lu) in %s\n", (long )__p->info.gen, "haswell_crtc_disable"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } } intel_ddi_disable_pipe_clock(intel_crtc); if ((int )(intel_crtc->config)->has_pch_encoder) { lpt_disable_pch_transcoder(dev_priv); 
intel_ddi_fdi_disable(crtc); } else { } __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_52101; ldv_52100: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->post_disable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->post_disable))(encoder); } else { } } else { } __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_52101: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52100; } else { } intel_crtc->active = 0; intel_update_watermarks(crtc); mutex_lock_nested(& dev->struct_mutex, 0U); intel_fbc_update(dev); mutex_unlock(& dev->struct_mutex); tmp___0 = intel_crtc_to_shared_dpll(intel_crtc); if ((unsigned long )tmp___0 != (unsigned long )((struct intel_shared_dpll *)0)) { intel_disable_shared_dpll(intel_crtc); } else { } return; } } static void ironlake_crtc_off(struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; intel_put_shared_dpll(intel_crtc); return; } } static void i9xx_pfit_enable(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc_state *pipe_config ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe_config = crtc->config; if (pipe_config->gmch_pfit.control == 0U) { return; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), 1); __ret_warn_on = (int )tmp < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5230, "WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L);
/* Tail of i9xx_pfit_enable(): after WARNing if the fitter was already
 * on, assert the pipe is disabled, then program the GMCH panel fitter:
 * ratios at display_mmio_offset + 397876, control at + 397872, and zero
 * the per-transcoder register at + 393248 (offset via trans_offsets[]).
 * NOTE(review): register identities inferred — confirm i915_reg.h. */
assert_pipe(dev_priv, crtc->pipe, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397876U), pipe_config->gmch_pfit.pgm_ratios, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), pipe_config->gmch_pfit.control, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )crtc->pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393248U), 0U, 1); return; } }
/*
 * port_to_power_domain() - map a digital port (0..3, i.e. A..D) to its
 * intel_display_power_domain value (11, 13, 15, 17 respectively). Any
 * other port value fires a one-shot warning — the __warned /
 * __ret_warn_once / warn_slowpath_fmt dance below is CIL's expansion of
 * WARN_ON_ONCE(1) — and returns 20, presumably the "other/none" domain;
 * confirm against the intel_display_power_domain enum.
 */
static enum intel_display_power_domain port_to_power_domain(enum port port ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { switch ((unsigned int )port) { case 0U: ; return (11); case 1U: ; return (13); case 2U: ; return (15); case 3U: ; return (17); default: __ret_warn_once = 1; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5253, "WARN_ON_ONCE(1)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return (20); } } }
/*
 * intel_display_port_power_domain() - dispatch on the encoder type to
 * find which power domain its port needs. The case 10U (DDI) path
 * WARN_ON_ONCEs when the device lacks DDI support (byte flag read at
 * offset 46 of dev_priv) before falling through to the digital-port
 * cases. Only the declarations and the start of that first case are on
 * these lines; the body continues on the following lines.
 */
enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder ) { struct drm_device *dev ; struct intel_digital_port *intel_dig_port ; bool __warned ; int __ret_warn_once ; struct drm_i915_private *__p ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; enum intel_display_power_domain tmp___2 ; struct intel_dp_mst_encoder *tmp___3 ; enum intel_display_power_domain tmp___4 ; { dev = intel_encoder->base.dev; switch ((unsigned int )intel_encoder->type) { case 10U: __p = to_i915((struct drm_device const *)dev); __ret_warn_once = (unsigned int )*((unsigned char *)__p + 46UL) == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5271, "WARN_ON_ONCE(!HAS_DDI(dev))"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); case 7U: ; case 6U: ; case 8U: intel_dig_port = enc_to_dig_port(& intel_encoder->base); tmp___2 = port_to_power_domain(intel_dig_port->port); return (tmp___2); case 11U: tmp___3 = enc_to_mst(& intel_encoder->base); intel_dig_port = tmp___3->primary; tmp___4 = port_to_power_domain(intel_dig_port->port); return (tmp___4); case 1U: ; return (19); case 9U: ; return (18); default: ; return (20); } } } static unsigned long get_crtc_power_domains(struct drm_crtc *crtc ) { struct drm_device *dev ; struct intel_encoder *intel_encoder ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; unsigned long mask ; enum transcoder transcoder ; struct list_head const *__mptr___0 ; enum intel_display_power_domain tmp ; struct list_head const *__mptr___1 ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; transcoder = intel_pipe_to_cpu_transcoder((struct drm_i915_private *)dev->dev_private, pipe); mask = 1UL << (int )pipe; mask = ((unsigned int )transcoder != 3U ? 
1UL << (int )((unsigned int )transcoder + 6U) : 512UL) | mask; if ((int )(intel_crtc->config)->pch_pfit.enabled || (int )(intel_crtc->config)->pch_pfit.force_thru) { mask = (1UL << ((int )pipe + 3)) | mask; } else { } __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_52170; ldv_52169: ; if ((unsigned long )intel_encoder->base.crtc == (unsigned long )crtc) { tmp = intel_display_port_power_domain(intel_encoder); mask = (1UL << (int )tmp) | mask; } else { } __mptr___1 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_52170: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52169; } else { } return (mask); } } static void modeset_update_crtc_power_domains(struct drm_atomic_state *state ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned long pipe_domains[3U] ; unsigned int tmp ; struct intel_crtc *crtc ; struct list_head const *__mptr ; enum intel_display_power_domain domain ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; enum intel_display_power_domain domain___0 ; struct list_head const *__mptr___2 ; { dev = state->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe_domains[0] = 0UL; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } pipe_domains[tmp] = 0UL; tmp = tmp + 1U; } __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_52189; ldv_52188: ; if (! 
(crtc->base.state)->enable) { goto ldv_52184; } else { } pipe_domains[(int )crtc->pipe] = get_crtc_power_domains(& crtc->base); domain = 0; goto ldv_52186; ldv_52185: ; if (((unsigned long )(1 << (int )domain) & pipe_domains[(int )crtc->pipe]) != 0UL) { intel_display_power_get(dev_priv, domain); } else { } domain = (enum intel_display_power_domain )((unsigned int )domain + 1U); ldv_52186: ; if ((unsigned int )domain <= 28U) { goto ldv_52185; } else { } ldv_52184: __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_52189: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_52188; } else { } if ((unsigned long )dev_priv->display.modeset_global_resources != (unsigned long )((void (*)(struct drm_atomic_state * ))0)) { (*(dev_priv->display.modeset_global_resources))(state); } else { } __mptr___1 = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr___1 + 0xfffffffffffffff0UL; goto ldv_52200; ldv_52199: domain___0 = 0; goto ldv_52197; ldv_52196: ; if (((unsigned long )(1 << (int )domain___0) & crtc->enabled_power_domains) != 0UL) { intel_display_power_put(dev_priv, domain___0); } else { } domain___0 = (enum intel_display_power_domain )((unsigned int )domain___0 + 1U); ldv_52197: ; if ((unsigned int )domain___0 <= 28U) { goto ldv_52196; } else { } crtc->enabled_power_domains = pipe_domains[(int )crtc->pipe]; __mptr___2 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___2 + 0xfffffffffffffff0UL; ldv_52200: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_52199; } else { } intel_display_set_init_power(dev_priv, 0); return; } } void broxton_set_cdclk(struct drm_device *dev , int frequency ) { struct drm_i915_private *dev_priv ; uint32_t divider ; uint32_t ratio ; uint32_t current_freq ; int ret ; uint32_t tmp ; 
unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; uint32_t val ; unsigned long timeout_____0 ; unsigned long tmp___4 ; int ret_____0 ; uint32_t tmp___5 ; bool tmp___6 ; uint32_t tmp___7 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; switch (frequency) { case 144000: divider = 12582912U; ratio = 60U; goto ldv_52212; case 288000: divider = 8388608U; ratio = 60U; goto ldv_52212; case 384000: divider = 4194304U; ratio = 60U; goto ldv_52212; case 576000: divider = 0U; ratio = 60U; goto ldv_52212; case 624000: divider = 0U; ratio = 65U; goto ldv_52212; case 19200: ratio = 0U; divider = 0U; goto ldv_52212; default: drm_err("unsupported CDCLK freq %d", frequency); return; } ldv_52212: mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = sandybridge_pcode_write(dev_priv, 23U, 2147483648U); mutex_unlock(& dev_priv->rps.hw_lock); if (ret != 0) { drm_err("PCode CDCLK freq change notify failed (err %d, freq %d)\n", ret, frequency); return; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286720L, 1); current_freq = tmp & 2047U; current_freq = current_freq * 500U + 1000U; if ((frequency == 19200 || frequency == 624000) || current_freq == 624000U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286832L, 2147483647U, 1); tmp___0 = msecs_to_jiffies(1U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52229; ldv_52228: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286832L, 1); if ((tmp___1 & 1073741824U) != 0U) { ret__ = -110; } else { } goto ldv_52227; } else { } tmp___2 = drm_can_sleep___5(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52229: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286832L, 1); if ((tmp___3 & 1073741824U) != 0U) { goto ldv_52228; } else { } ldv_52227: ; if (ret__ != 0) { drm_err("timout waiting for DE PLL 
unlock\n"); } else { } } else { } if (frequency != 19200) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 446464L, 1); val = val & 4294967040U; val = val | ratio; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 446464L, val, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286832L, 2147483648U, 1); tmp___4 = msecs_to_jiffies(1U); timeout_____0 = (tmp___4 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_52242; ldv_52241: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286832L, 1); if ((tmp___5 & 1073741824U) == 0U) { ret_____0 = -110; } else { } goto ldv_52240; } else { } tmp___6 = drm_can_sleep___5(); if ((int )tmp___6) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52242: tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286832L, 1); if ((tmp___7 & 1073741824U) == 0U) { goto ldv_52241; } else { } ldv_52240: ; if (ret_____0 != 0) { drm_err("timeout waiting for DE PLL lock\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286720L, 1); val = val & 4282384383U; val = val | divider; val = val & 4294901759U; if (frequency > 499999) { val = val | 65536U; } else { } val = val & 4294965248U; val = (uint32_t )((frequency + -1000) / 500) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286720L, val, 1); } else { } mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = sandybridge_pcode_write(dev_priv, 23U, (u32 )((frequency + 24999) / 25000)); mutex_unlock(& dev_priv->rps.hw_lock); if (ret != 0) { drm_err("PCode CDCLK freq set failed, (err %d, freq %d)\n", ret, frequency); return; } else { } dev_priv->cdclk_freq = (unsigned int )frequency; return; } } void broxton_init_cdclk(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t val ; long tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 
287752L, 1); val = val & 4294967279U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 287752L, val, 1); intel_display_power_get(dev_priv, 23); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286832L, 1); if ((int )tmp___0 < 0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("broxton_init_cdclk", "Display already initialized\n"); } else { } return; } else { } broxton_set_cdclk(dev, 624000); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282632L, tmp___1 | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 0); __const_udelay(42950UL); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); if ((tmp___2 & 1073741824U) == 0U) { drm_err("DBuf power enable timeout!\n"); } else { } return; } } void broxton_uninit_cdclk(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282632L, tmp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 0); __const_udelay(42950UL); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); if ((tmp___0 & 1073741824U) != 0U) { drm_err("DBuf power disable timeout!\n"); } else { } broxton_set_cdclk(dev, 19200); intel_display_power_put(dev_priv, 23); return; } } static struct skl_cdclk_entry const skl_cdclk_frequencies[7U] = { {308570U, 8640U}, {337500U, 8100U}, {432000U, 8640U}, {450000U, 8100U}, {540000U, 8100U}, {617140U, 8640U}, {675000U, 8100U}}; static unsigned int skl_cdclk_decimal(unsigned int freq ) { { return ((freq - 1000U) / 500U); } } static unsigned int skl_cdclk_get_vco(unsigned int freq ) { unsigned int i ; struct skl_cdclk_entry const *e ; { i = 0U; goto ldv_52269; ldv_52268: e = (struct skl_cdclk_entry 
const *)(& skl_cdclk_frequencies) + (unsigned long )i; if ((unsigned int )e->freq == freq) { return ((unsigned int )e->vco); } else { } i = i + 1U; ldv_52269: ; if (i <= 6U) { goto ldv_52268; } else { } return (8100U); } } static void skl_dpll0_enable(struct drm_i915_private *dev_priv , unsigned int required_vco ) { unsigned int min_freq ; u32 val ; unsigned int tmp ; uint32_t tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286720L, 1); val = val; val = val | 134217728U; if (required_vco == 8640U) { min_freq = 308570U; } else { min_freq = 337500U; } tmp = skl_cdclk_decimal(min_freq); val = tmp | 134217728U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286720L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286720L, 0); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 1); val = val & 4294967233U; val = val | 1U; if (required_vco == 8640U) { val = val | 8U; } else { val = val | 4U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 442456L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 0); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286736L, tmp___0 | 2147483648U, 1); tmp___1 = msecs_to_jiffies(5U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52287; ldv_52286: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); if ((tmp___2 & 1073741824U) == 0U) { ret__ = -110; } else { } goto ldv_52285; } else { } tmp___3 = drm_can_sleep___5(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52287: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); if ((tmp___4 & 1073741824U) == 0U) { goto ldv_52286; } else { } ldv_52285: ; if (ret__ != 0) { drm_err("DPLL0 not 
locked\n"); } else { } return; } } /* skl_cdclk_pcu_ready: one PCU mailbox query (command 7) under rps.hw_lock; "ready" means the pcode read succeeded (ret == 0) and bit 0 of the returned value is set. */ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv ) { int ret ; u32 val ; { val = 3U; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); ret = sandybridge_pcode_read(dev_priv, 7U, & val); mutex_unlock(& dev_priv->rps.hw_lock); return ((bool )(ret == 0 && (int )val & 1)); } } /* skl_cdclk_wait_for_pcu_ready: poll skl_cdclk_pcu_ready() up to 15 times with a busy delay between attempts; returns 1 as soon as the PCU reports ready, 0 after all attempts fail. */ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv ) { unsigned int i ; bool tmp ; { i = 0U; goto ldv_52299; ldv_52298: tmp = skl_cdclk_pcu_ready(dev_priv); if ((int )tmp) { return (1); } else { } __const_udelay(42950UL); /* presumably udelay(10) expanded by CIL -- confirm against kernel source */ i = i + 1U; ldv_52299: ; if (i <= 14U) { goto ldv_52298; } else { } return (0); } } /* skl_set_cdclk: wait for the PCU to become ready, then program register 286720 (0x46000 -- CDCLK_CTL on SKL, confirm) with the frequency-select field OR'd with the decimal frequency code from skl_cdclk_decimal(), post a read to flush, and acknowledge the new rate to the PCU via mailbox command 7. Bails out with an error if the PCU never becomes ready. Note the unlisted-frequency default deliberately shares the 308570/337500 select value. */ static void skl_set_cdclk(struct drm_i915_private *dev_priv , unsigned int freq ) { u32 freq_select ; u32 pcu_ack ; long tmp ; bool tmp___0 ; int tmp___1 ; unsigned int tmp___2 ; { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("skl_set_cdclk", "Changing CDCLK to %dKHz\n", freq); } else { } tmp___0 = skl_cdclk_wait_for_pcu_ready(dev_priv); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { drm_err("failed to inform PCU about cdclk change\n"); return; } else { } switch (freq) { case 450000U: ; case 432000U: freq_select = 0U; pcu_ack = 1U; goto ldv_52310; case 540000U: freq_select = 67108864U; pcu_ack = 2U; goto ldv_52310; case 308570U: ; case 337500U: ; default: freq_select = 134217728U; pcu_ack = 0U; goto ldv_52310; case 617140U: ; case 675000U: freq_select = 201326592U; pcu_ack = 3U; goto ldv_52310; } ldv_52310: tmp___2 = skl_cdclk_decimal(freq); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286720L, tmp___2 | freq_select, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286720L, 0); mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); sandybridge_pcode_write(dev_priv, 7U, pcu_ack); mutex_unlock(& dev_priv->rps.hw_lock); return; } } /* skl_uninit_cdclk: power down DBuf (reg 282632), disable DPLL0 (reg 286736) with a 1 ms timeout, then drop the display power reference taken at init. */ void skl_uninit_cdclk(struct drm_i915_private *dev_priv ) { uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; unsigned long 
timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282632L, tmp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 0); __const_udelay(42950UL); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); if ((tmp___0 & 1073741824U) != 0U) { drm_err("DBuf power disable timeout\n"); } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286736L, tmp___1 & 2147483647U, 1); tmp___2 = msecs_to_jiffies(1U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52330; ldv_52329: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); if ((tmp___3 & 1073741824U) != 0U) { ret__ = -110; } else { } goto ldv_52328; } else { } tmp___4 = drm_can_sleep___5(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52330: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); if ((tmp___5 & 1073741824U) != 0U) { goto ldv_52329; } else { } ldv_52328: ; if (ret__ != 0) { drm_err("Couldn\'t disable DPLL0\n"); } else { } intel_display_power_put(dev_priv, 23); return; } } void skl_init_cdclk(struct drm_i915_private *dev_priv ) { u32 val ; unsigned int required_vco ; long tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 287752L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 287752L, val | 16U, 1); intel_display_power_get(dev_priv, 23); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); if ((int )tmp___0 < 0) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("skl_init_cdclk", "DPLL0 already running\n"); } else { } return; } else { } 
required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); skl_dpll0_enable(dev_priv, required_vco); skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 282632L, tmp___1 | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 0); __const_udelay(42950UL); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 282632L, 1); if ((tmp___2 & 1073741824U) == 0U) { drm_err("DBuf power enable timeout\n"); } else { } return; } } static int valleyview_get_vco(struct drm_i915_private *dev_priv ) { int hpll_freq ; int vco_freq[4U] ; u32 tmp ; { vco_freq[0] = 800; vco_freq[1] = 1600; vco_freq[2] = 2000; vco_freq[3] = 2400; mutex_lock_nested(& dev_priv->sb_lock, 0U); tmp = vlv_cck_read(dev_priv, 8U); hpll_freq = (int )tmp & 3; mutex_unlock(& dev_priv->sb_lock); return (vco_freq[hpll_freq] * 1000); } } static void vlv_update_cdclk(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->display.get_display_clock_speed))(dev); dev_priv->cdclk_freq = (unsigned int )tmp; tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("vlv_update_cdclk", "Current CD clock rate: %d kHz\n", dev_priv->cdclk_freq); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1598736L, (dev_priv->cdclk_freq + 999U) / 1000U, 1); return; } } static void valleyview_set_cdclk(struct drm_device *dev , int cdclk ) { struct drm_i915_private *dev_priv ; u32 val ; u32 cmd ; int __ret_warn_on ; int tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; u32 tmp___2 ; bool tmp___3 ; u32 tmp___4 ; u32 divider ; unsigned int __x ; int __d ; unsigned long timeout_____0 ; unsigned long tmp___5 ; int ret_____0 ; u32 tmp___6 ; bool tmp___7 ; u32 tmp___8 ; { dev_priv = (struct drm_i915_private 
*)dev->dev_private; tmp = (*(dev_priv->display.get_display_clock_speed))(dev); __ret_warn_on = (unsigned int )tmp != dev_priv->cdclk_freq; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5774, "WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->cdclk_freq)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if (cdclk > 319999) { cmd = 2U; } else if (cdclk == 266667) { cmd = 1U; } else { cmd = 0U; } mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); val = vlv_punit_read(dev_priv, 54U); val = val & 4294918143U; val = (cmd << 14) | val; vlv_punit_write(dev_priv, 54U, val); tmp___1 = msecs_to_jiffies(50U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52367; ldv_52366: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = vlv_punit_read(dev_priv, 54U); if ((tmp___2 & 3221225472U) != cmd << 30) { ret__ = -110; } else { } goto ldv_52365; } else { } tmp___3 = drm_can_sleep___5(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52367: tmp___4 = vlv_punit_read(dev_priv, 54U); if ((tmp___4 & 3221225472U) != cmd << 30) { goto ldv_52366; } else { } ldv_52365: ; if (ret__ != 0) { drm_err("timed out waiting for CDclk change\n"); } else { } mutex_unlock(& dev_priv->rps.hw_lock); mutex_lock_nested(& dev_priv->sb_lock, 0U); if (cdclk == 400000) { __x = dev_priv->hpll_freq << 1; __d = cdclk; divider = ((unsigned int )(__d / 2) + __x) / (unsigned int )__d - 1U; val = vlv_cck_read(dev_priv, 107U); val = val & 4294967264U; val = val | divider; vlv_cck_write(dev_priv, 107U, val); tmp___5 = msecs_to_jiffies(50U); timeout_____0 = (tmp___5 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_52383; ldv_52382: ; if ((long 
)(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___6 = vlv_cck_read(dev_priv, 107U); if ((tmp___6 & 7936U) != divider << 8) { ret_____0 = -110; } else { } goto ldv_52381; } else { } tmp___7 = drm_can_sleep___5(); if ((int )tmp___7) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52383: tmp___8 = vlv_cck_read(dev_priv, 107U); if ((tmp___8 & 7936U) != divider << 8) { goto ldv_52382; } else { } ldv_52381: ; if (ret_____0 != 0) { drm_err("timed out waiting for CDclk change\n"); } else { } } else { } val = vlv_bunit_read(dev_priv, 17U); val = val & 4294967168U; if (cdclk == 400000) { val = val | 18U; } else { val = val | 12U; } vlv_bunit_write(dev_priv, 17U, val); mutex_unlock(& dev_priv->sb_lock); vlv_update_cdclk(dev); return; } } static void cherryview_set_cdclk(struct drm_device *dev , int cdclk ) { struct drm_i915_private *dev_priv ; u32 val ; u32 cmd ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; unsigned int __x ; int __d ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; u32 tmp___3 ; bool tmp___4 ; u32 tmp___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->display.get_display_clock_speed))(dev); __ret_warn_on = (unsigned int )tmp != dev_priv->cdclk_freq; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5839, "WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->cdclk_freq)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); switch (cdclk) { case 333333: ; case 320000: ; case 266667: ; case 200000: ; goto ldv_52398; default: __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 5848, "Missing switch case (%lu) in %s\n", (long )cdclk, "cherryview_set_cdclk"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return; } ldv_52398: __x = dev_priv->hpll_freq << 1; __d = cdclk; cmd = ((unsigned int )(__d / 2) + __x) / (unsigned int )__d - 1U; mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); val = vlv_punit_read(dev_priv, 54U); val = val & 4294959359U; val = (cmd << 8) | val; vlv_punit_write(dev_priv, 54U, val); tmp___2 = msecs_to_jiffies(50U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52416; ldv_52415: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = vlv_punit_read(dev_priv, 54U); if ((tmp___3 & 520093696U) != cmd << 24) { ret__ = -110; } else { } goto ldv_52414; } else { } tmp___4 = drm_can_sleep___5(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52416: tmp___5 = vlv_punit_read(dev_priv, 54U); if ((tmp___5 & 520093696U) != cmd << 24) { goto ldv_52415; } else { } ldv_52414: ; if (ret__ != 0) { drm_err("timed out waiting for CDclk change\n"); } else { } mutex_unlock(& dev_priv->rps.hw_lock); vlv_update_cdclk(dev); return; } } static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv , int max_pixclk ) { int freq_320 ; int limit ; int tmp ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { freq_320 = (dev_priv->hpll_freq << 1) % 320000U != 0U ? 
333333 : 320000; /* tail of valleyview_calc_cdclk: guardband limit is 95 when the info byte at offset 45 is set and gen == 8 (CHV -- confirm flag meaning), otherwise 90; cdclk candidates are 400000 / freq_320 / 266667 / 200000 kHz. */ __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { tmp = 95; } else { tmp = 90; } } else { tmp = 90; } limit = tmp; __p___3 = dev_priv; if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { goto _L___0; } else { __p___4 = dev_priv; if ((unsigned int )((unsigned char )__p___4->info.gen) != 8U) { _L___0: /* CIL Label */ if ((freq_320 * limit) / 100 < max_pixclk) { return (400000); } else { goto _L; } } else _L: /* CIL Label */ if ((limit * 266667) / 100 < max_pixclk) { return (freq_320); } else if (max_pixclk > 0) { return (266667); } else { return (200000); } } } } /* broxton_calc_cdclk: map the maximum pixel clock (kHz) to the lowest sufficient BXT cdclk step (144000 / 288000 / 384000 / 576000 / 624000 kHz). */ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv , int max_pixclk ) { { if (max_pixclk > 518400) { return (624000); } else if (max_pixclk > 345600) { return (576000); } else if (max_pixclk > 259200) { return (384000); } else if (max_pixclk > 129600) { return (288000); } else { return (144000); } } } /* intel_mode_max_pixclk: walk every CRTC on the device; for each, use the atomic state's crtc_state when a state is supplied, else the current config, and return the maximum adjusted-mode crtc_clock among enabled CRTCs (0 if none). Propagates a negative errno if intel_atomic_get_crtc_state fails. */ static int intel_mode_max_pixclk(struct drm_device *dev , struct drm_atomic_state *state ) { struct intel_crtc *intel_crtc ; struct intel_crtc_state *crtc_state ; int max_pixclk ; struct list_head const *__mptr ; long tmp ; bool tmp___0 ; int _max1 ; int _max2 ; struct list_head const *__mptr___0 ; { max_pixclk = 0; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; intel_crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_52468; ldv_52467: ; if ((unsigned long )state != (unsigned long )((struct drm_atomic_state *)0)) { crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); } else { crtc_state = intel_crtc->config; } tmp___0 = IS_ERR((void const *)crtc_state); if ((int )tmp___0) { tmp = PTR_ERR((void const *)crtc_state); return ((int )tmp); } else { } if (! crtc_state->base.enable) { goto ldv_52463; } else { } _max1 = max_pixclk; _max2 = crtc_state->base.adjusted_mode.crtc_clock; max_pixclk = _max1 > _max2 ? 
_max1 : _max2; ldv_52463: __mptr___0 = (struct list_head const *)intel_crtc->base.head.next; intel_crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_52468: ; if ((unsigned long )(& intel_crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_52467; } else { } return (max_pixclk); } } static int valleyview_modeset_global_pipes(struct drm_atomic_state *state ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct drm_crtc *crtc ; struct drm_crtc_state *crtc_state ; int max_pixclk ; int tmp___0 ; int cdclk ; int i ; struct drm_i915_private *__p ; struct list_head const *__mptr ; long tmp___1 ; bool tmp___2 ; struct list_head const *__mptr___0 ; { tmp = to_i915((struct drm_device const *)state->dev); dev_priv = tmp; tmp___0 = intel_mode_max_pixclk(state->dev, state); max_pixclk = tmp___0; if (max_pixclk < 0) { return (max_pixclk); } else { } __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); } else { cdclk = broxton_calc_cdclk(dev_priv, max_pixclk); } if ((unsigned int )cdclk == dev_priv->cdclk_freq) { return (0); } else { } __mptr = (struct list_head const *)(state->dev)->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_52491; ldv_52490: ; if (! 
(crtc->state)->enable) { goto ldv_52489; } else { } crtc_state = drm_atomic_get_crtc_state(state, crtc); tmp___2 = IS_ERR((void const *)crtc_state); if ((int )tmp___2) { tmp___1 = PTR_ERR((void const *)crtc_state); return ((int )tmp___1); } else { } ldv_52489: __mptr___0 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_52491: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& (state->dev)->mode_config.crtc_list)) { goto ldv_52490; } else { } i = 0; goto ldv_52494; ldv_52493: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { if ((int )crtc_state->enable) { crtc_state->mode_changed = 1; } else { } } else { } i = i + 1; ldv_52494: ; if ((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_52493; } else { } return (0); } } static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv ) { unsigned int credits ; unsigned int default_credits ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; unsigned int __x ; int __d ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; { __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { default_credits = 1073741824U; } else { default_credits = 0U; } } else { default_credits = 0U; } __x = dev_priv->cdclk_freq; __d = 1000; if (((unsigned int )(__d / 2) + __x) / (unsigned int )__d >= dev_priv->rps.cz_freq) { __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { credits = 2147483648U; } else { credits = 1879048192U; } } else { credits = 1879048192U; } } else { credits = default_credits; } 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1598732L, default_credits | 16384U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1598732L, credits | 134234112U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1598732L, 1); __ret_warn_on = (tmp & 134217728U) != 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6022, "WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int max_pixclk ; int tmp ; int req_cdclk ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = old_state->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_mode_max_pixclk(dev, (struct drm_atomic_state *)0); max_pixclk = tmp; __ret_warn_on = max_pixclk < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6034, "WARN_ON(max_pixclk < 0)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); if ((unsigned int )req_cdclk != dev_priv->cdclk_freq) { intel_display_power_get(dev_priv, 0); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if 
((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { cherryview_set_cdclk(dev, req_cdclk); } else { valleyview_set_cdclk(dev, req_cdclk); } } else { valleyview_set_cdclk(dev, req_cdclk); } vlv_program_pfi_credits(dev_priv); intel_display_power_put(dev_priv, 0); } else { } return; } } static void valleyview_crtc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *encoder ; int pipe ; bool is_dsi ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *dev_priv___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; { dev = crtc->dev; tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; __ret_warn_on = ! (crtc->state)->enable; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6071, "WARN_ON(!crtc->state->enable)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((int )intel_crtc->active) { return; } else { } is_dsi = intel_pipe_has_type(intel_crtc, 9); if (! 
is_dsi) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { chv_prepare_pll(intel_crtc, (struct intel_crtc_state const *)intel_crtc->config); } else { vlv_prepare_pll(intel_crtc, (struct intel_crtc_state const *)intel_crtc->config); } } else { vlv_prepare_pll(intel_crtc, (struct intel_crtc_state const *)intel_crtc->config); } } else { } if ((int )(intel_crtc->config)->has_dp_encoder) { intel_dp_set_m_n(intel_crtc, 0); } else { } intel_set_pipe_timings(intel_crtc); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { if (pipe == 1) { dev_priv___0 = (struct drm_i915_private *)dev->dev_private; (*(dev_priv___0->uncore.funcs.mmio_writel))(dev_priv___0, (off_t )(((unsigned int )(dev_priv___0->info.trans_offsets[pipe] - dev_priv___0->info.trans_offsets[0]) + (unsigned int )dev_priv___0->info.display_mmio_offset) + 395776U), 0U, 1); (*(dev_priv___0->uncore.funcs.mmio_writel))(dev_priv___0, (off_t )(((unsigned int )(dev_priv___0->info.trans_offsets[pipe] - dev_priv___0->info.trans_offsets[0]) + (unsigned int )dev_priv___0->info.display_mmio_offset) + 395780U), 0U, 1); } else { } } else { } } else { } i9xx_set_pipeconf(intel_crtc); intel_crtc->active = 1; intel_set_cpu_fifo_underrun_reporting(dev_priv, (enum pipe )pipe, 1); __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_52594; ldv_52593: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->pre_pll_enable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->pre_pll_enable))(encoder); } else { } } else { } __mptr___1 = 
(struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_52594: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52593; } else { } if (! is_dsi) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { chv_enable_pll(intel_crtc, (struct intel_crtc_state const *)intel_crtc->config); } else { vlv_enable_pll(intel_crtc, (struct intel_crtc_state const *)intel_crtc->config); } } else { vlv_enable_pll(intel_crtc, (struct intel_crtc_state const *)intel_crtc->config); } } else { } __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_52613; ldv_52612: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->pre_enable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->pre_enable))(encoder); } else { } } else { } __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_52613: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52612; } else { } i9xx_pfit_enable(intel_crtc); intel_crtc_load_lut(crtc); intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); __mptr___4 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___4 + 0xfffffffffffffff8UL; goto ldv_52620; ldv_52619: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { (*(encoder->enable))(encoder); } else { } __mptr___5 = (struct list_head const *)encoder->base.head.next; encoder = (struct 
intel_encoder *)__mptr___5 + 0xfffffffffffffff8UL; ldv_52620: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52619; } else { } return; } } /* i9xx_set_pll_dividers: write the precomputed DPLL divider values from the CRTC's saved hw state to their per-pipe registers: fp0 at (pipe + 3080) * 8 = pipe*8 + 24640 (0x6040) and fp1 at pipe*8 + 24644 (0x6044) -- presumably the FPA0/FPA1 pair, confirm against i915 register headers. */ static void i9xx_set_pll_dividers(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )crtc->pipe + 3080) * 8), (crtc->config)->dpll_hw_state.fp0, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 8 + 24644), (crtc->config)->dpll_hw_state.fp1, 1); return; } } /* i9xx_crtc_enable: enable sequence for legacy (non-VLV/CHV) pipes -- PLL dividers, DP M/N when a DP encoder is present, pipe timings, pipeconf, underrun reporting (gen != 2), encoder pre_enable hooks, PLL, pfit, LUT, watermarks, pipe, then encoder enable hooks. */ static void i9xx_crtc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *encoder ; int pipe ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { dev = crtc->dev; tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; __ret_warn_on = ! 
(crtc->state)->enable; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6149, "WARN_ON(!crtc->state->enable)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((int )intel_crtc->active) { return; } else { } i9xx_set_pll_dividers(intel_crtc); if ((int )(intel_crtc->config)->has_dp_encoder) { intel_dp_set_m_n(intel_crtc, 0); } else { } intel_set_pipe_timings(intel_crtc); i9xx_set_pipeconf(intel_crtc); intel_crtc->active = 1; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { intel_set_cpu_fifo_underrun_reporting(dev_priv, (enum pipe )pipe, 1); } else { } __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_52650; ldv_52649: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->pre_enable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->pre_enable))(encoder); } else { } } else { } __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_52650: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52649; } else { } i9xx_enable_pll(intel_crtc); i9xx_pfit_enable(intel_crtc); intel_crtc_load_lut(crtc); intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_52657; ldv_52656: ; if ((unsigned long )encoder->base.crtc == (unsigned long 
)crtc) { (*(encoder->enable))(encoder); } else { } __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_52657: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52656; } else { } return; } } /* i9xx_pfit_disable: if this CRTC's gmch panel-fitter config is active (control != 0), assert the pipe is off and then zero the pfit control register at display_mmio_offset + 397872 (0x61230 -- presumably PFIT_CONTROL, confirm), logging the previous value when DRM_UT_KMS debugging is enabled. */ static void i9xx_pfit_disable(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; long tmp___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((crtc->config)->gmch_pfit.control == 0U) { return; } else { } assert_pipe(dev_priv, crtc->pipe, 0); tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), 1); drm_ut_debug_printk("i9xx_pfit_disable", "disabling pfit, current: 0x%08x\n", tmp); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), 0U, 1); return; } } /* i9xx_crtc_disable: reverse of i9xx_crtc_enable -- wait a vblank, run encoder disable hooks, vblank off, pipe off, pfit off, encoder post_disable hooks, then (for non-DSI) the platform-appropriate PLL disable (CHV / VLV / i9xx). */ static void i9xx_crtc_disable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *encoder ; int pipe ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool tmp ; int tmp___0 ; struct drm_i915_private *__p___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; if (! 
intel_crtc->active) { return; } else { } intel_wait_for_vblank(dev, pipe); __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_52680; ldv_52679: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { (*(encoder->disable))(encoder); } else { } __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_52680: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52679; } else { } drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); intel_disable_pipe(intel_crtc); i9xx_pfit_disable(intel_crtc); __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_52687; ldv_52686: ; if ((unsigned long )encoder->base.crtc == (unsigned long )crtc) { if ((unsigned long )encoder->post_disable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->post_disable))(encoder); } else { } } else { } __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_52687: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52686; } else { } tmp = intel_pipe_has_type(intel_crtc, 9); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { chv_disable_pll(dev_priv, (enum pipe )pipe); } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { vlv_disable_pll(dev_priv, (enum pipe 
)pipe); } else { i9xx_disable_pll(intel_crtc); } } } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 2U) { intel_set_cpu_fifo_underrun_reporting(dev_priv, (enum pipe )pipe, 0); } else { } intel_crtc->active = 0; intel_update_watermarks(crtc); mutex_lock_nested(& dev->struct_mutex, 0U); intel_fbc_update(dev); mutex_unlock(& dev->struct_mutex); return; } } static void i9xx_crtc_off(struct drm_crtc *crtc ) { { return; } } void intel_crtc_control(struct drm_crtc *crtc , bool enable ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum intel_display_power_domain domain ; unsigned long domains ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if ((int )enable) { if (! intel_crtc->active) { domains = get_crtc_power_domains(crtc); domain = 0; goto ldv_52728; ldv_52727: ; if (((unsigned long )(1 << (int )domain) & domains) != 0UL) { intel_display_power_get(dev_priv, domain); } else { } domain = (enum intel_display_power_domain )((unsigned int )domain + 1U); ldv_52728: ; if ((unsigned int )domain <= 28U) { goto ldv_52727; } else { } intel_crtc->enabled_power_domains = domains; (*(dev_priv->display.crtc_enable))(crtc); intel_crtc_enable_planes(crtc); } else { } } else if ((int )intel_crtc->active) { intel_crtc_disable_planes(crtc); (*(dev_priv->display.crtc_disable))(crtc); domains = intel_crtc->enabled_power_domains; domain = 0; goto ldv_52731; ldv_52730: ; if (((unsigned long )(1 << (int )domain) & domains) != 0UL) { intel_display_power_put(dev_priv, domain); } else { } domain = (enum intel_display_power_domain )((unsigned int )domain + 1U); ldv_52731: ; if ((unsigned int )domain <= 28U) { goto ldv_52730; } else { } intel_crtc->enabled_power_domains = 0UL; } else { } return; } } void intel_crtc_update_dpms(struct 
drm_crtc *crtc ) { struct drm_device *dev ; struct intel_encoder *intel_encoder ; bool enable ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = crtc->dev; enable = 0; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_52744; ldv_52743: ; if ((unsigned long )intel_encoder->base.crtc == (unsigned long )crtc) { enable = ((int )enable | (int )intel_encoder->connectors_active) != 0; } else { } __mptr___0 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_52744: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52743; } else { } intel_crtc_control(crtc, (int )enable); (crtc->state)->active = enable; return; } } static void intel_crtc_disable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_connector *connector ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; long tmp ; struct list_head const *__mptr ; struct drm_encoder const *__mptr___0 ; struct list_head const *__mptr___1 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __ret_warn_on = ! 
(crtc->state)->enable; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6316, "WARN_ON(!crtc->state->enable)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); intel_crtc_disable_planes(crtc); (*(dev_priv->display.crtc_disable))(crtc); (*(dev_priv->display.off))(crtc); drm_plane_helper_disable(crtc->primary); __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_52762; ldv_52761: ; if ((unsigned long )connector->encoder == (unsigned long )((struct drm_encoder *)0) || (unsigned long )(connector->encoder)->crtc == (unsigned long )((struct drm_crtc *)0)) { goto ldv_52758; } else { } if ((unsigned long )(connector->encoder)->crtc != (unsigned long )crtc) { goto ldv_52758; } else { } connector->dpms = 3; __mptr___0 = (struct drm_encoder const *)connector->encoder; ((struct intel_encoder *)__mptr___0)->connectors_active = 0; ldv_52758: __mptr___1 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_52762: ; if ((unsigned long )(& connector->head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_52761; } else { } return; } } void intel_encoder_destroy(struct drm_encoder *encoder ) { struct intel_encoder *intel_encoder ; struct drm_encoder const *__mptr ; { __mptr = (struct drm_encoder const *)encoder; intel_encoder = (struct intel_encoder *)__mptr; drm_encoder_cleanup(encoder); kfree((void const *)intel_encoder); return; } } static void intel_encoder_dpms(struct intel_encoder *encoder , int mode ) { { if (mode == 0) { encoder->connectors_active = 1; intel_crtc_update_dpms(encoder->base.crtc); } else { encoder->connectors_active = 
0; intel_crtc_update_dpms(encoder->base.crtc); } return; } } static void intel_connector_check_state(struct intel_connector *connector ) { struct intel_encoder *encoder ; struct drm_crtc *crtc ; bool encoder_enabled ; enum pipe pipe ; long tmp ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___1 ; int __ret_warn_on___2 ; long tmp___2 ; long tmp___3 ; int __ret_warn_on___3 ; int __ret_warn_on___4 ; long tmp___4 ; long tmp___5 ; int __ret_warn_on___5 ; int __ret_warn_on___6 ; long tmp___6 ; long tmp___7 ; int __ret_warn_on___7 ; int __ret_warn_on___8 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; int __ret_warn_on___9 ; int __ret_warn_on___10 ; long tmp___11 ; long tmp___12 ; int __ret_warn_on___11 ; struct drm_crtc const *__mptr ; int tmp___13 ; int __ret_warn_on___12 ; long tmp___14 ; long tmp___15 ; int __ret_warn_on___13 ; struct drm_crtc const *__mptr___0 ; int __ret_warn_on___14 ; long tmp___16 ; long tmp___17 ; bool tmp___18 ; { tmp___18 = (*(connector->get_hw_state))(connector); if ((int )tmp___18) { encoder = connector->encoder; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_connector_check_state", "[CONNECTOR:%d:%s]\n", connector->base.base.id, connector->base.name); } else { } if ((unsigned long )connector->mst_port != (unsigned long )((struct intel_dp *)0)) { return; } else { } __ret_warn_on = connector->base.dpms == 3; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6380, "wrong connector dpms state\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 
0L); } else { drm_err("wrong connector dpms state\n"); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___1 = (unsigned long )connector->base.encoder != (unsigned long )(& encoder->base); tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6382, "active connector not linked to encoder\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("active connector not linked to encoder\n"); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if ((unsigned long )encoder != (unsigned long )((struct intel_encoder *)0)) { __ret_warn_on___3 = ! encoder->connectors_active; tmp___5 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___5 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___4 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6386, "encoder->connectors_active not set\n"); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); } else { drm_err("encoder->connectors_active not set\n"); } } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); encoder_enabled = (*(encoder->get_hw_state))(encoder, & pipe); __ret_warn_on___5 = ! 
encoder_enabled; tmp___7 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___7 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___6 = 1; tmp___6 = ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6389, "encoder not enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); } else { drm_err("encoder not enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); __ret_warn_on___7 = (unsigned long )encoder->base.crtc == (unsigned long )((struct drm_crtc *)0); tmp___9 = ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); if (tmp___9 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___8 = 1; tmp___8 = ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6390, "WARN_ON(!encoder->base.crtc)\n"); } else { } ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); } else { drm_err("WARN_ON(!encoder->base.crtc)\n"); } } else { } tmp___10 = ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); if (tmp___10 != 0L) { return; } else { } crtc = encoder->base.crtc; __ret_warn_on___9 = ! 
(crtc->state)->enable; tmp___12 = ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); if (tmp___12 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___10 = 1; tmp___11 = ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); if (tmp___11 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6396, "crtc not enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); } else { drm_err("crtc not enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); __mptr = (struct drm_crtc const *)crtc; if (((struct intel_crtc *)__mptr)->active) { tmp___13 = 0; } else { tmp___13 = 1; } __ret_warn_on___11 = tmp___13; tmp___15 = ldv__builtin_expect(__ret_warn_on___11 != 0, 0L); if (tmp___15 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___12 = 1; tmp___14 = ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); if (tmp___14 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6397, "crtc not active\n"); } else { } ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); } else { drm_err("crtc not active\n"); } } else { } ldv__builtin_expect(__ret_warn_on___11 != 0, 0L); __mptr___0 = (struct drm_crtc const *)crtc; __ret_warn_on___13 = (int )((struct intel_crtc *)__mptr___0)->pipe != (int )pipe; tmp___17 = ldv__builtin_expect(__ret_warn_on___13 != 0, 0L); if (tmp___17 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___14 = 1; tmp___16 = ldv__builtin_expect(__ret_warn_on___14 != 0, 0L); if (tmp___16 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6399, "encoder active on the wrong pipe\n"); } else { } ldv__builtin_expect(__ret_warn_on___14 != 0, 0L); } else { drm_err("encoder active on the wrong pipe\n"); } } else { } ldv__builtin_expect(__ret_warn_on___13 != 0, 0L); } else { } } else { } return; } } int intel_connector_init(struct intel_connector *connector ) { struct drm_connector_state *connector_state ; void *tmp ; { tmp = kzalloc(32UL, 208U); connector_state = (struct drm_connector_state *)tmp; if ((unsigned long )connector_state == (unsigned long )((struct drm_connector_state *)0)) { return (-12); } else { } connector->base.state = connector_state; return (0); } } struct intel_connector *intel_connector_alloc(void) { struct intel_connector *connector ; void *tmp ; int tmp___0 ; { tmp = kzalloc(1072UL, 208U); connector = (struct intel_connector *)tmp; if ((unsigned long )connector == (unsigned long )((struct intel_connector *)0)) { return ((struct intel_connector *)0); } else { } tmp___0 = intel_connector_init(connector); if (tmp___0 < 0) { kfree((void const *)connector); return ((struct intel_connector *)0); } else { } return (connector); } } void intel_connector_dpms(struct drm_connector *connector , int mode ) { struct drm_encoder const *__mptr ; { if (mode != 0) { mode = 3; } else { } if (connector->dpms == mode) { return; } else { } connector->dpms = mode; if ((unsigned long )connector->encoder != (unsigned long )((struct drm_encoder *)0)) { __mptr = (struct drm_encoder const *)connector->encoder; intel_encoder_dpms((struct intel_encoder *)__mptr, mode); } else { } intel_modeset_check_state(connector->dev); return; } } bool intel_connector_get_hw_state(struct intel_connector *connector ) { enum pipe pipe ; struct intel_encoder *encoder ; bool tmp ; { pipe = 0; 
encoder = connector->encoder; tmp = (*(encoder->get_hw_state))(encoder, & pipe); return (tmp); } } static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state ) { { if ((int )crtc_state->base.enable && (int )crtc_state->has_pch_encoder) { return (crtc_state->fdi_lanes); } else { } return (0); } } static int ironlake_check_fdi_lanes(struct drm_device *dev , enum pipe pipe , struct intel_crtc_state *pipe_config ) { struct drm_atomic_state *state ; struct intel_crtc *other_crtc ; struct intel_crtc_state *other_crtc_state ; long tmp ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_crtc const *__mptr ; struct drm_crtc *tmp___2 ; long tmp___3 ; bool tmp___4 ; long tmp___5 ; int tmp___6 ; long tmp___7 ; struct drm_crtc const *__mptr___0 ; struct drm_crtc *tmp___8 ; long tmp___9 ; bool tmp___10 ; long tmp___11 ; int tmp___12 ; { state = pipe_config->base.state; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ironlake_check_fdi_lanes", "checking fdi config on pipe %c, lanes %i\n", (int )pipe + 65, pipe_config->fdi_lanes); } else { } if (pipe_config->fdi_lanes > 4) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ironlake_check_fdi_lanes", "invalid fdi lane config on pipe %c: %i lanes\n", (int )pipe + 65, pipe_config->fdi_lanes); } else { } return (-22); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { _L: /* CIL Label */ if (pipe_config->fdi_lanes > 2) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 
0L) { drm_ut_debug_printk("ironlake_check_fdi_lanes", "only 2 lanes on haswell, required: %i lanes\n", pipe_config->fdi_lanes); } else { } return (-22); } else { return (0); } } else { } } else { } } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 38UL) == 2U) { return (0); } else { } switch ((int )pipe) { case 0: ; return (0); case 1: ; if (pipe_config->fdi_lanes <= 2) { return (0); } else { } tmp___2 = intel_get_crtc_for_pipe(dev, 2); __mptr = (struct drm_crtc const *)tmp___2; other_crtc = (struct intel_crtc *)__mptr; other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); tmp___4 = IS_ERR((void const *)other_crtc_state); if ((int )tmp___4) { tmp___3 = PTR_ERR((void const *)other_crtc_state); return ((int )tmp___3); } else { } tmp___6 = pipe_required_fdi_lanes(other_crtc_state); if (tmp___6 > 0) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ironlake_check_fdi_lanes", "invalid shared fdi lane config on pipe %c: %i lanes\n", (int )pipe + 65, pipe_config->fdi_lanes); } else { } return (-22); } else { } return (0); case 2: ; if (pipe_config->fdi_lanes > 2) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("ironlake_check_fdi_lanes", "only 2 lanes on pipe %c: required %i lanes\n", (int )pipe + 65, pipe_config->fdi_lanes); } else { } return (-22); } else { } tmp___8 = intel_get_crtc_for_pipe(dev, 1); __mptr___0 = (struct drm_crtc const *)tmp___8; other_crtc = (struct intel_crtc *)__mptr___0; other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); tmp___10 = IS_ERR((void const *)other_crtc_state); if ((int )tmp___10) { tmp___9 = PTR_ERR((void const *)other_crtc_state); return ((int )tmp___9); } else { } tmp___12 = pipe_required_fdi_lanes(other_crtc_state); if (tmp___12 > 2) { tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { 
drm_ut_debug_printk("ironlake_check_fdi_lanes", "fdi link B uses too many lanes to enable link C\n"); } else { } return (-22); } else { } return (0); default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (6538), "i" (12UL)); ldv_52881: ; goto ldv_52881; } } } static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_display_mode *adjusted_mode ; int lane ; int link_bw ; int fdi_dotclock ; int ret ; bool needs_recompute ; u32 tmp ; long tmp___0 ; { dev = intel_crtc->base.dev; adjusted_mode = & pipe_config->base.adjusted_mode; needs_recompute = 0; retry: tmp = intel_fdi_link_freq(dev); link_bw = (int )((tmp * 100000000U) / 10000U); fdi_dotclock = adjusted_mode->crtc_clock; lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, pipe_config->pipe_bpp); pipe_config->fdi_lanes = lane; intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, link_bw, & pipe_config->fdi_m_n); ret = ironlake_check_fdi_lanes(intel_crtc->base.dev, intel_crtc->pipe, pipe_config); if (ret == -22 && pipe_config->pipe_bpp > 18) { pipe_config->pipe_bpp = pipe_config->pipe_bpp + -6; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ironlake_fdi_compute_config", "fdi link bw constraint, reducing pipe bpp to %i\n", pipe_config->pipe_bpp); } else { } needs_recompute = 1; pipe_config->bw_constrained = 1; goto retry; } else { } if ((int )needs_recompute) { return (1); } else { } return (ret); } } static void hsw_compute_ips_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { bool tmp ; int tmp___0 ; { if 
(i915.enable_ips != 0) { tmp = hsw_crtc_supports_ips(crtc); if ((int )tmp) { if (pipe_config->pipe_bpp <= 24) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } } else { tmp___0 = 0; } pipe_config->ips_enabled = (bool )tmp___0; return; } } static int intel_crtc_compute_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_display_mode *adjusted_mode ; int ret ; int clock_limit ; int tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; bool tmp___0 ; bool tmp___1 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; int tmp___2 ; long tmp___3 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; adjusted_mode = & pipe_config->base.adjusted_mode; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 3U) { tmp = (*(dev_priv->display.get_display_clock_speed))(dev); clock_limit = tmp; if ((int )crtc->pipe == 0) { goto _L; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { _L: /* CIL Label */ if (adjusted_mode->crtc_clock > (clock_limit * 9) / 10) { clock_limit = clock_limit * 2; pipe_config->double_wide = 1; } else { } } else { } } if (adjusted_mode->crtc_clock > (clock_limit * 9) / 10) { return (-22); } else { } } else { } tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)pipe_config, 4); if ((int )tmp___0) { tmp___1 = intel_is_dual_link_lvds(dev); if ((int )tmp___1) { pipe_config->pipe_src_w = pipe_config->pipe_src_w & -2; } else { goto _L___0; } } else _L___0: /* CIL Label */ if ((int )pipe_config->double_wide) { pipe_config->pipe_src_w = pipe_config->pipe_src_w & -2; } else { } __p___1 = to_i915((struct drm_device const *)dev); 
if ((unsigned int )((unsigned char )__p___1->info.gen) > 4U) { goto _L___1; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) != 0U) { _L___1: /* CIL Label */ if (adjusted_mode->hsync_start == adjusted_mode->hdisplay) { return (-22); } else { } } else { } } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if (((int )__p___4->info.device_id & 65280) == 2560) { hsw_compute_ips_config(crtc, pipe_config); } else { goto _L___2; } } else { _L___2: /* CIL Label */ __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 8U) { hsw_compute_ips_config(crtc, pipe_config); } else { } } else { } } if ((int )pipe_config->has_pch_encoder) { tmp___2 = ironlake_fdi_compute_config(crtc, pipe_config); return (tmp___2); } else { } ret = 0; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_crtc_compute_config", "intel_crtc = %p drm_state (pipe_config->base.state) = %p\n", crtc, pipe_config->base.state); } else { } ret = intel_atomic_setup_scalers(dev, crtc, pipe_config); return (ret); } } static int skylake_get_display_clock_speed(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; uint32_t lcpll1 ; uint32_t tmp___0 ; uint32_t cdctl ; uint32_t tmp___1 ; uint32_t linkrate ; int __ret_warn_on ; long tmp___2 ; uint32_t tmp___3 ; int __ret_warn_on___0 ; long tmp___4 ; int __ret_warn_on___1 ; long tmp___5 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); lcpll1 = tmp___0; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286720L, 1); cdctl = tmp___1; if 
((int )lcpll1 >= 0) { __ret_warn_on = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6668, "LCPLL1 not enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (24000); } else { } if ((cdctl & 201326592U) == 67108864U) { return (540000); } else { } tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 1); linkrate = (tmp___3 & 14U) >> 1; if (linkrate == 5U || linkrate == 4U) { switch (cdctl & 201326592U) { case 0U: ; return (432000); case 134217728U: ; return (308570); case 201326592U: ; return (617140); default: __ret_warn_on___0 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6689, "Unknown cd freq selection\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } } else { switch (cdctl & 201326592U) { case 0U: ; return (450000); case 134217728U: ; return (337500); case 201326592U: ; return (675000); default: __ret_warn_on___1 = 1; tmp___5 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6701, "Unknown cd freq selection\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } } return (24000); } } static int broadwell_get_display_clock_speed(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t lcpll ; 
uint32_t tmp ; uint32_t freq ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); lcpll = tmp; freq = lcpll & 201326592U; if ((lcpll & 2097152U) != 0U) { return (800000); } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270356L, 1); if ((tmp___0 & 16777216U) != 0U) { return (450000); } else if (freq == 0U) { return (450000); } else if (freq == 67108864U) { return (540000); } else if (freq == 134217728U) { return (337500); } else { return (675000); } } } } static int haswell_get_display_clock_speed(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t lcpll ; uint32_t tmp ; uint32_t freq ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); lcpll = tmp; freq = lcpll & 201326592U; if ((lcpll & 2097152U) != 0U) { return (800000); } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270356L, 1); if ((tmp___0 & 16777216U) != 0U) { return (450000); } else if (freq == 0U) { return (450000); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if (((int )__p___0->info.device_id & 65280) == 2560) { return (337500); } else { return (540000); } } else { return (540000); } } } } } static int valleyview_get_display_clock_speed(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 val ; int divider ; int tmp ; int __ret_warn_on ; long tmp___0 ; unsigned int __x ; int __d ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (dev_priv->hpll_freq == 0U) { tmp = valleyview_get_vco(dev_priv); dev_priv->hpll_freq = (unsigned int )tmp; } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_cck_read(dev_priv, 107U); mutex_unlock(& 
dev_priv->sb_lock); divider = (int )val & 31;
/* Tail of valleyview_get_display_clock_speed(): the low 5 bits of the CCK
 * register value hold the cdclk divider.  Bits 12:8 (mask 7936U = 0x1F00) are
 * expected to mirror it; a mismatch means a cdclk change is still in
 * progress, so warn. */
__ret_warn_on = (val & 7936U) != (u32 )(divider << 8); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6764, "cdclk change in progress\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L);
/* Result = round-to-closest of (hpll_freq * 2) / (divider + 1); the
 * expression below is the CIL expansion of DIV_ROUND_CLOSEST. */
__x = dev_priv->hpll_freq << 1; __d = divider + 1; return ((int )(((unsigned int )(__d / 2) + __x) / (unsignedned int )__d)); } }
/* Fixed display core clock readouts (kHz) for platforms with a constant
 * clock. */
static int ilk_get_display_clock_speed(struct drm_device *dev ) { { return (450000); } }
static int i945_get_display_clock_speed(struct drm_device *dev ) { { return (400000); } }
static int i915_get_display_clock_speed(struct drm_device *dev ) { { return (333333); } }
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev ) { { return (200000); } }
/* Pineview: decode the display core clock from the GCFGC PCI config word
 * (offset 240 = 0xf0); bits 6:4 (mask 112 = 0x70) select the frequency. */
static int pnv_get_display_clock_speed(struct drm_device *dev ) { u16 gcfgc ; { gcfgc = 0U; pci_read_config_word((struct pci_dev const *)dev->pdev, 240, & gcfgc); switch ((int )gcfgc & 112) { case 0: ; return (266667); case 16: ; return (333333); case 32: ; return (444444); case 80: ; return (200000); default: drm_err("Unknown pnv display core clock 0x%04x\n", (int )gcfgc); /* fallthrough */ case 96: ; return (133333); case 112: ; return (166667); } } }
/* i915GM: bit 7 of GCFGC forces the low 133 MHz clock; otherwise bits 6:4
 * select 333 MHz, with 190 MHz as the default. */
static int i915gm_get_display_clock_speed(struct drm_device *dev ) { u16 gcfgc ; { gcfgc = 0U; pci_read_config_word((struct pci_dev const *)dev->pdev, 240, & gcfgc); if (((int )gcfgc & 128) != 0) { return (133333); } else { switch ((int )gcfgc & 112) { case 64: ; return (333333); default: ; case 0: ; return (190000); } } } }
static int i865_get_display_clock_speed(struct drm_device *dev ) { { return (266667); } }
/* i855: decode from HPLLCC.  NOTE(review): in this generated model hpllcc is
 * initialized to 0U and never read back from hardware, so only the
 * case-0/case-1 (200 MHz) path is reachable -- confirm against the original
 * driver, which reads HPLLCC from the PCI bridge. */
static int i855_get_display_clock_speed(struct drm_device *dev ) { u16 hpllcc ; { hpllcc = 0U; switch ((int )hpllcc & 15) { case 0: ; case 1: ;
return (200000); case 3: ; return (250000); case 2: ; return (133333); } return (0); } }
static int i830_get_display_clock_speed(struct drm_device *dev ) { { return (133333); } }
/* Halve num and den in lockstep until both fit in the 24-bit M/N hardware
 * fields (16777215U = 0xFFFFFF). */
static void intel_reduce_m_n_ratio(uint32_t *num , uint32_t *den ) { { goto ldv_53062; ldv_53061: *num = *num >> 1; *den = *den >> 1; ldv_53062: ; if (*num > 16777215U || *den > 16777215U) { goto ldv_53061; } else { } return; } }
/* Pick N = min(roundup_pow_of_two(n), 8388608U = 0x800000), scale M by the
 * same factor (m * N / n, 64-bit intermediate), then reduce both into 24
 * bits. */
static void compute_m_n(unsigned int m , unsigned int n , uint32_t *ret_m , uint32_t *ret_n ) { unsigned int __min1 ; unsigned long tmp ; unsigned int __min2 ; u64 tmp___0 ; { tmp = __roundup_pow_of_two((unsigned long )n); __min1 = (unsigned int )tmp; __min2 = 8388608U; *ret_n = __min1 < __min2 ? __min1 : __min2; tmp___0 = div_u64((unsigned long long )m * (unsigned long long )*ret_n, n); *ret_m = (uint32_t )tmp___0; intel_reduce_m_n_ratio(ret_m, ret_n); return; } }
/* Fill the link M/N block: data M/N compares bpp * pixel_clock against total
 * link bandwidth (link_clock * nlanes * 8); link M/N compares pixel clock
 * against link clock.  TU (transfer unit) is fixed at 64. */
void intel_link_compute_m_n(int bits_per_pixel , int nlanes , int pixel_clock , int link_clock , struct intel_link_m_n *m_n ) { { m_n->tu = 64U; compute_m_n((unsigned int )(bits_per_pixel * pixel_clock), (unsigned int )((link_clock * nlanes) * 8), & m_n->gmch_m, & m_n->gmch_n); compute_m_n((unsigned int )pixel_clock, (unsigned int )link_clock, & m_n->link_m, & m_n->link_n); return; } }
/* Honour the i915.panel_use_ssc module override when >= 0; otherwise use the
 * flag byte at offset 41280 of dev_priv (CIL-flattened field access --
 * presumably the VBT lvds_use_ssc bit; confirm against struct layout) unless
 * the SSC quirk (bit 1 of quirks) disables it. */
__inline static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv ) { { if (i915.panel_use_ssc >= 0) { return (i915.panel_use_ssc != 0); } else { } return ((bool )((unsigned int )*((unsigned char *)dev_priv + 41280UL) != 0U && (dev_priv->quirks & 2UL) == 0UL)); } }
/* Head of i9xx_get_refclk(); the body continues on the next source line. */
static int i9xx_get_refclk(struct intel_crtc_state const *crtc_state , int num_connectors ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int refclk ; int __ret_warn_on ; long tmp ; long tmp___0 ; struct drm_i915_private *__p ; bool tmp___1 ; bool tmp___2 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = (crtc_state->base.crtc)->dev; dev_priv = (struct drm_i915_private
/* Tail of i9xx_get_refclk: WARN if no atomic state is attached, then choose
 * 100000 for the platforms probed via the flag byte at dev_priv+45 / gen==9,
 * the VBT SSC frequency for a single-connector LVDS panel with SSC enabled
 * (with a debug print), else 96000 (gen > 2) or 48000 (gen 2).  kHz. */
*)dev->dev_private; __ret_warn_on = (unsigned long )crtc_state->base.state == (unsigned long )((struct drm_atomic_state */* const */)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 6910, "WARN_ON(!crtc_state->base.state)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { refclk = 100000; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { refclk = 100000; } else { goto _L___1; } } else { _L___1: /* CIL Label */ tmp___1 = intel_pipe_will_have_type(crtc_state, 4); if ((int )tmp___1) { tmp___2 = intel_panel_use_ssc(dev_priv); if ((int )tmp___2) { if (num_connectors <= 1) { refclk = dev_priv->vbt.lvds_ssc_freq; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i9xx_get_refclk", "using SSC reference clock of %d kHz\n", refclk); } else { } } else { goto _L___0; } } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { refclk = 96000; } else { refclk = 48000; } } } } return (refclk); } }
/* Pack the FP (N/M) divisor register value.  Pineview encodes N one-hot
 * (1 << n), i9xx encodes N and M1 as plain fields. */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll ) { { return ((uint32_t )(((1 << dpll->n) << 16) | dpll->m2)); } }
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll ) { { return ((uint32_t )(((dpll->n << 16) | (dpll->m1 << 8)) | dpll->m2)); } }
/* Program FP0/FP1 divider state; body continues on the next chunk line. */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state , intel_clock_t 
/* Tail of i9xx_update_pll_dividers: pick the pnv or i9xx FP encoding (the
 * byte at dev_priv+44 is CIL's flattening of the Pineview flag), store fp in
 * fp0, and only use the reduced clock for fp1 on LVDS (type 4) -- setting
 * lowfreq_avail accordingly; otherwise fp1 mirrors fp0. */
*reduced_clock ) { struct drm_device *dev ; u32 fp ; u32 fp2 ; struct drm_i915_private *__p ; bool tmp ; { dev = crtc->base.dev; fp2 = 0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { fp = pnv_dpll_compute_fp(& crtc_state->dpll); if ((unsigned long )reduced_clock != (unsigned long )((intel_clock_t *)0)) { fp2 = pnv_dpll_compute_fp(reduced_clock); } else { } } else { fp = i9xx_dpll_compute_fp(& crtc_state->dpll); if ((unsigned long )reduced_clock != (unsigned long )((intel_clock_t *)0)) { fp2 = i9xx_dpll_compute_fp(reduced_clock); } else { } } crtc_state->dpll_hw_state.fp0 = fp; crtc->lowfreq_avail = 0; tmp = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp && (unsigned long )reduced_clock != (unsigned long )((intel_clock_t *)0)) { crtc_state->dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = 1; } else { crtc_state->dpll_hw_state.fp1 = fp; } return; } }
/* PLL B opamp recalibration over the sideband (DPIO) bus: a fixed
 * read-modify-write sequence on registers 32868/32940.  Note the second
 * write deliberately OVERWRITES reg_val with 2348810240U (0x8c000000) right
 * after masking -- this matches the upstream sequence, do not "fix" it. */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv , enum pipe pipe ) { u32 reg_val ; { reg_val = vlv_dpio_read(dev_priv, pipe, 32868); reg_val = reg_val & 4294967040U; reg_val = reg_val | 48U; vlv_dpio_write(dev_priv, pipe, 32868, reg_val); reg_val = vlv_dpio_read(dev_priv, pipe, 32940); reg_val = reg_val & 2365587455U; reg_val = 2348810240U; vlv_dpio_write(dev_priv, pipe, 32940, reg_val); reg_val = vlv_dpio_read(dev_priv, pipe, 32868); reg_val = reg_val & 4294967040U; vlv_dpio_write(dev_priv, pipe, 32868, reg_val); reg_val = vlv_dpio_read(dev_priv, pipe, 32940); reg_val = reg_val & 16777215U; reg_val = reg_val | 2952790016U; vlv_dpio_write(dev_priv, pipe, 32940, reg_val); return; } }
/* Write PCH transcoder data/link M/N registers (per-pipe stride 4096,
 * base 917552); the MMIO writes continue on the next chunk line. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc , struct intel_link_m_n *m_n ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t 
/* Tail of intel_pch_transcoder_set_m_n: four MMIO writes -- gmch M (with
 * TU-1 in bits 31:25), gmch N, link M, link N. */
)(pipe * 4096 + 917552), ((m_n->tu - 1U) << 25) | m_n->gmch_m, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 917556), m_n->gmch_n, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 917568), m_n->link_m, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 917572), m_n->link_n, 1); return; } }
/* CPU transcoder M/N programming.  On gen > 4 the register address is built
 * from the per-transcoder offset table plus display_mmio_offset (the long
 * CIL expressions below); the function continues on the following lines
 * with the optional M2/N2 (DRRS) set and the legacy per-pipe path. */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc , struct intel_link_m_n *m_n , struct intel_link_m_n *m2_n2 ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; enum transcoder transcoder ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; transcoder = (crtc->config)->cpu_transcoder; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 4U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393264U), ((m_n->tu - 1U) << 25) | m_n->gmch_m, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393268U), m_n->gmch_n, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393280U), m_n->link_m, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 
/* Middle of intel_cpu_transcoder_set_m_n: finish link N, then -- only when
 * m2_n2 is supplied, the platform qualifies (gen 8 via the flag byte at
 * dev_priv+45, or gen <= 7) and DRRS is enabled on this crtc -- write the
 * M2/N2 register set (offsets 393272..393292).  The trailing 'else' branch
 * is the gen <= 4 legacy path using fixed per-pipe registers (stride 4096,
 * base 458832); its last write lands on the next chunk line. */
393284U), m_n->link_n, 1); if ((unsigned long )m2_n2 != (unsigned long )((struct intel_link_m_n *)0)) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 7U) { _L: /* CIL Label */ if ((int )(crtc->config)->has_drrs) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393272U), ((m2_n2->tu - 1U) << 25) | m2_n2->gmch_m, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393276U), m2_n2->gmch_n, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393288U), m2_n2->link_m, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393292U), m2_n2->link_n, 1); } else { } } else { } } } else { } } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 458832), ((m_n->tu - 1U) << 25) | m_n->gmch_m, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 458836), m_n->gmch_n, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 458848), m_n->link_m, 1); 
/* Final legacy-path write of intel_cpu_transcoder_set_m_n (link N). */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 458852), m_n->link_n, 1); } return; } }
/* Select which M/N set to program: M1_N1 (0) programs dp_m_n and also passes
 * dp_m2_n2; M2_N2 (1) programs dp_m2_n2 as the primary set.  Note the PCH
 * path always uses &config->dp_m_n (not the selected pointer) -- this
 * mirrors the upstream code, do not "fix" it here. */
void intel_dp_set_m_n(struct intel_crtc *crtc , enum link_m_n_set m_n ) { struct intel_link_m_n *dp_m_n ; struct intel_link_m_n *dp_m2_n2 ; { dp_m2_n2 = (struct intel_link_m_n *)0; if ((unsigned int )m_n == 0U) { dp_m_n = & (crtc->config)->dp_m_n; dp_m2_n2 = & (crtc->config)->dp_m2_n2; } else if ((unsigned int )m_n == 1U) { dp_m_n = & (crtc->config)->dp_m2_n2; } else { drm_err("Unsupported divider value\n"); return; } if ((int )(crtc->config)->has_pch_encoder) { intel_pch_transcoder_set_m_n(crtc, & (crtc->config)->dp_m_n); } else { intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); } return; } }
/* Compute the VLV DPLL control word: base flags, pipe-B select bit (16384),
 * enable bit 31, and the pixel-multiplier field of DPLL_MD. */
static void vlv_update_pll(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { u32 dpll ; u32 dpll_md ; { dpll = 1879056384U; if ((int )crtc->pipe == 1) { dpll = dpll | 16384U; } else { } dpll = dpll | 2147483648U; pipe_config->dpll_hw_state.dpll = dpll; dpll_md = (pipe_config->pixel_multiplier - 1U) << 8; pipe_config->dpll_hw_state.dpll_md = dpll_md; return; } }
/* Program the VLV PLL dividers over DPIO under sb_lock; pipe B first gets
 * the opamp recalibration.  Continues on the next chunk line. */
static void vlv_prepare_pll(struct intel_crtc *crtc , struct intel_crtc_state const *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; u32 mdiv ; u32 bestn ; u32 bestm1 ; u32 bestm2 ; u32 bestp1 ; u32 bestp2 ; u32 coreclk ; u32 reg_val ; bool tmp ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; mutex_lock_nested(& dev_priv->sb_lock, 0U); bestn = (u32 )pipe_config->dpll.n; bestm1 = (u32 )pipe_config->dpll.m1; bestm2 = (u32 )pipe_config->dpll.m2; bestp1 = (u32 )pipe_config->dpll.p1; bestp2 = (u32 )pipe_config->dpll.p2; if (pipe == 1) { vlv_pllb_recal_opamp(dev_priv, (enum pipe )pipe); } else { } vlv_dpio_write(dev_priv, (enum pipe )pipe, 49220, 16777231U); reg_val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (pipe + 1026) * 32); reg_val = reg_val & 
/* Tail of vlv_prepare_pll: assemble mdiv from p1/p2/n/m1/m2 and write it
 * twice (second time with bit 11 set to latch), choose the loop-filter /
 * bandwidth word by port clock (162 MHz) or encoder type, program the DP
 * lane-stagger word per pipe, then core clock bits and a final fixed word
 * before dropping sb_lock. */
16777215U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (pipe + 1026) * 32, reg_val); vlv_dpio_write(dev_priv, (enum pipe )pipe, 33024, 1552U); mdiv = (bestm1 << 8) | (bestm2 & 255U); mdiv = ((bestp1 << 21) | (bestp2 << 16)) | mdiv; mdiv = (bestn << 12) | mdiv; mdiv = mdiv | 16777216U; mdiv = mdiv | 268435456U; vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32780, mdiv); mdiv = mdiv | 2048U; vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32780, mdiv); if ((int )pipe_config->port_clock == 162000) { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32840, 10420227U); } else { tmp = intel_pipe_has_type(crtc, 1); if ((int )tmp) { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32840, 10420227U); } else { tmp___0 = intel_pipe_has_type(crtc, 6); if ((int )tmp___0) { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32840, 10420227U); } else { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32840, 13631503U); } } } if ((int )pipe_config->has_dp_encoder) { if (pipe == 0) { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32788, 234094592U); } else { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32788, 234291200U); } } else if (pipe == 0) { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32788, 234291200U); } else { vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32788, 234094592U); } coreclk = vlv_dpio_read(dev_priv, (enum pipe )pipe, pipe * 32 + 32796); coreclk = (coreclk & 65280U) | 29360128U; tmp___1 = intel_pipe_has_type(crtc, 7); if ((int )tmp___1) { coreclk = coreclk | 16777216U; } else { tmp___2 = intel_pipe_has_type(crtc, 8); if ((int )tmp___2) { coreclk = coreclk | 16777216U; } else { } } vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32796, coreclk); vlv_dpio_write(dev_priv, (enum pipe )pipe, pipe * 32 + 32844, 2273775616U); mutex_unlock(& dev_priv->sb_lock); return; } }
/* CHV DPLL control word; body continues on the next chunk line. */
static void chv_update_pll(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { { 
/* Tail of chv_update_pll: base control word, pipe-select bit for non-pipe-A,
 * pixel multiplier into DPLL_MD bits 15:8. */
pipe_config->dpll_hw_state.dpll = 2952798208U; if ((int )crtc->pipe != 0) { pipe_config->dpll_hw_state.dpll = pipe_config->dpll_hw_state.dpll | 16384U; } else { } pipe_config->dpll_hw_state.dpll_md = (pipe_config->pixel_multiplier - 1U) << 8; return; } }
/* CHV PLL programming: split m2 into integer (>>22) and 22-bit fractional
 * parts, write the DPLL register with the enable bit masked off, then
 * program p/m/n and feedback registers over DPIO under sb_lock.  The
 * channel-relative register addresses are the (port * 384U + ...) and
 * (port * 4294967116U + ...) expressions (the latter is a negative offset
 * wrapped to unsigned by CIL).  Continues on the next chunk line. */
static void chv_prepare_pll(struct intel_crtc *crtc , struct intel_crtc_state const *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; int dpll_reg ; enum dpio_channel port ; int tmp ; u32 loopfilter ; u32 tribuf_calcntr ; u32 bestn ; u32 bestm1 ; u32 bestm2 ; u32 bestp1 ; u32 bestp2 ; u32 bestm2_frac ; u32 dpio_val ; int vco ; u32 tmp___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; dpll_reg = (int )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)); tmp = vlv_pipe_to_channel((enum pipe )pipe); port = (enum dpio_channel )tmp; bestn = (u32 )pipe_config->dpll.n; bestm2_frac = (u32 )pipe_config->dpll.m2 & 4194303U; bestm1 = (u32 )pipe_config->dpll.m1; bestm2 = (u32 )(pipe_config->dpll.m2 >> 22); bestp1 = (u32 )pipe_config->dpll.p1; bestp2 = (u32 )pipe_config->dpll.p2; vco = pipe_config->dpll.vco; dpio_val = 0U; loopfilter = 0U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dpll_reg, (unsigned int )pipe_config->dpll_hw_state.dpll & 2147483647U, 1); mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 4294967116U + 33076U), ((bestp1 << 13) | (bestp2 << 8)) | 10485776U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32768U), bestm2); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32772U), 256U); if (bestm2_frac != 0U) { vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32776U), 
/* Tail of chv_prepare_pll: enable/disable the fractional feedback path
 * depending on bestm2_frac, select loop-filter coefficients and the tribuf
 * calibration count by VCO band (== 5.4 GHz, <= 6.2 GHz, <= 6.48 GHz,
 * above), then set the AFC recalibration bit before unlocking. */
bestm2_frac); } else { } dpio_val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32780U)); dpio_val = dpio_val & 4294901744U; dpio_val = dpio_val | 2U; if (bestm2_frac != 0U) { dpio_val = dpio_val | 65536U; } else { } vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32780U), dpio_val); dpio_val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32804U)); dpio_val = dpio_val & 4294967280U; dpio_val = dpio_val | 10U; if (bestm2_frac == 0U) { dpio_val = dpio_val | 1U; } else { } vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32804U), dpio_val); if (vco == 5400000) { loopfilter = loopfilter | 3U; loopfilter = loopfilter | 2048U; loopfilter = loopfilter | 65536U; tribuf_calcntr = 9U; } else if (vco <= 6200000) { loopfilter = loopfilter | 5U; loopfilter = loopfilter | 2816U; loopfilter = loopfilter | 196608U; tribuf_calcntr = 9U; } else if (vco <= 6480000) { loopfilter = loopfilter | 4U; loopfilter = loopfilter | 2304U; loopfilter = loopfilter | 196608U; tribuf_calcntr = 8U; } else { loopfilter = loopfilter | 4U; loopfilter = loopfilter | 2304U; loopfilter = loopfilter | 196608U; tribuf_calcntr = 0U; } vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32792U), loopfilter); dpio_val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32800U)); dpio_val = dpio_val & 4294966272U; dpio_val = dpio_val | tribuf_calcntr; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32800U), dpio_val); tmp___0 = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 4294967116U + 33080U)); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 4294967116U + 33080U), tmp___0 | 16384U); mutex_unlock(& dev_priv->sb_lock); return; } }
/* Force-enable a PLL with the given dividers: builds a throwaway
 * intel_crtc_state on the stack (CIL scalarizes its '= { ... }' initializer
 * into the per-field stores that fill the next several chunk lines). */
void vlv_force_pll_on(struct drm_device *dev , enum pipe pipe , struct dpll const *dpll ) { struct intel_crtc *crtc ; 
/* Body of vlv_force_pll_on (started on the previous chunk line): look up
 * the crtc for 'pipe', then zero-fill every member of the local pipe_config
 * field by field -- this is CIL's scalarization of a designated/aggregate
 * initializer, not hand-written code.  The only non-zero seeds are
 * pipe_config.dpll = *dpll and pixel_multiplier = 1.  At the end the
 * platform check (flag byte at dev_priv+45, gen == 8) dispatches to the
 * CHV or VLV update/prepare/enable trio. */
struct drm_crtc const *__mptr ; struct drm_crtc *tmp ; struct intel_crtc_state pipe_config ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { tmp = intel_get_crtc_for_pipe(dev, (int )pipe); __mptr = (struct drm_crtc const *)tmp; crtc = (struct intel_crtc *)__mptr; pipe_config.base.crtc = & crtc->base; pipe_config.base.enable = (_Bool)0; pipe_config.base.active = (_Bool)0; pipe_config.base.planes_changed = (_Bool)0; pipe_config.base.mode_changed = (_Bool)0; pipe_config.base.active_changed = (_Bool)0; pipe_config.base.plane_mask = 0U; pipe_config.base.last_vblank_count = 0U; pipe_config.base.adjusted_mode.head.next = 0; pipe_config.base.adjusted_mode.head.prev = 0; pipe_config.base.adjusted_mode.base.id = 0U; pipe_config.base.adjusted_mode.base.type = 0U; pipe_config.base.adjusted_mode.base.properties = 0; pipe_config.base.adjusted_mode.name[0] = (char)0; pipe_config.base.adjusted_mode.name[1] = (char)0; pipe_config.base.adjusted_mode.name[2] = (char)0; pipe_config.base.adjusted_mode.name[3] = (char)0; pipe_config.base.adjusted_mode.name[4] = (char)0; pipe_config.base.adjusted_mode.name[5] = (char)0; pipe_config.base.adjusted_mode.name[6] = (char)0; pipe_config.base.adjusted_mode.name[7] = (char)0; pipe_config.base.adjusted_mode.name[8] = (char)0; pipe_config.base.adjusted_mode.name[9] = (char)0; pipe_config.base.adjusted_mode.name[10] = (char)0; pipe_config.base.adjusted_mode.name[11] = (char)0; pipe_config.base.adjusted_mode.name[12] = (char)0; pipe_config.base.adjusted_mode.name[13] = (char)0; pipe_config.base.adjusted_mode.name[14] = (char)0; pipe_config.base.adjusted_mode.name[15] = (char)0; pipe_config.base.adjusted_mode.name[16] = (char)0; pipe_config.base.adjusted_mode.name[17] = (char)0; pipe_config.base.adjusted_mode.name[18] = (char)0; pipe_config.base.adjusted_mode.name[19] = (char)0; pipe_config.base.adjusted_mode.name[20] = (char)0; pipe_config.base.adjusted_mode.name[21] = (char)0; pipe_config.base.adjusted_mode.name[22] = (char)0; 
pipe_config.base.adjusted_mode.name[23] = (char)0; pipe_config.base.adjusted_mode.name[24] = (char)0; pipe_config.base.adjusted_mode.name[25] = (char)0; pipe_config.base.adjusted_mode.name[26] = (char)0; pipe_config.base.adjusted_mode.name[27] = (char)0; pipe_config.base.adjusted_mode.name[28] = (char)0; pipe_config.base.adjusted_mode.name[29] = (char)0; pipe_config.base.adjusted_mode.name[30] = (char)0; pipe_config.base.adjusted_mode.name[31] = (char)0; pipe_config.base.adjusted_mode.status = 0; pipe_config.base.adjusted_mode.type = 0U; pipe_config.base.adjusted_mode.clock = 0; pipe_config.base.adjusted_mode.hdisplay = 0; pipe_config.base.adjusted_mode.hsync_start = 0; pipe_config.base.adjusted_mode.hsync_end = 0; pipe_config.base.adjusted_mode.htotal = 0; pipe_config.base.adjusted_mode.hskew = 0; pipe_config.base.adjusted_mode.vdisplay = 0; pipe_config.base.adjusted_mode.vsync_start = 0; pipe_config.base.adjusted_mode.vsync_end = 0; pipe_config.base.adjusted_mode.vtotal = 0; pipe_config.base.adjusted_mode.vscan = 0; pipe_config.base.adjusted_mode.flags = 0U; pipe_config.base.adjusted_mode.width_mm = 0; pipe_config.base.adjusted_mode.height_mm = 0; pipe_config.base.adjusted_mode.crtc_clock = 0; pipe_config.base.adjusted_mode.crtc_hdisplay = 0; pipe_config.base.adjusted_mode.crtc_hblank_start = 0; pipe_config.base.adjusted_mode.crtc_hblank_end = 0; pipe_config.base.adjusted_mode.crtc_hsync_start = 0; pipe_config.base.adjusted_mode.crtc_hsync_end = 0; pipe_config.base.adjusted_mode.crtc_htotal = 0; pipe_config.base.adjusted_mode.crtc_hskew = 0; pipe_config.base.adjusted_mode.crtc_vdisplay = 0; pipe_config.base.adjusted_mode.crtc_vblank_start = 0; pipe_config.base.adjusted_mode.crtc_vblank_end = 0; pipe_config.base.adjusted_mode.crtc_vsync_start = 0; pipe_config.base.adjusted_mode.crtc_vsync_end = 0; pipe_config.base.adjusted_mode.crtc_vtotal = 0; pipe_config.base.adjusted_mode.private = 0; pipe_config.base.adjusted_mode.private_flags = 0; 
pipe_config.base.adjusted_mode.vrefresh = 0; pipe_config.base.adjusted_mode.hsync = 0; pipe_config.base.adjusted_mode.picture_aspect_ratio = 0; pipe_config.base.mode.head.next = 0; pipe_config.base.mode.head.prev = 0; pipe_config.base.mode.base.id = 0U; pipe_config.base.mode.base.type = 0U; pipe_config.base.mode.base.properties = 0; pipe_config.base.mode.name[0] = (char)0; pipe_config.base.mode.name[1] = (char)0; pipe_config.base.mode.name[2] = (char)0; pipe_config.base.mode.name[3] = (char)0; pipe_config.base.mode.name[4] = (char)0; pipe_config.base.mode.name[5] = (char)0; pipe_config.base.mode.name[6] = (char)0; pipe_config.base.mode.name[7] = (char)0; pipe_config.base.mode.name[8] = (char)0; pipe_config.base.mode.name[9] = (char)0; pipe_config.base.mode.name[10] = (char)0; pipe_config.base.mode.name[11] = (char)0; pipe_config.base.mode.name[12] = (char)0; pipe_config.base.mode.name[13] = (char)0; pipe_config.base.mode.name[14] = (char)0; pipe_config.base.mode.name[15] = (char)0; pipe_config.base.mode.name[16] = (char)0; pipe_config.base.mode.name[17] = (char)0; pipe_config.base.mode.name[18] = (char)0; pipe_config.base.mode.name[19] = (char)0; pipe_config.base.mode.name[20] = (char)0; pipe_config.base.mode.name[21] = (char)0; pipe_config.base.mode.name[22] = (char)0; pipe_config.base.mode.name[23] = (char)0; pipe_config.base.mode.name[24] = (char)0; pipe_config.base.mode.name[25] = (char)0; pipe_config.base.mode.name[26] = (char)0; pipe_config.base.mode.name[27] = (char)0; pipe_config.base.mode.name[28] = (char)0; pipe_config.base.mode.name[29] = (char)0; pipe_config.base.mode.name[30] = (char)0; pipe_config.base.mode.name[31] = (char)0; pipe_config.base.mode.status = 0; pipe_config.base.mode.type = 0U; pipe_config.base.mode.clock = 0; pipe_config.base.mode.hdisplay = 0; pipe_config.base.mode.hsync_start = 0; pipe_config.base.mode.hsync_end = 0; pipe_config.base.mode.htotal = 0; pipe_config.base.mode.hskew = 0; pipe_config.base.mode.vdisplay = 0; 
pipe_config.base.mode.vsync_start = 0; pipe_config.base.mode.vsync_end = 0; pipe_config.base.mode.vtotal = 0; pipe_config.base.mode.vscan = 0; pipe_config.base.mode.flags = 0U; pipe_config.base.mode.width_mm = 0; pipe_config.base.mode.height_mm = 0; pipe_config.base.mode.crtc_clock = 0; pipe_config.base.mode.crtc_hdisplay = 0; pipe_config.base.mode.crtc_hblank_start = 0; pipe_config.base.mode.crtc_hblank_end = 0; pipe_config.base.mode.crtc_hsync_start = 0; pipe_config.base.mode.crtc_hsync_end = 0; pipe_config.base.mode.crtc_htotal = 0; pipe_config.base.mode.crtc_hskew = 0; pipe_config.base.mode.crtc_vdisplay = 0; pipe_config.base.mode.crtc_vblank_start = 0; pipe_config.base.mode.crtc_vblank_end = 0; pipe_config.base.mode.crtc_vsync_start = 0; pipe_config.base.mode.crtc_vsync_end = 0; pipe_config.base.mode.crtc_vtotal = 0; pipe_config.base.mode.private = 0; pipe_config.base.mode.private_flags = 0; pipe_config.base.mode.vrefresh = 0; pipe_config.base.mode.hsync = 0; pipe_config.base.mode.picture_aspect_ratio = 0; pipe_config.base.mode_blob = 0; pipe_config.base.event = 0; pipe_config.base.state = 0; pipe_config.quirks = 0UL; pipe_config.pipe_src_w = 0; pipe_config.pipe_src_h = 0; pipe_config.has_pch_encoder = (_Bool)0; pipe_config.has_infoframe = (_Bool)0; pipe_config.cpu_transcoder = 0; pipe_config.limited_color_range = (_Bool)0; pipe_config.has_dp_encoder = (_Bool)0; pipe_config.has_hdmi_sink = (_Bool)0; pipe_config.has_audio = (_Bool)0; pipe_config.dither = (_Bool)0; pipe_config.clock_set = (_Bool)0; pipe_config.sdvo_tv_clock = (_Bool)0; pipe_config.bw_constrained = (_Bool)0; pipe_config.dpll = *dpll; pipe_config.shared_dpll = 0; pipe_config.ddi_pll_sel = 0U; pipe_config.dpll_hw_state.dpll = 0U; pipe_config.dpll_hw_state.dpll_md = 0U; pipe_config.dpll_hw_state.fp0 = 0U; pipe_config.dpll_hw_state.fp1 = 0U; pipe_config.dpll_hw_state.wrpll = 0U; pipe_config.dpll_hw_state.ctrl1 = 0U; pipe_config.dpll_hw_state.cfgcr1 = 0U; pipe_config.dpll_hw_state.cfgcr2 = 0U; 
pipe_config.dpll_hw_state.ebb0 = 0U; pipe_config.dpll_hw_state.pll0 = 0U; pipe_config.dpll_hw_state.pll1 = 0U; pipe_config.dpll_hw_state.pll2 = 0U; pipe_config.dpll_hw_state.pll3 = 0U; pipe_config.dpll_hw_state.pll6 = 0U; pipe_config.dpll_hw_state.pll8 = 0U; pipe_config.dpll_hw_state.pll10 = 0U; pipe_config.dpll_hw_state.pcsdw12 = 0U; pipe_config.pipe_bpp = 0; pipe_config.dp_m_n.tu = 0U; pipe_config.dp_m_n.gmch_m = 0U; pipe_config.dp_m_n.gmch_n = 0U; pipe_config.dp_m_n.link_m = 0U; pipe_config.dp_m_n.link_n = 0U; pipe_config.dp_m2_n2.tu = 0U; pipe_config.dp_m2_n2.gmch_m = 0U; pipe_config.dp_m2_n2.gmch_n = 0U; pipe_config.dp_m2_n2.link_m = 0U; pipe_config.dp_m2_n2.link_n = 0U; pipe_config.has_drrs = (_Bool)0; pipe_config.port_clock = 0; pipe_config.pixel_multiplier = 1U; pipe_config.gmch_pfit.control = 0U; pipe_config.gmch_pfit.pgm_ratios = 0U; pipe_config.gmch_pfit.lvds_border_bits = 0U; pipe_config.pch_pfit.pos = 0U; pipe_config.pch_pfit.size = 0U; pipe_config.pch_pfit.enabled = (_Bool)0; pipe_config.pch_pfit.force_thru = (_Bool)0; pipe_config.fdi_lanes = 0; pipe_config.fdi_m_n.tu = 0U; pipe_config.fdi_m_n.gmch_m = 0U; pipe_config.fdi_m_n.gmch_n = 0U; pipe_config.fdi_m_n.link_m = 0U; pipe_config.fdi_m_n.link_n = 0U; pipe_config.ips_enabled = (_Bool)0; pipe_config.double_wide = (_Bool)0; pipe_config.dp_encoder_is_mst = (_Bool)0; pipe_config.pbn = 0; pipe_config.scaler_state.scalers[0].id = 0; pipe_config.scaler_state.scalers[0].in_use = 0; pipe_config.scaler_state.scalers[0].mode = 0U; pipe_config.scaler_state.scalers[1].id = 0; pipe_config.scaler_state.scalers[1].in_use = 0; pipe_config.scaler_state.scalers[1].mode = 0U; pipe_config.scaler_state.scaler_users = 0U; pipe_config.scaler_state.scaler_id = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { chv_update_pll(crtc, & pipe_config); 
/* Tail of vlv_force_pll_on: the CHV branch runs chv_prepare/chv_enable;
 * both non-CHV paths run the identical vlv_* trio (CIL duplicated the
 * else bodies when flattening the IS_CHERRYVIEW check). */
chv_prepare_pll(crtc, (struct intel_crtc_state const *)(& pipe_config)); chv_enable_pll(crtc, (struct intel_crtc_state const *)(& pipe_config)); } else { vlv_update_pll(crtc, & pipe_config); vlv_prepare_pll(crtc, (struct intel_crtc_state const *)(& pipe_config)); vlv_enable_pll(crtc, (struct intel_crtc_state const *)(& pipe_config)); } } else { vlv_update_pll(crtc, & pipe_config); vlv_prepare_pll(crtc, (struct intel_crtc_state const *)(& pipe_config)); vlv_enable_pll(crtc, (struct intel_crtc_state const *)(& pipe_config)); } return; } }
/* Force-disable the pipe's PLL: chv_disable_pll on CHV (flag byte at
 * dev_priv+45 set and gen == 8), vlv_disable_pll otherwise. */
void vlv_force_pll_off(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *tmp ; struct drm_i915_private *tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { tmp = to_i915((struct drm_device const *)dev); chv_disable_pll(tmp, pipe); } else { tmp___0 = to_i915((struct drm_device const *)dev); vlv_disable_pll(tmp___0, pipe); } } else { tmp___0 = to_i915((struct drm_device const *)dev); vlv_disable_pll(tmp___0, pipe); } return; } }
/* Build the i9xx DPLL control word from the computed dividers; body
 * continues across the next two chunk lines. */
static void i9xx_update_pll(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state , intel_clock_t *reduced_clock , int num_connectors ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 dpll ; bool is_sdvo ; struct dpll *clock ; bool tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; bool tmp___3 ; bool tmp___4 ; u32 dpll_md ; struct drm_i915_private *__p___5 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; clock = & crtc_state->dpll; i9xx_update_pll_dividers(crtc, crtc_state, 
/* Middle of i9xx_update_pll: is_sdvo covers encoder types 3 and 6; choose
 * LVDS (134217728U) vs DAC (67108864U) mode bits; the pixel-multiplier
 * nibble applies on device id 10098 (0x2772) or when the flag byte at
 * dev_priv+44 is set; P1 is encoded one-hot at shift 15 (pnv-style flag
 * set) or 16, with a second one-hot P1 for the reduced clock; the P2
 * switch sets bit 24 for p2==5/7 and leaves 10/14 unchanged. */
reduced_clock); tmp = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 3); if ((int )tmp) { tmp___1 = 1; } else { tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 6); if ((int )tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; } } is_sdvo = (bool )tmp___1; dpll = 268435456U; tmp___2 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___2) { dpll = dpll | 134217728U; } else { dpll = dpll | 67108864U; } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 10098U) { dpll = ((crtc_state->pixel_multiplier - 1U) << 4) | dpll; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { dpll = ((crtc_state->pixel_multiplier - 1U) << 4) | dpll; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { dpll = ((crtc_state->pixel_multiplier - 1U) << 4) | dpll; } else { } } } if ((int )is_sdvo) { dpll = dpll | 1073741824U; } else { } if ((int )crtc_state->has_dp_encoder) { dpll = dpll | 1073741824U; } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { dpll = (u32 )((1 << (clock->p1 + -1)) << 15) | dpll; } else { dpll = (u32 )((1 << (clock->p1 + -1)) << 16) | dpll; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) != 0U && (unsigned long )reduced_clock != (unsigned long )((intel_clock_t *)0)) { dpll = (u32 )(1 << (reduced_clock->p1 + -1)) | dpll; } else { } } switch (clock->p2) { case 5: dpll = dpll | 16777216U; goto ldv_53311; case 7: dpll = dpll | 16777216U; goto ldv_53311; case 10: dpll = dpll; goto ldv_53311; case 14: dpll = dpll; goto ldv_53311; } ldv_53311: __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 3U) { dpll = dpll 
/* Tail of i9xx_update_pll: SDVO TV clock bit (16384U) or, for a
 * single-connector LVDS panel with SSC, the SSC reference bits (24576U);
 * the no-op 'dpll = dpll;' branches are CIL artifacts.  Finally set the
 * enable bit, store the word, and on gen > 3 also store DPLL_MD with the
 * pixel multiplier. */
| 3072U; } else { } if ((int )crtc_state->sdvo_tv_clock) { dpll = dpll | 16384U; } else { tmp___3 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___3) { tmp___4 = intel_panel_use_ssc(dev_priv); if ((int )tmp___4) { if (num_connectors <= 1) { dpll = dpll | 24576U; } else { dpll = dpll; } } else { dpll = dpll; } } else { dpll = dpll; } } dpll = dpll | 2147483648U; crtc_state->dpll_hw_state.dpll = dpll; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 3U) { dpll_md = (crtc_state->pixel_multiplier - 1U) << 8; crtc_state->dpll_hw_state.dpll_md = dpll_md; } else { } return; } }
/* Gen2 DPLL control word: LVDS (type 4) encodes P1 one-hot at shift 16;
 * otherwise P1==2 uses a dedicated bit (2097152U) and P2==4 sets 8388608U;
 * device id 13687 (0x3577) skips the VGA-mode bit for DVO (type 2).
 * The LVDS+SSC tail continues on the next chunk line. */
static void i8xx_update_pll(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state , intel_clock_t *reduced_clock , int num_connectors ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 dpll ; struct dpll *clock ; bool tmp ; struct drm_i915_private *__p ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; clock = & crtc_state->dpll; i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); dpll = 268435456U; tmp = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp) { dpll = (u32 )((1 << (clock->p1 + -1)) << 16) | dpll; } else { if (clock->p1 == 2) { dpll = dpll | 2097152U; } else { dpll = (u32 )((clock->p1 + -2) << 16) | dpll; } if (clock->p2 == 4) { dpll = dpll | 8388608U; } else { } } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) != 13687U) { tmp___0 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 2); if ((int )tmp___0) { dpll = dpll | 1073741824U; } else { } } else { } tmp___1 = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); if ((int )tmp___1) { tmp___2 = intel_panel_use_ssc(dev_priv); if ((int )tmp___2) { if (num_connectors <= 1) { dpll = dpll | 
/* Tail of i8xx_update_pll: SSC bits for single-connector LVDS, enable bit,
 * store the control word (the 'dpll = dpll;' arms are CIL no-ops). */
24576U; } else { dpll = dpll; } } else { dpll = dpll; } } else { dpll = dpll; } dpll = dpll | 2147483648U; crtc_state->dpll_hw_state.dpll = dpll; return; } }
/* Program the pipe/transcoder timing registers from the adjusted mode.
 * Interlace (flags bit 4): vtotal/vblank_end drop by one and vsyncshift is
 * half a line (SDVO type 3 rounds htotal-1, else hsync_start - htotal/2,
 * wrapped positive).  On gen > 3 vsyncshift is written to the transcoder
 * register at offset 393256; the H/V timing writes continue on the next
 * chunk lines. */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; enum transcoder cpu_transcoder ; struct drm_display_mode *adjusted_mode ; uint32_t crtc_vtotal ; uint32_t crtc_vblank_end ; int vsyncshift ; bool tmp ; struct drm_i915_private *__p ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = intel_crtc->pipe; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; adjusted_mode = & (intel_crtc->config)->base.adjusted_mode; vsyncshift = 0; crtc_vtotal = (uint32_t )adjusted_mode->crtc_vtotal; crtc_vblank_end = (uint32_t )adjusted_mode->crtc_vblank_end; if ((adjusted_mode->flags & 16U) != 0U) { crtc_vtotal = crtc_vtotal - 1U; crtc_vblank_end = crtc_vblank_end - 1U; tmp = intel_pipe_has_type(intel_crtc, 3); if ((int )tmp) { vsyncshift = (adjusted_mode->crtc_htotal + -1) / 2; } else { vsyncshift = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_htotal / 2; } if (vsyncshift < 0) { vsyncshift = adjusted_mode->crtc_htotal + vsyncshift; } else { } } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393256U), (uint32_t )vsyncshift, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393216U), (uint32_t )((adjusted_mode->crtc_hdisplay + -1) | 
/* (continuation of intel_set_pipe_timings) hblank, hsync, vtotal, vblank
 * and vsync register writes; each packs "start - 1" in the low half-word
 * and "end/total - 1" in the high half-word. */
((adjusted_mode->crtc_htotal + -1) << 16)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393220U), (uint32_t )((adjusted_mode->crtc_hblank_start + -1) | ((adjusted_mode->crtc_hblank_end + -1) << 16)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393224U), (uint32_t )((adjusted_mode->crtc_hsync_start + -1) | ((adjusted_mode->crtc_hsync_end + -1) << 16)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U), (uint32_t )(adjusted_mode->crtc_vdisplay + -1) | ((crtc_vtotal - 1U) << 16), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393232U), (uint32_t )(adjusted_mode->crtc_vblank_start + -1) | ((crtc_vblank_end - 1U) << 16), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393236U), (uint32_t )((adjusted_mode->crtc_vsync_start + -1) | ((adjusted_mode->crtc_vsync_end + -1) << 16)), 1);
/* Special case: when the EDP transcoder (value 3) drives pipe B or C, the
 * vtotal register read from the transcoder is mirrored into the pipe's own
 * register. */
__p___0 = to_i915((struct drm_device const *)dev); if (((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U && (unsigned int )cpu_transcoder == 3U) && ((int )pipe == 1 || (int )pipe == 2)) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int
)(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U), tmp___0, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), (uint32_t )((((intel_crtc->config)->pipe_src_w + -1) << 16) | ((intel_crtc->config)->pipe_src_h + -1)), 1); return; } }
/* intel_get_pipe_timings: read the transcoder timing registers back and
 * reconstruct the crtc_* fields of pipe_config->base.adjusted_mode -- the
 * inverse of intel_set_pipe_timings (each register packs "value - 1" in
 * 16-bit halves, hence the "+ 1U" on every decode). */
static void intel_get_pipe_timings(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum transcoder cpu_transcoder ; uint32_t tmp ; uint32_t tmp___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; cpu_transcoder = pipe_config->cpu_transcoder; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393216U), 1); pipe_config->base.adjusted_mode.crtc_hdisplay = (int )((tmp & 65535U) + 1U); pipe_config->base.adjusted_mode.crtc_htotal = (int )((tmp >> 16) + 1U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393220U), 1); pipe_config->base.adjusted_mode.crtc_hblank_start = (int )((tmp & 65535U) + 1U); pipe_config->base.adjusted_mode.crtc_hblank_end = (int )((tmp >> 16) + 1U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int
/* (continuation of intel_get_pipe_timings) hsync/vtotal/vblank/vsync
 * decode, interlace-flag recovery from the pipe config register, and the
 * pipe source size readback. */
)(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393224U), 1); pipe_config->base.adjusted_mode.crtc_hsync_start = (int )((tmp & 65535U) + 1U); pipe_config->base.adjusted_mode.crtc_hsync_end = (int )((tmp >> 16) + 1U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U), 1); pipe_config->base.adjusted_mode.crtc_vdisplay = (int )((tmp & 65535U) + 1U); pipe_config->base.adjusted_mode.crtc_vtotal = (int )((tmp >> 16) + 1U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393232U), 1); pipe_config->base.adjusted_mode.crtc_vblank_start = (int )((tmp & 65535U) + 1U); pipe_config->base.adjusted_mode.crtc_vblank_end = (int )((tmp >> 16) + 1U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393236U), 1); pipe_config->base.adjusted_mode.crtc_vsync_start = (int )((tmp & 65535U) + 1U); pipe_config->base.adjusted_mode.crtc_vsync_end = (int )((tmp >> 16) + 1U); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); if ((tmp___0 & 14680064U) != 0U) { pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | 16U; pipe_config->base.adjusted_mode.crtc_vtotal = pipe_config->base.adjusted_mode.crtc_vtotal + 1;
pipe_config->base.adjusted_mode.crtc_vblank_end = pipe_config->base.adjusted_mode.crtc_vblank_end + 1; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )crtc->pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), 1); pipe_config->pipe_src_h = (int )((tmp & 65535U) + 1U); pipe_config->pipe_src_w = (int )((tmp >> 16) + 1U); pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; return; } }
/* intel_mode_from_pipe_config: copy the crtc_* timing fields of the pipe
 * config's adjusted mode into a plain drm_display_mode.  NOTE(review): the
 * final "mode->flags = mode->flags | ...adjusted_mode.flags" ORs flags with
 * the value just assigned two statements earlier -- a redundant self-OR,
 * harmless but dead (generated-code artifact). */
void intel_mode_from_pipe_config(struct drm_display_mode *mode , struct intel_crtc_state *pipe_config ) { { mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; mode->flags = pipe_config->base.adjusted_mode.flags; mode->clock = pipe_config->base.adjusted_mode.crtc_clock; mode->flags = mode->flags | pipe_config->base.adjusted_mode.flags; return; } }
/* i9xx_set_pipeconf: compose and write the PIPECONF register for the crtc:
 * preserves the enable bit on quirky pipes, sets double-wide mode, dither
 * and bpp fields, CxSR downclocking, interlace bits and (on some platforms)
 * the limited-color-range bit; ends with the register write plus a posting
 * read (continues on following lines). */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t pipeconf ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; bool tmp___2 ; struct drm_i915_private *__p___3 ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipeconf = 0U; if (((int )intel_crtc->pipe == 0 && (int )dev_priv->quirks & 1) || ((int )intel_crtc->pipe == 1 && (dev_priv->quirks &
/* (continuation of i9xx_set_pipeconf) */
16UL) != 0UL)) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )intel_crtc->pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); pipeconf = (tmp & 2147483648U) | pipeconf; } else { } if ((int )(intel_crtc->config)->double_wide) { pipeconf = pipeconf | 1073741824U; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /* CIL Label */ if ((int )(intel_crtc->config)->dither && (intel_crtc->config)->pipe_bpp != 30) { pipeconf = pipeconf | 16U; } else { } switch ((intel_crtc->config)->pipe_bpp) { case 18: pipeconf = pipeconf | 64U; goto ldv_53398; case 24: pipeconf = pipeconf; goto ldv_53398; case 30: pipeconf = pipeconf | 32U; goto ldv_53398; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (7626), "i" (12UL)); ldv_53402: ; goto ldv_53402; } ldv_53398: ; } else { } }
/* CxSR downclocking is only toggled when lowfreq_avail says a reduced
 * clock was found; the drm_ut_debug_printk calls are debug-only logging. */
__p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) != 0U) { if ((int )intel_crtc->lowfreq_avail) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i9xx_set_pipeconf", "enabling CxSR downclocking\n"); } else { } pipeconf = pipeconf | 65536U; } else { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i9xx_set_pipeconf", "disabling CxSR downclocking\n"); } else { } } } else { } if
(((intel_crtc->config)->base.adjusted_mode.flags & 16U) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 3U) { pipeconf = pipeconf | 12582912U; } else { tmp___2 = intel_pipe_has_type(intel_crtc, 3); if ((int )tmp___2) { pipeconf = pipeconf | 12582912U; } else { pipeconf = pipeconf | 10485760U; } } } else { pipeconf = pipeconf; } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U && (int )(intel_crtc->config)->limited_color_range) { pipeconf = pipeconf | 8192U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )intel_crtc->pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), pipeconf, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )intel_crtc->pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 0); return; } }
/* i9xx_crtc_compute_clock: walk the atomic state's connectors to classify
 * the output (LVDS / DSI), find PLL dividers via the platform find_dpll
 * callback (optionally also a reduced LVDS downclock), then dispatch to the
 * per-generation pll-update helper (i8xx/chv/vlv/i9xx).  Returns 0 on
 * success or -22 (-EINVAL) when no PLL settings fit.  Body continues on the
 * following lines. */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int refclk ; int num_connectors ; intel_clock_t clock ; intel_clock_t reduced_clock ; bool ok ; bool has_reduced_clock ; bool is_lvds ; bool is_dsi ; struct intel_encoder *encoder ; intel_limit_t const *limit ; struct drm_atomic_state *state ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; int i ; struct drm_encoder const *__mptr ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; num_connectors = 0; has_reduced_clock = 0; is_lvds = 0; is_dsi = 0; state = crtc_state->base.state; memset((void *)(& crtc_state->dpll_hw_state), 0, 68UL); i = 0;
/* (continuation of i9xx_crtc_compute_clock) connector-iteration loop:
 * encoder type 4 == LVDS, type 9 == DSI; DSI pipes need no PLL here. */
goto ldv_53450; ldv_53449: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )(& crtc->base)) { goto ldv_53442; } else { } __mptr = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr; switch ((unsigned int )encoder->type) { case 4U: is_lvds = 1; goto ldv_53446; case 9U: is_dsi = 1; goto ldv_53446; default: ; goto ldv_53446; } ldv_53446: num_connectors = num_connectors + 1; } else { } ldv_53442: i = i + 1; ldv_53450: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_53449; } else { } if ((int )is_dsi) { return (0); } else { } if (! crtc_state->clock_set) { refclk = i9xx_get_refclk((struct intel_crtc_state const *)crtc_state, num_connectors); limit = intel_limit(crtc_state, refclk); ok = (*(dev_priv->display.find_dpll))(limit, crtc_state, crtc_state->port_clock, refclk, (struct dpll *)0, & clock); if (! ok) { drm_err("Couldn\'t find PLL settings for mode!\n"); return (-22); } else { } if ((int )is_lvds && (int )dev_priv->lvds_downclock_avail) { has_reduced_clock = (*(dev_priv->display.find_dpll))(limit, crtc_state, dev_priv->lvds_downclock, refclk, & clock, & reduced_clock); } else { } crtc_state->dpll.n = clock.n; crtc_state->dpll.m1 = clock.m1; crtc_state->dpll.m2 = clock.m2; crtc_state->dpll.p1 = clock.p1; crtc_state->dpll.p2 = clock.p2; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 2U) { i8xx_update_pll(crtc, crtc_state, (int )has_reduced_clock ?
& reduced_clock : (intel_clock_t *)0, num_connectors); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { chv_update_pll(crtc, crtc_state); } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { vlv_update_pll(crtc, crtc_state); } else { i9xx_update_pll(crtc, crtc_state, (int )has_reduced_clock ? & reduced_clock : (intel_clock_t *)0, num_connectors); } } } return (0); } }
/* i9xx_get_pfit_config: read back the panel-fitter state into pipe_config.
 * Bails out early when the platform has no readable pfit (old gens except
 * one specific device id / flag), when the pfit enable bit (sign bit of the
 * control register) is clear, or when the pfit is bound to another pipe.
 * Tail (pgm_ratios / lvds_border_bits reads) continues on the next lines. */
static void i9xx_get_pfit_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; uint32_t tmp___0 ; struct drm_i915_private *__p___3 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 3U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 13687U) { return; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) == 0U) { return; } else { } } } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), 1); if ((int )tmp >= 0) { return; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 3U) { if ((int )crtc->pipe != 1) { return; } else { } } else if ((tmp & 1610612736U) != (uint32_t )((int )crtc->pipe << 29)) { return; } else { } pipe_config->gmch_pfit.control = tmp;
/* (continuation of i9xx_get_pfit_config) */
pipe_config->gmch_pfit.pgm_ratios = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397876U), 1); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) <= 4U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397696L, 1); pipe_config->gmch_pfit.lvds_border_bits = tmp___0 & 32768U; } else { } return; } }
/* vlv_crtc_clock_get: recover the port clock on Valleyview by reading the
 * PLL divider word from DPIO (under sb_lock) and decoding m1/m2/n/p1/p2
 * from its bit fields; skipped when the cached dpll value has its sign bit
 * clear (PLL not enabled).  refclk is fixed at 100000 (kHz). */
static void vlv_crtc_clock_get(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; intel_clock_t clock ; u32 mdiv ; int refclk ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = (int )pipe_config->cpu_transcoder; refclk = 100000; if ((int )pipe_config->dpll_hw_state.dpll >= 0) { return; } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); mdiv = vlv_dpio_read(dev_priv, (enum pipe )pipe, pipe * 32 + 32780); mutex_unlock(& dev_priv->sb_lock); clock.m1 = (int )(mdiv >> 8) & 7; clock.m2 = (int )mdiv & 255; clock.n = (int )(mdiv >> 12) & 15; clock.p1 = (int )(mdiv >> 21) & 7; clock.p2 = (int )(mdiv >> 16) & 31; vlv_clock(refclk, & clock); pipe_config->port_clock = clock.dot / 5; return; } }
/* i9xx_get_initial_plane_config: reconstruct the firmware/BIOS-programmed
 * framebuffer for takeover: reads the plane control register, allocates an
 * intel_framebuffer (kzalloc), decodes tiling / pixel format / base /
 * pitch / size from the plane registers and stores the result in
 * plane_config.  Returns early (leaking nothing) when the plane is
 * disabled (sign bit clear) or the alloc fails.  Continues on the
 * following lines. */
static void i9xx_get_initial_plane_config(struct intel_crtc *crtc , struct intel_initial_plane_config *plane_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; u32 base ; u32 offset ; int pipe ; int plane ; int fourcc ; int pixel_format ; unsigned int aligned_height ; struct drm_framebuffer *fb ; struct intel_framebuffer *intel_fb ; void *tmp ; long tmp___0 ; struct drm_i915_private *__p ; int tmp___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p___0 ; long tmp___3 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; plane = (int )crtc->plane; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int
)(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U), 1); if ((int )val >= 0) { return; } else { } tmp = kzalloc(176UL, 208U); intel_fb = (struct intel_framebuffer *)tmp; if ((unsigned long )intel_fb == (unsigned long )((struct intel_framebuffer *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i9xx_get_initial_plane_config", "failed to alloc fb\n"); } else { } return; } else { } fb = & intel_fb->base; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { if ((val & 1024U) != 0U) { plane_config->tiling = 1U; fb->modifier[0] = 72057594037927937ULL; } else { } } else { } pixel_format = (int )val & 1006632960; fourcc = i9xx_format_to_fourcc(pixel_format); fb->pixel_format = (uint32_t )fourcc; tmp___1 = drm_format_plane_cpp((uint32_t )fourcc, 0); fb->bits_per_pixel = tmp___1 * 8; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 3U) { if (plane_config->tiling != 0U) { offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), 1); } else { offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), 1); } tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 1); base = tmp___2 & 4294963200U; } else { base = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[plane] - dev_priv->info.pipe_offsets[0]) +
/* (continuation of i9xx_get_initial_plane_config) width/height/pitch
 * readback, aligned size computation and debug logging. */
(unsigned int )dev_priv->info.display_mmio_offset) + 459140U), 1); } plane_config->base = base; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), 1); fb->width = ((val >> 16) & 4095U) + 1U; fb->height = (val & 4095U) + 1U; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459144U), 1); fb->pitches[0] = val & 4294967232U; aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, fb->modifier[0]); plane_config->size = (int )(fb->pitches[0] * aligned_height); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("i9xx_get_initial_plane_config", "pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe + 65, plane, fb->width, fb->height, fb->bits_per_pixel, base, fb->pitches[0], plane_config->size); } else { } plane_config->fb = intel_fb; return; } }
/* chv_crtc_clock_get: Cherryview variant of the port-clock readback --
 * fetches the per-channel DPIO PLL/common-lane words under sb_lock and
 * decodes m1/m2 (fractional, 22-bit)/n/p1/p2; refclk fixed at 100000. */
static void chv_crtc_clock_get(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; enum dpio_channel port ; int tmp ; intel_clock_t clock ; u32 cmn_dw13 ; u32 pll_dw0 ; u32 pll_dw1 ; u32 pll_dw2 ; int refclk ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = (int )pipe_config->cpu_transcoder; tmp = vlv_pipe_to_channel((enum pipe )pipe); port = (enum dpio_channel )tmp; refclk = 100000; mutex_lock_nested(& dev_priv->sb_lock, 0U); cmn_dw13 = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 4294967116U + 33076U)); pll_dw0 = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32768U)); pll_dw1 = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int
)port * 384U + 32772U)); pll_dw2 = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 384U + 32776U)); mutex_unlock(& dev_priv->sb_lock); clock.m1 = (pll_dw1 & 7U) == 0U ? 2 : 0; clock.m2 = (int )(((pll_dw0 & 255U) << 22) | (pll_dw2 & 4194303U)); clock.n = (int )(pll_dw1 >> 8) & 15; clock.p1 = (int )(cmn_dw13 >> 13) & 7; clock.p2 = (int )(cmn_dw13 >> 8) & 31; chv_clock(refclk, & clock); pipe_config->port_clock = clock.dot / 5; return; } }
/* i9xx_get_pipe_config: full hardware state readout for a pipe.  Returns 0
 * (false) when the power domain is off or the pipe is disabled; otherwise
 * fills pipe_config (bpp, color range, double-wide, timings, pfit, pixel
 * multiplier, cached dpll/fp registers) and dispatches to the appropriate
 * clock-readback helper, returning 1.  Body continues on following lines. */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; bool tmp___0 ; int tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )crtc->pipe); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } pipe_config->cpu_transcoder = (enum transcoder )crtc->pipe; pipe_config->shared_dpll = -1; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )crtc->pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); if ((int )tmp >= 0) { return (0); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /*
CIL Label */ switch (tmp & 224U) { case 64U: pipe_config->pipe_bpp = 18; goto ldv_53586; case 0U: pipe_config->pipe_bpp = 24; goto ldv_53586; case 32U: pipe_config->pipe_bpp = 30; goto ldv_53586; default: ; goto ldv_53586; } ldv_53586: ; } else { } } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U && (tmp & 8192U) != 0U) { pipe_config->limited_color_range = 1; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 3U) { pipe_config->double_wide = (tmp & 1073741824U) != 0U; } else { } intel_get_pipe_timings(crtc, pipe_config); i9xx_get_pfit_config(crtc, pipe_config);
/* Pixel-multiplier readout: newer gens decode it from the DPLL_MD
 * register; older device-id/flag variants read it from the DPLL register
 * itself; everything else defaults to 1.  NOTE(review): the __p___4 and
 * __p___5 branches below are byte-identical -- a generated-code
 * duplication, harmless. */
__p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 3U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24604U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24608U : (unsigned int )dev_priv->info.display_mmio_offset + 24636U)), 1); pipe_config->pixel_multiplier = ((tmp & 16128U) >> 8) + 1U; pipe_config->dpll_hw_state.dpll_md = tmp; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___3->info.device_id) == 10098U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); pipe_config->pixel_multiplier = ((tmp & 255U) >> 4) + 1U; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 44UL) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe == 0 ?
(unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); pipe_config->pixel_multiplier = ((tmp & 255U) >> 4) + 1U; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 44UL) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); pipe_config->pixel_multiplier = ((tmp & 255U) >> 4) + 1U; } else { pipe_config->pixel_multiplier = 1U; } } } }
/* Cache the raw DPLL (and on non-flag platforms FP0/FP1) register values;
 * then dispatch clock readback: gen 8 + flag -> chv, flag alone -> vlv,
 * otherwise the generic i9xx helper. */
pipe_config->dpll_hw_state.dpll = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )crtc->pipe == 1 ?
(unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) == 0U) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___7->info.device_id) == 13687U) { pipe_config->dpll_hw_state.dpll = pipe_config->dpll_hw_state.dpll & 3221225471U; } else { } pipe_config->dpll_hw_state.fp0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )crtc->pipe + 3080) * 8), 1); pipe_config->dpll_hw_state.fp1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 8 + 24644), 1); } else { pipe_config->dpll_hw_state.dpll = pipe_config->dpll_hw_state.dpll & 4294934272U; } __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) != 0U) { __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___11->info.gen) == 8U) { chv_crtc_clock_get(crtc, pipe_config); } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) != 0U) { vlv_crtc_clock_get(crtc, pipe_config); } else { i9xx_crtc_clock_get(crtc, pipe_config); } } return (1); } }
/* ironlake_init_pch_refclk: compute and program the PCH display reference
 * clock control register (read at 811520L == 0xC6200): walks the encoder
 * list for LVDS / eDP panels, decides CK505 vs SSC availability from
 * dev_priv->vbt, builds the target value "final" and, when it differs from
 * the current register, applies it in staged writes with udelay settles.
 * Body continues on the following lines. */
static void ironlake_init_pch_refclk(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_encoder *encoder ; u32 val ; u32 final ; bool has_lvds ; bool has_cpu_edp ; bool has_panel ; bool has_ck505 ; bool can_ssc ; struct list_head const *__mptr ; struct intel_digital_port *tmp ; struct list_head const *__mptr___0 ; struct drm_i915_private *__p ; long tmp___0 ; bool tmp___1 ; bool tmp___2 ; long tmp___3 ; bool tmp___4 ; long tmp___5 ; bool tmp___6 ; long tmp___7 ; long tmp___8 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; has_lvds = 0; has_cpu_edp = 0; has_panel = 0; has_ck505 = 0; can_ssc = 0;
/* (continuation of ironlake_init_pch_refclk) encoder-list walk: type 4 ==
 * LVDS, type 8 == eDP (CPU eDP only when the digital port is port A / 0). */
__mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_53677; ldv_53676: ; switch ((unsigned int )encoder->type) { case 4U: has_panel = 1; has_lvds = 1; goto ldv_53673; case 8U: has_panel = 1; tmp = enc_to_dig_port(& encoder->base); if ((unsigned int )tmp->port == 0U) { has_cpu_edp = 1; } else { } goto ldv_53673; default: ; goto ldv_53673; } ldv_53673: __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_53677: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_53676; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { has_ck505 = (int )dev_priv->vbt.display_clock_mode != 0; can_ssc = has_ck505; } else { has_ck505 = 0; can_ssc = 1; } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ironlake_init_pch_refclk", "has_panel %d has_lvds %d has_ck505 %d\n", (int )has_panel, (int )has_lvds, (int )has_ck505); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 1); final = val; final = final & 4294965759U; if ((int )has_ck505) { final = final | 512U; } else { final = final | 1024U; } final = final & 4294961151U; final = final & 4294942719U; final = final & 4294967293U; if ((int )has_panel) { final = final | 4096U; tmp___1 = intel_panel_use_ssc(dev_priv); if ((int )tmp___1 && (int )can_ssc) { final = final | 2U; } else { } if ((int )has_cpu_edp) { tmp___2 = intel_panel_use_ssc(dev_priv); if ((int )tmp___2 && (int )can_ssc) { final = final | 16384U; } else { final = final | 24576U; } } else { final = final; } } else { final = final; final = final; } if (final == val) { return; } else { }
/* Staged application: each write is followed by a posting read and a
 * ~250us settle delay (__const_udelay).  The trailing BUG (on the next
 * lines) asserts the staged writes converged on "final". */
val = val & 4294965759U; if ((int )has_ck505) { val = val | 512U; } else { val = val | 1024U; } if ((int )has_panel) { val = val &
4294961151U; val = val | 4096U; tmp___4 = intel_panel_use_ssc(dev_priv); if ((int )tmp___4 && (int )can_ssc) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("ironlake_init_pch_refclk", "Using SSC on panel\n"); } else { } val = val | 2U; } else { val = val & 4294967293U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811520L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 0); __const_udelay(859000UL); val = val & 4294942719U; if ((int )has_cpu_edp) { tmp___6 = intel_panel_use_ssc(dev_priv); if ((int )tmp___6 && (int )can_ssc) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ironlake_init_pch_refclk", "Using SSC on eDP\n"); } else { } val = val | 16384U; } else { val = val | 24576U; } } else { val = val; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811520L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 0); __const_udelay(859000UL); } else { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("ironlake_init_pch_refclk", "Disabling SSC entirely\n"); } else { } val = val & 4294942719U; val = val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811520L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 0); __const_udelay(859000UL); val = val & 4294961151U; val = val; val = val & 4294967293U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811520L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 0); __const_udelay(859000UL); } tmp___8 = ldv__builtin_expect(val != final, 0L); if (tmp___8 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (8147), "i" (12UL)); ldv_53686: ; goto ldv_53686; } else { } return; } } static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv ) { uint32_t tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; uint32_t tmp___2 ; unsigned long timeout_____0 ; unsigned long tmp___3 ; int ret_____0 ; uint32_t tmp___4 ; uint32_t tmp___5 ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794628L, 1); tmp = tmp | 4096U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794628L, tmp, 1); tmp___0 = msecs_to_jiffies(1U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_53701; ldv_53700: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794628L, 1); if ((tmp___1 & 8192U) == 0U) { ret__ = -110; } else { } goto ldv_53699; } else { } cpu_relax(); ldv_53701: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794628L, 1); if ((tmp___2 & 8192U) == 0U) { goto ldv_53700; } else { } ldv_53699: ; if (ret__ != 0) { drm_err("FDI mPHY reset assert timeout\n"); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794628L, 1); tmp = tmp & 4294963199U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794628L, tmp, 1); tmp___3 = msecs_to_jiffies(1U); timeout_____0 = (tmp___3 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_53713; ldv_53712: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794628L, 1); if ((tmp___4 & 8192U) != 0U) { ret_____0 = -110; } else { } goto ldv_53711; } else { } cpu_relax(); ldv_53713: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794628L, 1); if ((tmp___5 & 8192U) != 0U) { goto ldv_53712; } else { } 
ldv_53711: ;
/* Tail of lpt_reset_fdi_mphy(): the timed poll above waited for the mPHY
 * reset-status bit (mask 8192U == 0x2000) to de-assert; ret_____0 == -110
 * (-ETIMEDOUT) means it never did. */
if (ret_____0 != 0) { drm_err("FDI mPHY reset de-assert timeout\n"); } else { }
return; } }
/*
 * lpt_program_fdi_mphy - program the FDI mPHY tuning registers over the
 * sideband interface (intel_sbi_read/intel_sbi_write with destination
 * selector 1).  Every register is updated read-modify-write, and most
 * settings are applied at two offsets 256 apart -- NOTE(review):
 * presumably one instance per mPHY transmitter lane; confirm against the
 * platform register spec.  All masks/values are opaque hardware tuning
 * constants (CIL folded the symbolic macro names into decimal literals;
 * hex equivalents are noted per step below).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv ) { uint32_t tmp ;
{
/* 32776: keep low 24 bits (mask 0x00FFFFFF), OR in 0x12000000. */
tmp = intel_sbi_read(dev_priv, 32776, 1); tmp = tmp & 16777215U; tmp = tmp | 301989888U; intel_sbi_write(dev_priv, 32776, tmp, 1);
/* 8200 / 8456: set bit 11 (0x800). */
tmp = intel_sbi_read(dev_priv, 8200, 1); tmp = tmp | 2048U; intel_sbi_write(dev_priv, 8200, tmp, 1);
tmp = intel_sbi_read(dev_priv, 8456, 1); tmp = tmp | 2048U; intel_sbi_write(dev_priv, 8456, tmp, 1);
/* 8300 / 8556: OR in 0x01240000. */
tmp = intel_sbi_read(dev_priv, 8300, 1); tmp = tmp | 19136512U; intel_sbi_write(dev_priv, 8300, tmp, 1);
tmp = intel_sbi_read(dev_priv, 8556, 1); tmp = tmp | 19136512U; intel_sbi_write(dev_priv, 8556, tmp, 1);
/* 8320 / 8576: clear bits 13-15 (mask 0xFFFF1FFF), set 0xA000. */
tmp = intel_sbi_read(dev_priv, 8320, 1); tmp = tmp & 4294909951U; tmp = tmp | 40960U; intel_sbi_write(dev_priv, 8320, tmp, 1);
tmp = intel_sbi_read(dev_priv, 8576, 1); tmp = tmp & 4294909951U; tmp = tmp | 40960U; intel_sbi_write(dev_priv, 8576, tmp, 1);
/* 8332 / 8588: replace the low byte (mask 0xFFFFFF00) with 0x1C. */
tmp = intel_sbi_read(dev_priv, 8332, 1); tmp = tmp & 4294967040U; tmp = tmp | 28U; intel_sbi_write(dev_priv, 8332, tmp, 1);
tmp = intel_sbi_read(dev_priv, 8588, 1); tmp = tmp & 4294967040U; tmp = tmp | 28U; intel_sbi_write(dev_priv, 8588, tmp, 1);
/* 8344 / 8600: clear byte 2 (mask 0xFF00FFFF), set 0x001C0000. */
tmp = intel_sbi_read(dev_priv, 8344, 1); tmp = tmp & 4278255615U; tmp = tmp | 1835008U; intel_sbi_write(dev_priv, 8344, tmp, 1);
tmp = intel_sbi_read(dev_priv, 8600, 1); tmp = tmp & 4278255615U; tmp = tmp | 1835008U; intel_sbi_write(dev_priv, 8600, tmp, 1);
/* 8388 / 8644: set bit 27 (0x08000000). */
tmp = intel_sbi_read(dev_priv, 8388, 1); tmp = tmp | 134217728U; intel_sbi_write(dev_priv, 8388, tmp, 1);
tmp = intel_sbi_read(dev_priv, 8644, 1); tmp = tmp | 134217728U; intel_sbi_write(dev_priv, 8644, tmp, 1);
/* 8428 / 8684: keep low 28 bits (mask 0x0FFFFFFF), set bit 30 (0x40000000). */
tmp = intel_sbi_read(dev_priv, 8428, 1); tmp = tmp & 268435455U; tmp = tmp | 1073741824U; intel_sbi_write(dev_priv, 8428, tmp, 1);
tmp = intel_sbi_read(dev_priv, 8684, 1); tmp = tmp & 268435455U; tmp = tmp | 1073741824U; intel_sbi_write(dev_priv, 8684, tmp, 1);
return; } }
/* Header of the next definition; its parameter list continues on the
 * following source line. */
static void
lpt_enable_clkout_dp(struct drm_device *dev , bool with_spread , bool with_fdi ) { struct drm_i915_private *dev_priv ; uint32_t reg ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __ret_warn_on = (int )with_fdi && ! with_spread; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 8258, "FDI requires downspread\n"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { with_spread = 1; } else { } __ret_warn_on___0 = (unsigned int )dev_priv->pch_id == 39936U && (int )with_fdi; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 8261, "LP PCH doesn\'t have FDI\n"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { with_fdi = 0; } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); tmp = intel_sbi_read(dev_priv, 524, 0); tmp = tmp & 4294967294U; tmp = tmp | 8U; intel_sbi_write(dev_priv, 524, tmp, 0); __const_udelay(103080UL); if ((int )with_spread) { tmp = intel_sbi_read(dev_priv, 524, 0); tmp = tmp & 4294967287U; intel_sbi_write(dev_priv, 524, tmp, 0); if ((int )with_fdi) { lpt_reset_fdi_mphy(dev_priv); lpt_program_fdi_mphy(dev_priv); } else { } } else { } reg = (unsigned int )dev_priv->pch_id == 39936U ? 
7936U : 10752U;
/* Tail of lpt_enable_clkout_dp(): set bit 0 of the chip-specific clock
 * register chosen above (7936U when dev_priv->pch_id == 39936U, i.e. the
 * LP PCH id 0x9C00, else 10752U) and release the sideband lock. */
tmp = intel_sbi_read(dev_priv, (int )((u16 )reg), 0); tmp = tmp | 1U; intel_sbi_write(dev_priv, (int )((u16 )reg), tmp, 0);
mutex_unlock(& dev_priv->sb_lock);
return; } }
/*
 * lpt_disable_clkout_dp - disable CLKOUT_DP via the sideband interface.
 * Clears bit 0 of the chip-specific clock register (7936U for pch_id
 * 39936U, otherwise 10752U); then, if register 524 has bit 0 clear,
 * first makes sure its bit 3 is set (pausing with a fixed __const_udelay
 * for the change to settle) before finally setting bit 0.  All sideband
 * traffic is serialized under dev_priv->sb_lock.
 */
static void lpt_disable_clkout_dp(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t reg ; uint32_t tmp ;
{
dev_priv = (struct drm_i915_private *)dev->dev_private;
mutex_lock_nested(& dev_priv->sb_lock, 0U);
reg = (unsigned int )dev_priv->pch_id == 39936U ? 7936U : 10752U;
tmp = intel_sbi_read(dev_priv, (int )((u16 )reg), 0); tmp = tmp & 4294967294U; intel_sbi_write(dev_priv, (int )((u16 )reg), tmp, 0);
tmp = intel_sbi_read(dev_priv, 524, 0);
if ((tmp & 1U) == 0U) { if ((tmp & 8U) == 0U) { tmp = tmp | 8U; intel_sbi_write(dev_priv, 524, tmp, 0); __const_udelay(137440UL); } else { } tmp = tmp | 1U; intel_sbi_write(dev_priv, 524, tmp, 0); } else { }
mutex_unlock(& dev_priv->sb_lock);
return; } }
/*
 * lpt_init_pch_refclk - pick the LPT reference-clock configuration.
 * The goto loop is CIL's expansion of list_for_each_entry() over
 * dev->mode_config.encoder_list (+ 0xfffffffffffffff8UL is container_of's
 * -8 byte adjustment from the embedded list head).  If any encoder of
 * type 1U is present (analog output, judging by the has_vga flag name),
 * enable clkout-DP with spread and FDI; otherwise disable it.
 */
static void lpt_init_pch_refclk(struct drm_device *dev ) { struct intel_encoder *encoder ; bool has_vga ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ;
{
has_vga = 0;
__mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL;
goto ldv_53750;
ldv_53749: ;
switch ((unsigned int )encoder->type) { case 1U: has_vga = 1; goto ldv_53747; default: ; goto ldv_53747; }
ldv_53747: __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL;
ldv_53750: ;
if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_53749; } else { }
if ((int )has_vga) { lpt_enable_clkout_dp(dev, 1, 1); } else { lpt_disable_clkout_dp(dev); }
return; } }
/*
 * intel_init_pch_refclk - dispatch on the detected PCH type (the body
 * continues on the following source line): types 1U and 2U take the
 * Ironlake path, type 3U the LPT path.
 */
void intel_init_pch_refclk(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ;
{
__p___0 = to_i915((struct drm_device const *)dev);
if ((unsigned int
)__p___0->pch_type == 1U) { ironlake_init_pch_refclk(dev); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type == 2U) { ironlake_init_pch_refclk(dev); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 3U) { lpt_init_pch_refclk(dev); } else { } } } return; } } static int ironlake_get_refclk(struct intel_crtc_state *crtc_state ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_atomic_state *state ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; struct intel_encoder *encoder ; int num_connectors ; int i ; bool is_lvds ; struct drm_encoder const *__mptr ; long tmp ; bool tmp___0 ; { dev = (crtc_state->base.crtc)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; state = crtc_state->base.state; num_connectors = 0; is_lvds = 0; i = 0; goto ldv_53792; ldv_53791: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )crtc_state->base.crtc) { goto ldv_53785; } else { } __mptr = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr; switch ((unsigned int )encoder->type) { case 4U: is_lvds = 1; goto ldv_53789; default: ; goto ldv_53789; } ldv_53789: num_connectors = num_connectors + 1; } else { } ldv_53785: i = i + 1; ldv_53792: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_53791; } else { } if ((int )is_lvds) { tmp___0 = intel_panel_use_ssc(dev_priv); if ((int )tmp___0) { if (num_connectors <= 1) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ironlake_get_refclk", "using SSC reference clock of %d kHz\n", dev_priv->vbt.lvds_ssc_freq); } else { } return (dev_priv->vbt.lvds_ssc_freq); } else { } } else { } } else { } return 
(120000); } } static void ironlake_set_pipeconf(struct drm_crtc *crtc ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; uint32_t val ; { dev_priv = (struct drm_i915_private *)(crtc->dev)->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; val = 0U; switch ((intel_crtc->config)->pipe_bpp) { case 18: val = val | 64U; goto ldv_53805; case 24: val = val; goto ldv_53805; case 30: val = val | 32U; goto ldv_53805; case 36: val = val | 96U; goto ldv_53805; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (8413), "i" (12UL)); ldv_53810: ; goto ldv_53810; } ldv_53805: ; if ((int )(intel_crtc->config)->dither) { val = val | 16U; } else { } if (((intel_crtc->config)->base.adjusted_mode.flags & 16U) != 0U) { val = val | 6291456U; } else { val = val; } if ((int )(intel_crtc->config)->limited_color_range) { val = val | 8192U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 0); return; } } static void intel_set_pipe_csc(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; uint16_t coeff ; uint16_t postoff ; uint32_t mode ; struct 
drm_i915_private *__p ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; coeff = 30720U; if ((int )(intel_crtc->config)->limited_color_range) { coeff = 3512U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299024), (uint32_t )((int )coeff << 16), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299028), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299032), (uint32_t )coeff, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299036), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299040), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299044), (uint32_t )((int )coeff << 16), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299056), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299060), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299064), 0U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 6U) { postoff = 0U; if ((int )(intel_crtc->config)->limited_color_range) { postoff = 257U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299072), (uint32_t )postoff, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299076), (uint32_t )postoff, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299080), (uint32_t )postoff, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299048), 0U, 1); } else { mode = 1U; if ((int )(intel_crtc->config)->limited_color_range) { mode = mode | 4U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 256 + 299048), mode, 1); } return; } } static 
void haswell_set_pipeconf(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; enum transcoder cpu_transcoder ; uint32_t val ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; val = 0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (int )(intel_crtc->config)->dither) { val = val | 16U; } else { } if (((intel_crtc->config)->base.adjusted_mode.flags & 16U) != 0U) { val = val | 6291456U; } else { val = val; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )intel_crtc->pipe * 2048 + 304256), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )intel_crtc->pipe * 2048 + 304256), 0); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char 
)__p___2->info.gen) > 8U) { _L: /* CIL Label */ val = 0U; switch ((intel_crtc->config)->pipe_bpp) { case 18: val = val | 64U; goto ldv_53865; case 24: val = val; goto ldv_53865; case 30: val = val | 32U; goto ldv_53865; case 36: val = val | 96U; goto ldv_53865; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (8538), "i" (12UL)); ldv_53870: ; goto ldv_53870; } ldv_53865: ; if ((int )(intel_crtc->config)->dither) { val = val | 16U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458800U), val, 1); } else { } } return; } } static bool ironlake_compute_clocks(struct drm_crtc *crtc , struct intel_crtc_state *crtc_state , intel_clock_t *clock , bool *has_reduced_clock , intel_clock_t *reduced_clock ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int refclk ; intel_limit_t const *limit ; bool ret ; bool is_lvds ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; is_lvds = 0; is_lvds = intel_pipe_will_have_type((struct intel_crtc_state const *)crtc_state, 4); refclk = ironlake_get_refclk(crtc_state); limit = intel_limit(crtc_state, refclk); ret = (*(dev_priv->display.find_dpll))(limit, crtc_state, crtc_state->port_clock, refclk, (struct dpll *)0, clock); if (! 
ret) { return (0); } else { }
/* Tail of ironlake_compute_clocks(): for an LVDS output on a platform
 * that advertises an LVDS downclock, also solve for a reduced-rate clock
 * through the platform find_dpll hook. */
if ((int )is_lvds && (int )dev_priv->lvds_downclock_avail) { *has_reduced_clock = (*(dev_priv->display.find_dpll))(limit, crtc_state, dev_priv->lvds_downclock, refclk, clock, reduced_clock); } else { }
return (1); } }
/*
 * ironlake_get_lanes_required - number of FDI lanes needed for a mode.
 * @target_clock: pixel clock -- NOTE(review): units (kHz) inferred from
 *                the name only; confirm with callers.
 * @link_bw: per-lane link bandwidth
 * @bpp: bits per pixel
 *
 * bps is the payload rate padded by 5% (x 21/20); the return value is
 * DIV_ROUND_UP(bps, link_bw * 8), written with the usual
 * (a + b - 1) / b idiom in unsigned arithmetic.
 */
int ironlake_get_lanes_required(int target_clock , int link_bw , int bpp ) { u32 bps ;
{
bps = (u32 )(((target_clock * bpp) * 21) / 20);
return ((int )((((u32 )(link_bw * 8) + bps) - 1U) / (u32 )(link_bw * 8)));
} }
/*
 * ironlake_needs_fb_cb_tune - true when the DPLL feedback value M
 * (i9xx_dpll_compute_m) is below n * factor; the caller then ORs the
 * 12582912U (0xC00000) tuning bits into the FP register value.
 */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll , int factor ) { uint32_t tmp ;
{
tmp = i9xx_dpll_compute_m(dpll);
return (tmp < (uint32_t )(dpll->n * factor));
} }
/*
 * ironlake_compute_dpll - build the DPLL control register value for a
 * CRTC (the function continues on the following source lines).  The
 * goto loop below is CIL's expansion of iterating state->connectors[]:
 * it counts connectors attached to this CRTC and flags LVDS (encoder
 * type 4U) and SDVO/HDMI (types 3U and 6U) outputs.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state , u32 *fp , intel_clock_t *reduced_clock , u32 *fp2 ) { struct drm_crtc *crtc ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_atomic_state *state ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; struct intel_encoder *encoder ; uint32_t dpll ; int factor ; int num_connectors ; int i ; bool is_lvds ; bool is_sdvo ; struct drm_encoder const *__mptr ; bool tmp ; struct drm_i915_private *__p ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ;
{
crtc = & intel_crtc->base; dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; state = crtc_state->base.state; num_connectors = 0; is_lvds = 0; is_sdvo = 0; i = 0;
goto ldv_53923;
/* Loop body: skip connectors not bound to this CRTC, then classify the
 * connector's best_encoder by type. */
ldv_53922: ;
if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )crtc_state->base.crtc) { goto ldv_53914; } else { } __mptr = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr; switch ((unsigned int )encoder->type) { case 4U: is_lvds = 1; goto ldv_53918; case 3U: ; case 6U: is_sdvo = 1; goto ldv_53918; default: ; goto ldv_53918; } ldv_53918: num_connectors = num_connectors + 1; } else { }
ldv_53914: i = i + 1;
ldv_53923: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_53922; } else { } factor = 21; if ((int )is_lvds) { tmp = intel_panel_use_ssc(dev_priv); if ((int )tmp && dev_priv->vbt.lvds_ssc_freq == 100000) { factor = 25; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { tmp___0 = intel_is_dual_link_lvds(dev); if ((int )tmp___0) { factor = 25; } else { } } else { } } } else if ((int )crtc_state->sdvo_tv_clock) { factor = 20; } else { } tmp___1 = ironlake_needs_fb_cb_tune(& crtc_state->dpll, factor); if ((int )tmp___1) { *fp = *fp | 12582912U; } else { } if ((unsigned long )fp2 != (unsigned long )((u32 *)0U) && reduced_clock->m < reduced_clock->n * factor) { *fp2 = *fp2 | 12582912U; } else { } dpll = 0U; if ((int )is_lvds) { dpll = dpll | 134217728U; } else { dpll = dpll | 67108864U; } dpll = ((crtc_state->pixel_multiplier - 1U) << 9) | dpll; if ((int )is_sdvo) { dpll = dpll | 1073741824U; } else { } if ((int )crtc_state->has_dp_encoder) { dpll = dpll | 1073741824U; } else { } dpll = (uint32_t )((1 << (crtc_state->dpll.p1 + -1)) << 16) | dpll; dpll = (uint32_t )(1 << (crtc_state->dpll.p1 + -1)) | dpll; switch (crtc_state->dpll.p2) { case 5: dpll = dpll | 16777216U; goto ldv_53932; case 7: dpll = dpll | 16777216U; goto ldv_53932; case 10: dpll = dpll; goto ldv_53932; case 14: dpll = dpll; goto ldv_53932; } ldv_53932: ; if ((int )is_lvds) { tmp___2 = intel_panel_use_ssc(dev_priv); if ((int )tmp___2) { if (num_connectors <= 1) { dpll = dpll | 24576U; } else { dpll = dpll; } } else { dpll = dpll; } } else { dpll = dpll; } return (dpll | 2147483648U); } } static int ironlake_crtc_compute_clock(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state ) { struct drm_device *dev ; intel_clock_t clock ; intel_clock_t reduced_clock ; u32 dpll ; u32 fp ; u32 fp2 ; bool ok ; bool has_reduced_clock ; bool is_lvds ; 
struct intel_shared_dpll *pll ; int __ret_warn_on ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; struct drm_i915_private *__p___1 ; long tmp___0 ; long tmp___1 ; { dev = crtc->base.dev; dpll = 0U; fp = 0U; fp2 = 0U; has_reduced_clock = 0; is_lvds = 0; memset((void *)(& crtc_state->dpll_hw_state), 0, 68UL); is_lvds = intel_pipe_has_type(crtc, 4); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 1U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 2U) { tmp = 1; } else { tmp = 0; } } else { tmp = 0; } __ret_warn_on = tmp; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __p___1 = to_i915((struct drm_device const *)dev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 8721, "Unexpected PCH type %d\n", (unsigned int )__p___1->pch_type); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ok = ironlake_compute_clocks(& crtc->base, crtc_state, & clock, & has_reduced_clock, & reduced_clock); if (! ok && ! crtc_state->clock_set) { drm_err("Couldn\'t find PLL settings for mode!\n"); return (-22); } else { } if (! crtc_state->clock_set) { crtc_state->dpll.n = clock.n; crtc_state->dpll.m1 = clock.m1; crtc_state->dpll.m2 = clock.m2; crtc_state->dpll.p1 = clock.p1; crtc_state->dpll.p2 = clock.p2; } else { } if ((int )crtc_state->has_pch_encoder) { fp = i9xx_dpll_compute_fp(& crtc_state->dpll); if ((int )has_reduced_clock) { fp2 = i9xx_dpll_compute_fp(& reduced_clock); } else { } dpll = ironlake_compute_dpll(crtc, crtc_state, & fp, & reduced_clock, (int )has_reduced_clock ? 
& fp2 : (u32 *)0U);
/* Tail of ironlake_crtc_compute_clock(): stash the computed DPLL/FP
 * values in the state, then try to claim a shared DPLL; return -22
 * (-EINVAL) when none is available. */
crtc_state->dpll_hw_state.dpll = dpll; crtc_state->dpll_hw_state.fp0 = fp;
if ((int )has_reduced_clock) { crtc_state->dpll_hw_state.fp1 = fp2; } else { crtc_state->dpll_hw_state.fp1 = fp; }
pll = intel_get_shared_dpll(crtc, crtc_state);
if ((unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ironlake_crtc_compute_clock", "failed to find PLL for pipe %c\n", (int )crtc->pipe + 65); } else { } return (-22); } else { }
} else { }
/* lowfreq_avail: an LVDS panel with a valid reduced clock may downclock. */
if ((int )is_lvds && (int )has_reduced_clock) { crtc->lowfreq_avail = 1; } else { crtc->lowfreq_avail = 0; }
return (0); } }
/*
 * intel_pch_transcoder_get_m_n - read back the PCH transcoder link/data
 * M/N values for this pipe via MMIO (register block at pipe * 4096).
 * gmch_m keeps bits 0-24 plus bit 31 (mask 2181038079U == 0x81FFFFFF);
 * tu is decoded from bits 25-30 (mask 2113929216U == 0x7E000000),
 * shifted down and incremented by 1.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc , struct intel_link_m_n *m_n ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; uint32_t tmp ; uint32_t tmp___0 ;
{
dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe;
m_n->link_m = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 917568), 1);
m_n->link_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 917572), 1);
tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 917552), 1);
m_n->gmch_m = tmp & 2181038079U;
m_n->gmch_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 917556), 1);
tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 917552), 1);
m_n->tu = ((tmp___0 & 2113929216U) >> 25) + 1U;
return; } }
/*
 * intel_cpu_transcoder_get_m_n - declarations only here; the body (on the
 * following source lines) reads the CPU transcoder M/N registers, with an
 * optional second set into m2_n2.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc , enum transcoder transcoder , struct intel_link_m_n *m_n , struct intel_link_m_n *m2_n2 ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p ; uint32_t tmp___3 ; uint32_t tmp___4 ; struct
drm_i915_private *__p___0 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 4U) { m_n->link_m = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393280U), 1); m_n->link_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393284U), 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393264U), 1); m_n->gmch_m = tmp & 2181038079U; m_n->gmch_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393268U), 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393264U), 1); m_n->tu = ((tmp___0 & 2113929216U) >> 25) + 1U; if ((unsigned long )m2_n2 != (unsigned long )((struct intel_link_m_n *)0)) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 7U) { if ((int )(crtc->config)->has_drrs) { m2_n2->link_m = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int 
)dev_priv->info.display_mmio_offset) + 393288U), 1); m2_n2->link_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393292U), 1); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393272U), 1); m2_n2->gmch_m = tmp___1 & 2181038079U; m2_n2->gmch_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393276U), 1); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393272U), 1); m2_n2->tu = ((tmp___2 & 2113929216U) >> 25) + 1U; } else { } } else { } } else { } } else { m_n->link_m = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 458848), 1); m_n->link_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 458852), 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 458832), 1); m_n->gmch_m = tmp___3 & 2181038079U; m_n->gmch_n = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 458836), 1); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 458832), 1); m_n->tu = ((tmp___4 & 2113929216U) >> 25) + 1U; } return; } } void intel_dp_get_m_n(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { { if ((int )pipe_config->has_pch_encoder) { intel_pch_transcoder_get_m_n(crtc, & pipe_config->dp_m_n); } else { 
/* Else-branch tail of intel_dp_get_m_n() (the function opens on the
 * previous source line): non-PCH encoders read the CPU transcoder M/N,
 * including the second m2_n2 set. */
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, & pipe_config->dp_m_n, & pipe_config->dp_m2_n2); } return; } }
/*
 * ironlake_get_fdi_m_n_config - read back the FDI M/N configuration by
 * delegating to intel_cpu_transcoder_get_m_n() with no m2_n2 output.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) {
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, & pipe_config->fdi_m_n, (struct intel_link_m_n *)0);
return; } }
/*
 * skylake_get_pfit_config - read back panel-fitter state by scanning the
 * crtc->num_scalers scaler-control registers.  The CIL-folded offset
 * expression (i * 256 + (int )crtc->pipe * (...) + 426368) simplifies to
 * i * 256 + pipe * 2048 + 426368.  A scaler is taken as the pipe scaler
 * when its control register has bit 31 set ((int )ps_ctrl < 0) and bits
 * 25-27 clear ((ps_ctrl & 234881024U) == 0U, mask 0x0E000000 --
 * NOTE(review): presumably "not bound to a plane"; confirm against the
 * SKL scaler-control register layout).  Position and size come from the
 * matching registers at +426352 / +426356 (same i*256 + pipe*2048
 * stride).  Finally, bit 31 of scaler_users marks the pipe itself as a
 * scaler user when a scaler was found.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc_scaler_state *scaler_state ; uint32_t ps_ctrl ; int id ; int i ;
{
dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; scaler_state = & pipe_config->scaler_state; ps_ctrl = 0U; id = -1; i = 0;
goto ldv_54019;
/* Loop body: probe scaler i's control register. */
ldv_54018: ps_ctrl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((i * 256 + (int )crtc->pipe * ((i * 256 + 428416) + (i * -256 + -426368))) + 426368), 1);
if ((int )ps_ctrl < 0 && (ps_ctrl & 234881024U) == 0U) { id = i; pipe_config->pch_pfit.enabled = 1; pipe_config->pch_pfit.pos = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((i * 256 + (int )crtc->pipe * ((i * 256 + 428400) + (i * -256 + -426352))) + 426352), 1); pipe_config->pch_pfit.size = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((i * 256 + (int )crtc->pipe * ((i * 256 + 428404) + (i * -256 + -426356))) + 426356), 1); goto ldv_54017; } else { }
i = i + 1;
ldv_54019: ;
if (crtc->num_scalers > i) { goto ldv_54018; } else { }
ldv_54017: scaler_state->scaler_id = id;
if (id >= 0) { scaler_state->scaler_users = scaler_state->scaler_users | 2147483648U; } else { scaler_state->scaler_users = scaler_state->scaler_users & 2147483647U; }
return; } }
/*
 * skylake_get_initial_plane_config - declarations only here; the body
 * continues on the following source lines.
 */
static void skylake_get_initial_plane_config(struct intel_crtc *crtc , struct intel_initial_plane_config *plane_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; u32 base ; u32 offset ; u32 stride_mult ; u32 tiling ; int pipe ; int
fourcc ; int pixel_format ; unsigned int aligned_height ; struct drm_framebuffer *fb ; struct intel_framebuffer *intel_fb ; void *tmp ; long tmp___0 ; int tmp___1 ; int __ret_warn_on ; long tmp___2 ; uint32_t tmp___3 ; long tmp___4 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; tmp = kzalloc(176UL, 208U); intel_fb = (struct intel_framebuffer *)tmp; if ((unsigned long )intel_fb == (unsigned long )((struct intel_framebuffer *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("skylake_get_initial_plane_config", "failed to alloc fb\n"); } else { } return; } else { } fb = & intel_fb->base; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459136), 1); if ((int )val >= 0) { goto error; } else { } pixel_format = (int )val & 251658240; fourcc = skl_format_to_fourcc(pixel_format, (val & 1048576U) != 0U, (val & 48U) != 0U); fb->pixel_format = (uint32_t )fourcc; tmp___1 = drm_format_plane_cpp((uint32_t )fourcc, 0); fb->bits_per_pixel = tmp___1 * 8; tiling = val & 7168U; switch (tiling) { case 0U: fb->modifier[0] = 0ULL; goto ldv_54040; case 1024U: plane_config->tiling = 1U; fb->modifier[0] = 72057594037927937ULL; goto ldv_54040; case 4096U: fb->modifier[0] = 72057594037927938ULL; goto ldv_54040; case 5120U: fb->modifier[0] = 72057594037927939ULL; goto ldv_54040; default: __ret_warn_on = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 8925, "Missing switch case (%lu) in %s\n", (long )tiling, "skylake_get_initial_plane_config"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto error; } ldv_54040: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe 
/* NOTE(review): auto-generated CIL/LDV dump of drivers/gpu/drm/i915/intel_display.c
 * (linux-4.2-rc1).  Comments annotate structure only; all code tokens unchanged.
 * Below: tail of skylake_get_initial_plane_config() (definition starts before this
 * chunk) -- reads the firmware-programmed plane surface base/offset, pipe source
 * size and stride registers and publishes them through *plane_config. */
* 4096 + 459164), 1); base = tmp___3 & 4294963200U; /* mask low 12 bits: surface base is 4K-aligned */ plane_config->base = base; offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459172), 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459152), 1);
/* pipe source size register stores (height-1)<<16 | (width-1) */
fb->height = ((val >> 16) & 4095U) + 1U; fb->width = (val & 8191U) + 1U; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459144), 1); stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0], fb->pixel_format); fb->pitches[0] = (val & 1023U) * stride_mult; aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, fb->modifier[0]); plane_config->size = (int )(fb->pitches[0] * aligned_height); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("skylake_get_initial_plane_config", "pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe + 65, fb->width, fb->height, fb->bits_per_pixel, base, fb->pitches[0], plane_config->size); } else { } plane_config->fb = intel_fb; return; error: /* reached when the plane is disabled or the tiling value was unknown: drop the allocated fb */ kfree((void const *)fb); return; } }
/*
 * ironlake_get_pfit_config() - read back the PCH panel-fitter hardware state
 * for @crtc into @pipe_config->pch_pfit (enabled flag, window position, size).
 * The per-pipe fitter control register is read first; bit 31 set (value reads
 * negative as int) means the fitter is enabled.  On gen7 the control register
 * also carries a pipe-select field (named PF_PIPE_SEL_MASK_IVB per the WARN
 * string) and we warn if the fitter is attached to a different pipe.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426112), 1); if ((int )tmp < 0) { pipe_config->pch_pfit.enabled = 1; pipe_config->pch_pfit.pos = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426096), 1); pipe_config->pch_pfit.size = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 2048 + 426100), 1);
/* gen7 (IVB per the WARN text): verify the fitter's pipe-select bits 30:29 name this pipe */
__p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 7U) { __ret_warn_on = (tmp & 1610612736U) != (uint32_t )((int )crtc->pipe << 29); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 8980, "WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } } else { } return; } }
/*
 * ironlake_get_initial_plane_config() - head; body continues on the following
 * lines.  ILK-style counterpart of the skylake variant above: bail out unless
 * the primary plane control register has bit 31 set, then kzalloc(176, gfp
 * 208U /* = GFP_KERNEL *\/) an intel_framebuffer describing the BIOS fb.
 */
static void ironlake_get_initial_plane_config(struct intel_crtc *crtc , struct intel_initial_plane_config *plane_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; u32 base ; u32 offset ; int pipe ; int fourcc ; int pixel_format ; unsigned int aligned_height ; struct drm_framebuffer *fb ; struct intel_framebuffer *intel_fb ; void *tmp ; long tmp___0 ; struct drm_i915_private *__p ; int tmp___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; long tmp___3 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = crtc->pipe; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U), 1); if ((int )val >= 0) { return; } else { } tmp = kzalloc(176UL, 208U); intel_fb = (struct intel_framebuffer *)tmp; if ((unsigned long )intel_fb == (unsigned long )((struct intel_framebuffer *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ironlake_get_initial_plane_config", "failed to alloc fb\n"); } else { } return; } else { } fb = & intel_fb->base; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { if ((val & 1024U) != 0U) { plane_config->tiling = 1U; fb->modifier[0] =
/* continuation of ironlake_get_initial_plane_config(): X-tiled modifier when
 * the tiling bit (1<<10) is set on gen4+, then fourcc/bpp decode, surface
 * base/offset selection, pipe source size and stride readout. */
72057594037927937ULL; } else { } } else { } pixel_format = (int )val & 1006632960; fourcc = i9xx_format_to_fourcc(pixel_format); fb->pixel_format = (uint32_t )fourcc; tmp___1 = drm_format_plane_cpp((uint32_t )fourcc, 0); fb->bits_per_pixel = tmp___1 * 8; tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 1); base = tmp___2 & 4294963200U; /* 4K-aligned surface address */
/* platform-dependent choice of which offset register to read (the byte at
 * dev_priv+45 is a platform flag; gen==8 and tiled planes use +459172U,
 * otherwise the linear offset register +459140U) */
__p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), 1); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), 1); } else { goto _L; } } else _L: /* CIL Label */ if (plane_config->tiling != 0U) { offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), 1); } else { offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), 1); } } plane_config->base = base; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), 1);
/* transcoder source size register stores dimensions minus one */
fb->width = ((val >> 16) & 4095U) + 1U; fb->height = (val & 4095U) + 1U; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459144U), 1); fb->pitches[0] = val & 4294967232U; aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, fb->modifier[0]); plane_config->size = (int )(fb->pitches[0] * aligned_height); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("ironlake_get_initial_plane_config", "pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe + 65, fb->width, fb->height, fb->bits_per_pixel, base, fb->pitches[0], plane_config->size); } else { } plane_config->fb = intel_fb; return; } }
/*
 * ironlake_get_pipe_config() - head; body continues on the following lines.
 * Returns true (1) and fills @pipe_config with the current hardware state of
 * @crtc (pipe bpp, color range, PCH encoder/FDI lanes, shared DPLL, pixel
 * multiplier, timings); returns false (0) when the pipe's power domain is
 * off or the pipe itself is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; bool tmp___0 ; int tmp___1 ; struct intel_shared_dpll *pll ; struct drm_i915_private *__p ; int __ret_warn_on ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; uint32_t tmp___5 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )crtc->pipe); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } pipe_config->cpu_transcoder = (enum transcoder )crtc->pipe; pipe_config->shared_dpll = -1; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )crtc->pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); if ((int )tmp >= 0) { return (0); } else { } /* decode pipe bpp from bits 7:5 of the pipe config register */ switch (tmp & 224U) { case 64U: pipe_config->pipe_bpp = 18; goto
/* continuation of ironlake_get_pipe_config(): finish the bpp switch, read
 * limited-range flag, then PCH/FDI state, shared-DPLL selection and pixel
 * multiplier. */
ldv_54110; case 0U: pipe_config->pipe_bpp = 24; goto ldv_54110; case 32U: pipe_config->pipe_bpp = 30; goto ldv_54110; case 96U: pipe_config->pipe_bpp = 36; goto ldv_54110; default: ; goto ldv_54110; } ldv_54110: ; if ((tmp & 8192U) != 0U) { pipe_config->limited_color_range = 1; } else { } /* PCH transcoder enabled (bit 31) => this pipe drives a PCH encoder over FDI */ tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 4096 + 983048), 1); if ((int )tmp___5 < 0) { pipe_config->has_pch_encoder = 1; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 4096 + 983052), 1); pipe_config->fdi_lanes = (int )(((tmp & 3670016U) >> 19) + 1U); ironlake_get_fdi_m_n_config(crtc, pipe_config); __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p->pch_type == 1U) { /* pch_type 1: DPLL is tied 1:1 to the pipe */ pipe_config->shared_dpll = (enum intel_dpll_id )crtc->pipe; } else { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815104L, 1); if (((uint32_t )(1 << (int )crtc->pipe * 4) & tmp) != 0U) { pipe_config->shared_dpll = 1; } else { pipe_config->shared_dpll = 0; } } pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )pipe_config->shared_dpll; tmp___2 = (*(pll->get_hw_state))(dev_priv, pll, & pipe_config->dpll_hw_state); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } __ret_warn_on = tmp___3; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9117, "WARN_ON(!pll->get_hw_state(dev_priv, pll, &pipe_config->dpll_hw_state))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp = pipe_config->dpll_hw_state.dpll; pipe_config->pixel_multiplier = ((tmp & 3584U) >> 9) + 1U; ironlake_pch_clock_get(crtc, pipe_config); } else { pipe_config->pixel_multiplier = 1U; } intel_get_pipe_timings(crtc, pipe_config);
ironlake_get_pfit_config(crtc, pipe_config); return (1); } }
/*
 * assert_can_disable_lcpll() - sanity checks (WARNs, or drm_err when
 * i915.verbose_state_checks is off) that nothing still depends on LCPLL
 * before it is shut down: no active CRTC, power well off, SPLL and both
 * WRPLLs off, panel power off, PWMs off, PCH GTC off, IRQs off.  Head and
 * the CRTC-list walk are here; the remaining register checks continue on
 * the following lines.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct intel_crtc *crtc ; struct list_head const *__mptr ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp ; long tmp___0 ; struct list_head const *__mptr___0 ; int __ret_warn_on___1 ; uint32_t tmp___1 ; int __ret_warn_on___2 ; long tmp___2 ; long tmp___3 ; int __ret_warn_on___3 ; uint32_t tmp___4 ; int __ret_warn_on___4 ; long tmp___5 ; long tmp___6 ; int __ret_warn_on___5 ; uint32_t tmp___7 ; int __ret_warn_on___6 ; long tmp___8 ; long tmp___9 ; int __ret_warn_on___7 ; uint32_t tmp___10 ; int __ret_warn_on___8 ; long tmp___11 ; long tmp___12 ; int __ret_warn_on___9 ; uint32_t tmp___13 ; int __ret_warn_on___10 ; long tmp___14 ; long tmp___15 ; int __ret_warn_on___11 ; uint32_t tmp___16 ; int __ret_warn_on___12 ; long tmp___17 ; long tmp___18 ; int __ret_warn_on___13 ; uint32_t tmp___19 ; int __ret_warn_on___14 ; long tmp___20 ; long tmp___21 ; struct drm_i915_private *__p ; int __ret_warn_on___15 ; uint32_t tmp___22 ; int __ret_warn_on___16 ; long tmp___23 ; long tmp___24 ; int __ret_warn_on___17 ; uint32_t tmp___25 ; int __ret_warn_on___18 ; long tmp___26 ; long tmp___27 ; int __ret_warn_on___19 ; uint32_t tmp___28 ; int __ret_warn_on___20 ; long tmp___29 ; long tmp___30 ; int __ret_warn_on___21 ; bool tmp___31 ; int __ret_warn_on___22 ; long tmp___32 ; long tmp___33 ; { dev = dev_priv->dev;
/* list_for_each_entry over dev->mode_config.crtc_list, lowered by CIL into
 * the goto loop ldv_54137/ldv_54138; -0x10 is container_of() offset */
__mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_54138; ldv_54137: __ret_warn_on = (int )crtc->active; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp != 0L) {
/* interior of assert_can_disable_lcpll(): each cluster below reads one
 * register and WARNs (or drm_err()s) if the named unit is still enabled;
 * "(int )x < 0" tests bit 31 of the register value. */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9143, "CRTC for pipe %c enabled\n", (int )crtc->pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("CRTC for pipe %c enabled\n", (int )crtc->pipe + 65); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_54138: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_54137; } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); __ret_warn_on___1 = tmp___1 != 0U; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9145, "Power well on\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("Power well on\n"); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286752L, 1); __ret_warn_on___3 = (int )tmp___4 < 0; tmp___6 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___6 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___4 = 1; tmp___5 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___5 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9146, "SPLL enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); } else { drm_err("SPLL enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286784L, 1); __ret_warn_on___5 = (int )tmp___7 < 0; tmp___9 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___9 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___6 = 1; tmp___8 = ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9147, "WRPLL1 enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); } else { drm_err("WRPLL1 enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286816L, 1); __ret_warn_on___7 = (int )tmp___10 < 0; tmp___12 = ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); if (tmp___12 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___8 = 1; tmp___11 = ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); if (tmp___11 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9148, "WRPLL2 enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); } else { drm_err("WRPLL2 enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); tmp___13 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv,
815616L, 1); __ret_warn_on___9 = (int )tmp___13 < 0; tmp___15 = ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); if (tmp___15 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___10 = 1; tmp___14 = ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); if (tmp___14 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9149, "Panel power on\n"); } else { } ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); } else { drm_err("Panel power on\n"); } } else { } ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); tmp___16 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295504L, 1); __ret_warn_on___11 = (int )tmp___16 < 0; tmp___18 = ldv__builtin_expect(__ret_warn_on___11 != 0, 0L); if (tmp___18 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___12 = 1; tmp___17 = ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); if (tmp___17 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9151, "CPU PWM1 enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); } else { drm_err("CPU PWM1 enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___11 != 0, 0L);
/* CPU PWM2 check only on the platform selected by the flag byte at dev_priv+45 */
__p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { tmp___19 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295760L, 1); __ret_warn_on___13 = (int )tmp___19 < 0; tmp___21 = ldv__builtin_expect(__ret_warn_on___13 != 0, 0L); if (tmp___21 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___14 = 1; tmp___20 = ldv__builtin_expect(__ret_warn_on___14 != 0, 0L); if (tmp___20 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9154, "CPU PWM2 enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___14 != 0, 0L); } else { drm_err("CPU PWM2 enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___13 != 0, 0L); } else { } tmp___22 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); __ret_warn_on___15 = (int )tmp___22 < 0; tmp___24 = ldv__builtin_expect(__ret_warn_on___15 != 0, 0L); if (tmp___24 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___16 = 1; tmp___23 = ldv__builtin_expect(__ret_warn_on___16 != 0, 0L); if (tmp___23 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9156, "PCH PWM1 enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___16 != 0, 0L); } else { drm_err("PCH PWM1 enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___15 != 0, 0L); tmp___25 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295936L, 1); __ret_warn_on___17 = (int )tmp___25 < 0; tmp___27 = ldv__builtin_expect(__ret_warn_on___17 != 0, 0L); if (tmp___27 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___18 = 1; tmp___26 = ldv__builtin_expect(__ret_warn_on___18 != 0, 0L); if (tmp___26 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9158, "Utility pin enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___18 != 0, 0L); } else { drm_err("Utility pin enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___17 != 0, 0L);
/* tail of assert_can_disable_lcpll(): final two checks (PCH GTC, IRQs). */
tmp___28 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 946176L, 1); __ret_warn_on___19 = (int )tmp___28 < 0; tmp___30 = ldv__builtin_expect(__ret_warn_on___19 != 0, 0L); if (tmp___30 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___20 = 1; tmp___29 = ldv__builtin_expect(__ret_warn_on___20 != 0, 0L); if (tmp___29 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9159, "PCH GTC enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___20 != 0, 0L); } else { drm_err("PCH GTC enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___19 != 0, 0L); tmp___31 = intel_irqs_enabled(dev_priv); __ret_warn_on___21 = (int )tmp___31; tmp___33 = ldv__builtin_expect(__ret_warn_on___21 != 0, 0L); if (tmp___33 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___22 = 1; tmp___32 = ldv__builtin_expect(__ret_warn_on___22 != 0, 0L); if (tmp___32 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9167, "IRQs enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on___22 != 0, 0L); } else { drm_err("IRQs enabled\n"); } } else { } ldv__builtin_expect(__ret_warn_on___21 != 0, 0L); return; } }
/*
 * hsw_read_dcomp() - return the D_COMP register value; the MMIO address
 * differs per platform (flag byte at dev_priv+45 selects 1335052 vs 1278276).
 */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1335052L, 1); return (tmp); } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278276L, 1); return (tmp___0); } } }
/* storage class of hsw_write_dcomp(), whose definition continues on the next line */
static
/*
 * hsw_write_dcomp() - write @val to D_COMP; on one platform variant this goes
 * through the pcode mailbox (sandybridge_pcode_write, cmd 17) under
 * rps.hw_lock, otherwise via direct MMIO write + posting read.
 */
void hsw_write_dcomp(struct drm_i915_private *dev_priv , uint32_t val ) { struct drm_device *dev ; int tmp ; struct drm_i915_private *__p ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { mutex_lock_nested(& dev_priv->rps.hw_lock, 0U); tmp = sandybridge_pcode_write(dev_priv, 17U, val); if (tmp != 0) { drm_err("Failed to write to D_COMP\n"); } else { } mutex_unlock(& dev_priv->rps.hw_lock); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278276L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278276L, 0); } return; } }
/*
 * hsw_disable_lcpll() - turn off LCPLL (register 1245248 = LCPLL_CTL per the
 * error strings): assert nothing needs it, optionally switch the CD clock to
 * FCLK first (bit 21, wait for ack bit 19 with a 1 ms jiffies timeout), set
 * the PLL disable bit 31 and wait for the lock bit 30 to drop, put D_COMP
 * into comp-disable, and optionally allow power-down (bit 22).  The CIL
 * goto loops (ldv_5422x/ldv_5423x/ldv_5425x) are lowered wait_for() polls.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv , bool switch_to_fclk , bool allow_power_down ) { uint32_t val ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; uint32_t tmp___1 ; unsigned long timeout_____0 ; unsigned long tmp___2 ; int ret_____0 ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; unsigned long timeout_____1 ; unsigned long tmp___6 ; int ret_____1 ; uint32_t tmp___7 ; bool tmp___8 ; uint32_t tmp___9 ; { assert_can_disable_lcpll(dev_priv); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((int )switch_to_fclk) { val = val | 2097152U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245248L, val, 1); tmp = msecs_to_jiffies(1U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_54227; ldv_54226: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___0 & 524288U) == 0U) { ret__ = -110; } else { } goto ldv_54225; } else { } cpu_relax(); ldv_54227: tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___1 & 524288U) == 0U) { goto ldv_54226; } else { } ldv_54225: ; if (ret__ != 0) { drm_err("Switching to FCLK failed\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); } else { } /* set PLL-disable bit 31 */ val = val |
2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245248L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 0); tmp___2 = msecs_to_jiffies(1U); timeout_____0 = (tmp___2 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_54239; ldv_54238: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___3 & 1073741824U) != 0U) { ret_____0 = -110; } else { } goto ldv_54237; } else { } tmp___4 = drm_can_sleep___5(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_54239: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___5 & 1073741824U) != 0U) { goto ldv_54238; } else { } ldv_54237: ; if (ret_____0 != 0) { drm_err("LCPLL still locked\n"); } else { } /* request D_COMP comp-disable (bit 0) and poll RCOMP-in-progress (bit 9) clear */ val = hsw_read_dcomp(dev_priv); val = val | 1U; hsw_write_dcomp(dev_priv, val); __const_udelay(500UL); tmp___6 = msecs_to_jiffies(1U); timeout_____1 = (tmp___6 + (unsigned long )jiffies) + 1UL; ret_____1 = 0; goto ldv_54251; ldv_54250: ; if ((long )(timeout_____1 - (unsigned long )jiffies) < 0L) { tmp___7 = hsw_read_dcomp(dev_priv); if ((tmp___7 & 512U) != 0U) { ret_____1 = -110; } else { } goto ldv_54249; } else { } tmp___8 = drm_can_sleep___5(); if ((int )tmp___8) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_54251: tmp___9 = hsw_read_dcomp(dev_priv); if ((tmp___9 & 512U) != 0U) { goto ldv_54250; } else { } ldv_54249: ; if (ret_____1 != 0) { drm_err("D_COMP RCOMP still in progress\n"); } else { } if ((int )allow_power_down) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); val = val | 4194304U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245248L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 0); } else { } return; } }
/*
 * hsw_restore_lcpll() - inverse of hsw_disable_lcpll(); head here, body on
 * the following lines: re-lock LCPLL and switch the CD clock back from FCLK.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv ) { uint32_t val ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ;
/* continuation of hsw_restore_lcpll(): early-out if LCPLL is already locked
 * and not in FCLK/power-down mode, else take forcewake, clear power-down,
 * re-enable D_COMP, clear the PLL-disable bit and poll for lock (bit 30),
 * then switch back from FCLK (clear bit 21, poll ack bit 19). */
bool tmp___1 ; uint32_t tmp___2 ; unsigned long timeout_____0 ; unsigned long tmp___3 ; int ret_____0 ; uint32_t tmp___4 ; uint32_t tmp___5 ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((val & 3227516928U) == 1073741824U) { return; } else { } intel_uncore_forcewake_get(dev_priv, 7); if ((val & 4194304U) != 0U) { val = val & 4290772991U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245248L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 0); } else { } val = hsw_read_dcomp(dev_priv); val = val | 256U; val = val & 4294967294U; hsw_write_dcomp(dev_priv, val); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245248L, val, 1); tmp = msecs_to_jiffies(5U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_54267; ldv_54266: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___0 & 1073741824U) == 0U) { ret__ = -110; } else { } goto ldv_54265; } else { } tmp___1 = drm_can_sleep___5(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_54267: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___2 & 1073741824U) == 0U) { goto ldv_54266; } else { } ldv_54265: ; if (ret__ != 0) { drm_err("LCPLL not locked yet\n"); } else { } if ((val & 2097152U) != 0U) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); val = val & 4292870143U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1245248L, val, 1); tmp___3 = msecs_to_jiffies(1U); timeout_____0 = (tmp___3 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_54279; ldv_54278: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___4 & 524288U) != 0U) { ret_____0 = -110; } else { } goto ldv_54277; } else
{ } cpu_relax(); ldv_54279: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); if ((tmp___5 & 524288U) != 0U) { goto ldv_54278; } else { } ldv_54277: ; if (ret_____0 != 0) { drm_err("Switching back to LCPLL failed\n"); } else { } } else { } intel_uncore_forcewake_put(dev_priv, 7); return; } }
/*
 * hsw_enable_pc8() - enter package C8+: on pch_id 39936 clear bit 12 of
 * register 794656 first, then disable clockout-DP and LCPLL (switching to
 * FCLK and allowing power-down).
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; uint32_t val ; long tmp ; { dev = dev_priv->dev; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_enable_pc8", "Enabling package C8+\n"); } else { } if ((unsigned int )dev_priv->pch_id == 39936U) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794656L, 1); val = val & 4294963199U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794656L, val, 1); } else { } lpt_disable_clkout_dp(dev); hsw_disable_lcpll(dev_priv, 1, 1); return; } }
/*
 * hsw_disable_pc8() - leave package C8+: mirror of hsw_enable_pc8 (restore
 * LCPLL, re-init PCH refclk, restore bit 12 on pch_id 39936, re-prepare DDI).
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; uint32_t val ; long tmp ; { dev = dev_priv->dev; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_disable_pc8", "Disabling package C8+\n"); } else { } hsw_restore_lcpll(dev_priv); lpt_init_pch_refclk(dev); if ((unsigned int )dev_priv->pch_id == 39936U) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794656L, 1); val = val | 4096U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 794656L, val, 1); } else { } intel_prepare_ddi(dev); return; } }
/*
 * broxton_modeset_global_resources() - head; body completes on the next line:
 * recompute the maximum pixel clock and reprogram cdclk if it changed.
 */
static void broxton_modeset_global_resources(struct drm_atomic_state *old_state ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int max_pixclk ; int tmp ; int req_cdclk ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; { dev = old_state->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_mode_max_pixclk(dev, (struct drm_atomic_state *)0); max_pixclk = tmp; __ret_warn_on = max_pixclk < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) {
/* tail of broxton_modeset_global_resources(): WARN and bail on a negative
 * max_pixclk, otherwise set the new cdclk if it differs. */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9366, "WARN_ON(max_pixclk < 0)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk); if ((unsigned int )req_cdclk != dev_priv->cdclk_freq) { broxton_set_cdclk(dev, req_cdclk); } else { } return; } }
/*
 * haswell_crtc_compute_clock() - select a DDI PLL for @crtc_state; returns
 * -22 (-EINVAL) if intel_ddi_pll_select() fails, 0 on success (and clears
 * lowfreq_avail).
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc , struct intel_crtc_state *crtc_state ) { bool tmp ; int tmp___0 ; { tmp = intel_ddi_pll_select(crtc, crtc_state); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-22); } else { } crtc->lowfreq_avail = 0; return (0); } }
/*
 * bxt_get_ddi_pll() - Broxton: DDI PLL is fixed per port (A/B/C -> 0/1/2);
 * any other port value is an error.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv , enum port port , struct intel_crtc_state *pipe_config ) { { switch ((unsigned int )port) { case 0U: pipe_config->ddi_pll_sel = 0U; pipe_config->shared_dpll = 0; goto ldv_54312; case 1U: pipe_config->ddi_pll_sel = 1U; pipe_config->shared_dpll = 1; goto ldv_54312; case 2U: pipe_config->ddi_pll_sel = 2U; pipe_config->shared_dpll = 2; goto ldv_54312; default: drm_err("Incorrect port type\n"); } ldv_54312: ; return; } }
/*
 * skylake_get_ddi_pll() - head; completes on the next line.  Extract the
 * 2-bit per-port PLL selector from register 442460 (3 bits per port, field
 * at bit port*3+1); selector 0 additionally snapshots ctrl1 bits from
 * register 442456, selectors 1..3 map to shared DPLL 0..2.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv , enum port port , struct intel_crtc_state *pipe_config ) { u32 temp ; u32 dpll_ctl1 ; uint32_t tmp ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442460L, 1); temp = tmp & (uint32_t )(3 << (int )((unsigned int )port * 3U + 1U)); pipe_config->ddi_pll_sel = temp >> (int )((unsigned int )port * 3U + 1U); switch (pipe_config->ddi_pll_sel) { case 0U: dpll_ctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 1); pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 63U; goto ldv_54324; case 1U: pipe_config->shared_dpll = 0; goto ldv_54324; case 2U: pipe_config->shared_dpll = 1;
/* tail of skylake_get_ddi_pll(): remaining selector case. */
goto ldv_54324; case 3U: pipe_config->shared_dpll = 2; goto ldv_54324; } ldv_54324: ; return; } }
/*
 * haswell_get_ddi_pll() - read the per-port clock-select register
 * ((port + 71744) * 4) and map the two recognized values to shared DPLL
 * 0 or 1; other values leave shared_dpll untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv , enum port port , struct intel_crtc_state *pipe_config ) { { pipe_config->ddi_pll_sel = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71744U) * 4U), 1); switch (pipe_config->ddi_pll_sel) { case 2147483648U: pipe_config->shared_dpll = 0; goto ldv_54334; case 2684354560U: pipe_config->shared_dpll = 1; goto ldv_54334; } ldv_54334: ; return; } }
/*
 * haswell_get_ddi_port_state() - determine which DDI port (bits 30:28 of the
 * transcoder DDI-func-ctl register) feeds @crtc's transcoder, dispatch to the
 * platform-specific *_get_ddi_pll() to recover the PLL selection, read back
 * the PLL hardware state (WARN if it reports disabled), and on gen<=8 with
 * port E check for an active PCH transcoder and record FDI lane count.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_shared_dpll *pll ; enum port port ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; int __ret_warn_on ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; struct drm_i915_private *__p___2 ; uint32_t tmp___3 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )pipe_config->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); port = (enum port )((tmp & 1879048192U) >> 28);
/* platform dispatch: flag byte at dev_priv+45 selects SKL; gen 9 without it is BXT; else HSW/BDW */
__p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { skylake_get_ddi_pll(dev_priv, port, pipe_config); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { bxt_get_ddi_pll(dev_priv, port, pipe_config); } else { haswell_get_ddi_pll(dev_priv, port, pipe_config); } } else { haswell_get_ddi_pll(dev_priv, port, pipe_config); } } if ((int )pipe_config->shared_dpll >= 0) { pll =
(struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )pipe_config->shared_dpll; tmp___0 = (*(pll->get_hw_state))(dev_priv, pll, & pipe_config->dpll_hw_state); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9479, "WARN_ON(!pll->get_hw_state(dev_priv, pll, &pipe_config->dpll_hw_state))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 8U && (unsigned int )port == 4U) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983048L, 1); if ((int )tmp___3 < 0) { pipe_config->has_pch_encoder = 1; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 1); pipe_config->fdi_lanes = (int )(((tmp & 3670016U) >> 19) + 1U); ironlake_get_fdi_m_n_config(crtc, pipe_config); } else { } } else { } return; } }
/*
 * haswell_get_pipe_config() - head only; the body runs past the end of this
 * chunk.  Reads back full pipe hardware state for HSW+ (power domain check,
 * eDP transcoder resolution, DDI port state, etc.).
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain pfit_domain ; uint32_t tmp ; bool tmp___0 ; int tmp___1 ; enum pipe trans_edp_pipe ; int __ret_warn_on ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int __ret_warn_on___0 ; struct drm_i915_private *__p___1 ; long tmp___5 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; bool tmp___6 ; bool tmp___7 ; uint32_t tmp___8 ; int tmp___9 ; struct drm_i915_private *__p___4 ; uint32_t tmp___10 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 =
intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )crtc->pipe); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } pipe_config->cpu_transcoder = (enum transcoder )crtc->pipe; pipe_config->shared_dpll = -1; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[3] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); if ((int )tmp < 0) { switch (tmp & 28672U) { default: __ret_warn_on = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9519, "unknown pipe linked to edp transcoder\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); case 16384U: ; case 0U: trans_edp_pipe = 0; goto ldv_54385; case 20480U: trans_edp_pipe = 1; goto ldv_54385; case 24576U: trans_edp_pipe = 2; goto ldv_54385; } ldv_54385: ; if ((int )crtc->pipe == (int )trans_edp_pipe) { pipe_config->cpu_transcoder = 3; } else { } } else { } tmp___3 = intel_display_power_is_enabled(dev_priv, (unsigned int )pipe_config->cpu_transcoder != 3U ? 
(enum intel_display_power_domain )((unsigned int )pipe_config->cpu_transcoder + 6U) : 9); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (0); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )pipe_config->cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); if ((int )tmp >= 0) { return (0); } else { } haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { skl_init_scalers(dev, crtc, pipe_config); } else { } pfit_domain = (enum intel_display_power_domain )((int )crtc->pipe + 3); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 8U) { pipe_config->scaler_state.scaler_id = -1; pipe_config->scaler_state.scaler_users = pipe_config->scaler_state.scaler_users & 2147483647U; } else { } tmp___6 = intel_display_power_is_enabled(dev_priv, pfit_domain); if ((int )tmp___6) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { skylake_get_pfit_config(crtc, pipe_config); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 8U) { ironlake_get_pfit_config(crtc, pipe_config); } else { __ret_warn_on___0 = 1; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { __p___1 = to_i915((struct drm_device const *)dev); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9565, "Missing switch case (%lu) in %s\n", (long )__p___1->info.gen, "haswell_get_pipe_config"); } else { } 
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } } } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { tmp___7 = hsw_crtc_supports_ips(crtc); if ((int )tmp___7) { tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 275464L, 1); if ((int )tmp___8 < 0) { tmp___9 = 1; } else { tmp___9 = 0; } } else { tmp___9 = 0; } pipe_config->ips_enabled = (bool )tmp___9; } else { } if ((unsigned int )pipe_config->cpu_transcoder != 3U) { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )pipe_config->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393260U), 1); pipe_config->pixel_multiplier = tmp___10 + 1U; } else { pipe_config->pixel_multiplier = 1U; } return (1); } } static void i845_update_cursor(struct drm_crtc *crtc , u32 base ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; uint32_t cntl ; uint32_t size ; unsigned int width ; unsigned int height ; unsigned int stride ; unsigned long tmp ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; cntl = 0U; size = 0U; if (base != 0U) { width = ((intel_crtc->base.cursor)->state)->crtc_w; height = ((intel_crtc->base.cursor)->state)->crtc_h; tmp = __roundup_pow_of_two((unsigned long )width); stride = (unsigned int )tmp * 4U; switch (stride) { default: __ret_warn_once = 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9597, "Invalid cursor width/stride, width=%u, stride=%u\n", width, stride); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); stride = 256U; case 256U: ; case 512U: ; case 1024U: ; case 2048U: ; goto ldv_54451; } ldv_54451: tmp___3 = ffs((int )stride); cntl = ((uint32_t )((tmp___3 + -9) << 28) | cntl) | 3288334336U; size = (height << 12) | width; } else { } if (intel_crtc->cursor_cntl != 0U && ((intel_crtc->cursor_base != base || intel_crtc->cursor_size != size) || intel_crtc->cursor_cntl != cntl)) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 458880L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 458880L, 0); intel_crtc->cursor_cntl = 0U; } else { } if (intel_crtc->cursor_base != base) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 458884L, base, 1); intel_crtc->cursor_base = base; } else { } if (intel_crtc->cursor_size != size) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 458912L, size, 1); intel_crtc->cursor_size = size; } else { } if (intel_crtc->cursor_cntl != cntl) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 458880L, cntl, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 458880L, 0); intel_crtc->cursor_cntl = cntl; } else { } return; } } static void i9xx_update_cursor(struct drm_crtc *crtc , u32 base ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; uint32_t cntl ; int __ret_warn_on ; long tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { 
dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; cntl = 0U; if (base != 0U) { cntl = 67108864U; switch (((intel_crtc->base.cursor)->state)->crtc_w) { case 64U: cntl = cntl | 39U; goto ldv_54464; case 128U: cntl = cntl | 34U; goto ldv_54464; case 256U: cntl = cntl | 35U; goto ldv_54464; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 9666, "Missing switch case (%lu) in %s\n", (long )((intel_crtc->base.cursor)->state)->crtc_w, "i9xx_update_cursor"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } ldv_54464: cntl = (uint32_t )(pipe << 28) | cntl; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { cntl = cntl | 16777216U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { cntl = cntl | 16777216U; } else { } } else { } } } else { } if (((crtc->cursor)->state)->rotation == 4U) { cntl = cntl | 32768U; } else { } if (intel_crtc->cursor_cntl != cntl) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458880U), cntl, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458880U), 0); intel_crtc->cursor_cntl = 
cntl; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458884U), base, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458884U), 0); intel_crtc->cursor_base = base; return; } } static void intel_crtc_update_cursor(struct drm_crtc *crtc , bool on ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int pipe ; int x ; int y ; u32 base ; u32 pos ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; x = crtc->cursor_x; y = crtc->cursor_y; base = 0U; pos = 0U; if ((int )on) { base = intel_crtc->cursor_addr; } else { } if ((intel_crtc->config)->pipe_src_w <= x) { base = 0U; } else { } if ((intel_crtc->config)->pipe_src_h <= y) { base = 0U; } else { } if (x < 0) { if (((intel_crtc->base.cursor)->state)->crtc_w + (uint32_t )x == 0U) { base = 0U; } else { } pos = pos | 32768U; x = - x; } else { } pos = pos | (u32 )x; if (y < 0) { if (((intel_crtc->base.cursor)->state)->crtc_h + (uint32_t )y == 0U) { base = 0U; } else { } pos = pos | 2147483648U; y = - y; } else { } pos = (u32 )(y << 16) | pos; if (base == 0U && intel_crtc->cursor_base == 0U) { return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[pipe] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458888U), pos, 1); __p = to_i915((struct drm_device const 
*)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /* CIL Label */ if (((crtc->cursor)->state)->rotation == 4U) { base = (((intel_crtc->base.cursor)->state)->crtc_h * ((intel_crtc->base.cursor)->state)->crtc_w + 1073741823U) * 4U + base; } else { } } else { } } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 9570U) { i845_update_cursor(crtc, base); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 9586U) { i845_update_cursor(crtc, base); } else { i9xx_update_cursor(crtc, base); } } return; } } static bool cursor_size_ok(struct drm_device *dev , uint32_t width , uint32_t height ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { if (width == 0U || height == 0U) { return (0); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 9570U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 9586U) { _L: /* CIL Label */ if ((width & 63U) != 0U) { return (0); } else { } __p = to_i915((struct drm_device const *)dev); if (((unsigned int )((unsigned short )__p->info.device_id) == 9570U ? 
/* Tail of cursor_size_ok() (starts on the previous line). On device ids
 * 9570/9586 (0x2562/0x2572 -- presumably 845G/865G; the 64-px-multiple
 * width check and the 64U-vs-512U max-width ternary are on the previous
 * line) height is capped at 1023. On all other devices the switch over
 * (width | height) accepts only 64/128/256; the 256U and 128U cases
 * deliberately fall through, so 128 and 256 are rejected on gen 2. */
64U : 512U) < width) { return (0); } else { } if (height > 1023U) { return (0); } else { } } else { switch (width | height) { case 256U: ; case 128U: __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 2U) { return (0); } else { } case 64U: ; goto ldv_54559; default: ; return (0); } ldv_54559: ; } } return (1); } }
/*
 * intel_crtc_gamma_set(): copies the high byte of each 16-bit red/green/
 * blue gamma entry into intel_crtc->lut_r/lut_g/lut_b for indices in
 * [start, min(start + size, 256)), then pushes the LUT to hardware via
 * intel_crtc_load_lut(). The goto/label pair is CIL's rendering of a
 * simple for-loop.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc , u16 *red , u16 *green , u16 *blue , uint32_t start , uint32_t size ) { int end ; int i ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { end = (int )(256U < start + size ? 256U : start + size); __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; i = (int )start; goto ldv_54575; ldv_54574: intel_crtc->lut_r[i] = (u8 )((int )*(red + (unsigned long )i) >> 8); intel_crtc->lut_g[i] = (u8 )((int )*(green + (unsigned long )i) >> 8); intel_crtc->lut_b[i] = (u8 )((int )*(blue + (unsigned long )i) >> 8); i = i + 1; ldv_54575: ; if (i < end) { goto ldv_54574; } else { } intel_crtc_load_lut(crtc); return; } }
/* load_detect_mode: canned "640x480" display mode used as the fallback for
 * output load detection (pixel clock 31500, htotal 832, vtotal 520; field
 * order is positional per struct drm_display_mode). */
static struct drm_display_mode load_detect_mode = {{0, 0}, {0U, 3739147998U, 0}, {'6', '4', '0', 'x', '4', '8', '0', '\000'}, 0, 16U, 31500, 640, 664, 704, 832, 0, 480, 489, 491, 520, 0, 10U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/*
 * __intel_framebuffer_create(): allocates a zeroed intel_framebuffer
 * wrapper (176UL bytes; allocation flags 208U -- presumably GFP_KERNEL
 * after CIL constant folding, TODO confirm). On allocation failure the
 * GEM object reference is dropped and ERR_PTR(-12) (-ENOMEM) returned.
 * On intel_framebuffer_init() failure control jumps to the err: cleanup
 * path, which continues on the next line.
 */
struct drm_framebuffer *__intel_framebuffer_create(struct drm_device *dev , struct drm_mode_fb_cmd2 *mode_cmd , struct drm_i915_gem_object *obj ) { struct intel_framebuffer *intel_fb ; int ret ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = kzalloc(176UL, 208U); intel_fb = (struct intel_framebuffer *)tmp; if ((unsigned long )intel_fb == (unsigned long )((struct intel_framebuffer *)0)) { drm_gem_object_unreference___12(& obj->base); tmp___0 = ERR_PTR(-12L); return ((struct drm_framebuffer *)tmp___0); } else { } ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret != 0) { goto err; } else { } return (& intel_fb->base); err:
/* err: cleanup path of __intel_framebuffer_create() (begins on the
 * previous line): drop the GEM object reference, free the wrapper, and
 * propagate ret as an ERR_PTR. */
drm_gem_object_unreference___12(& obj->base); kfree((void const *)intel_fb); tmp___1 = ERR_PTR((long )ret); return ((struct drm_framebuffer *)tmp___1); } }
/*
 * intel_framebuffer_create(): locked wrapper around
 * __intel_framebuffer_create(). Takes dev->struct_mutex interruptibly via
 * i915_mutex_lock_interruptible() (returning its error as an ERR_PTR on
 * failure), delegates, then unlocks.
 */
static struct drm_framebuffer *intel_framebuffer_create(struct drm_device *dev , struct drm_mode_fb_cmd2 *mode_cmd , struct drm_i915_gem_object *obj ) { struct drm_framebuffer *fb ; int ret ; void *tmp ; { ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { tmp = ERR_PTR((long )ret); return ((struct drm_framebuffer *)tmp); } else { } fb = __intel_framebuffer_create(dev, mode_cmd, obj); mutex_unlock(& dev->struct_mutex); return (fb); } }
/* intel_framebuffer_pitch_for_width(): bytes per scanline --
 * ceil(width * bpp / 8) rounded up to a 64-byte multiple
 * (mask 4294967232U == ~63U). */
static u32 intel_framebuffer_pitch_for_width(int width , int bpp ) { u32 pitch ; { pitch = (u32 )((width * bpp + 7) / 8); return ((pitch + 63U) & 4294967232U); } }
/* intel_framebuffer_size_for_mode(): total framebuffer bytes --
 * vdisplay * pitch rounded up to a 4 KiB page
 * (mask 4294963200U == ~4095U). */
static u32 intel_framebuffer_size_for_mode(struct drm_display_mode *mode , int bpp ) { u32 pitch ; u32 tmp ; { tmp = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); pitch = tmp; return (((u32 )mode->vdisplay * pitch + 4095U) & 4294963200U); } }
/*
 * intel_framebuffer_create_for_mode(): builds a framebuffer sized for a
 * bare display mode. Zeroes every mode_cmd field explicitly (CIL expands
 * the = {0} initializer), then allocates a GEM object of
 * intel_framebuffer_size_for_mode() bytes; the null-check on the object
 * and the rest of the function continue on the next line.
 */
static struct drm_framebuffer *intel_framebuffer_create_for_mode(struct drm_device *dev , struct drm_display_mode *mode , int depth , int bpp ) { struct drm_i915_gem_object *obj ; struct drm_mode_fb_cmd2 mode_cmd ; u32 tmp ; void *tmp___0 ; struct drm_framebuffer *tmp___1 ; { mode_cmd.fb_id = 0U; mode_cmd.width = 0U; mode_cmd.height = 0U; mode_cmd.pixel_format = 0U; mode_cmd.flags = 0U; mode_cmd.handles[0] = 0U; mode_cmd.handles[1] = 0U; mode_cmd.handles[2] = 0U; mode_cmd.handles[3] = 0U; mode_cmd.pitches[0] = 0U; mode_cmd.pitches[1] = 0U; mode_cmd.pitches[2] = 0U; mode_cmd.pitches[3] = 0U; mode_cmd.offsets[0] = 0U; mode_cmd.offsets[1] = 0U; mode_cmd.offsets[2] = 0U; mode_cmd.offsets[3] = 0U; mode_cmd.modifier[0] = 0ULL; mode_cmd.modifier[1] = 0ULL; mode_cmd.modifier[2] = 0ULL; mode_cmd.modifier[3] = 0ULL; tmp = intel_framebuffer_size_for_mode(mode, bpp); obj = i915_gem_alloc_object(dev, (size_t )tmp); if ((unsigned long )obj == (unsigned long )((struct
/* Continuation of intel_framebuffer_create_for_mode() (starts on the
 * previous line): ERR_PTR(-12) (-ENOMEM) when the GEM allocation failed;
 * otherwise fill in width/height, pitch, and legacy pixel format, and
 * delegate to intel_framebuffer_create(). */
drm_i915_gem_object *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct drm_framebuffer *)tmp___0); } else { } mode_cmd.width = (__u32 )mode->hdisplay; mode_cmd.height = (__u32 )mode->vdisplay; mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width((int )mode_cmd.width, bpp); mode_cmd.pixel_format = drm_mode_legacy_fb_format((uint32_t )bpp, (uint32_t )depth); tmp___1 = intel_framebuffer_create(dev, & mode_cmd, obj); return (tmp___1); } }
/*
 * mode_fits_in_fbdev(): tries to reuse the fbdev framebuffer for
 * load-detection. Returns NULL when there is no fbdev or no fbdev fb.
 * The inline asm (ud2 + __bug_table entry, line 9901) is the expanded
 * kernel BUG() taken when the fbdev fb unexpectedly has no backing GEM
 * object; the self-loop label after it reflects BUG()'s noreturn nature.
 * Also returns NULL when the fb's pitch is smaller than the mode requires
 * or the GEM object is too small for vdisplay scanlines; otherwise
 * returns the fbdev framebuffer.
 */
static struct drm_framebuffer *mode_fits_in_fbdev(struct drm_device *dev , struct drm_display_mode *mode ) { struct drm_i915_private *dev_priv ; struct drm_i915_gem_object *obj ; struct drm_framebuffer *fb ; long tmp ; u32 tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->fbdev == (unsigned long )((struct intel_fbdev *)0)) { return ((struct drm_framebuffer *)0); } else { } if ((unsigned long )(dev_priv->fbdev)->fb == (unsigned long )((struct intel_framebuffer *)0)) { return ((struct drm_framebuffer *)0); } else { } obj = ((dev_priv->fbdev)->fb)->obj; tmp = ldv__builtin_expect((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (9901), "i" (12UL)); ldv_54618: ; goto ldv_54618; } else { } fb = & ((dev_priv->fbdev)->fb)->base; tmp___0 = intel_framebuffer_pitch_for_width(mode->hdisplay, fb->bits_per_pixel); if (fb->pitches[0] < tmp___0) { return ((struct drm_framebuffer *)0); } else { } if (obj->base.size < (size_t )((unsigned int )mode->vdisplay * fb->pitches[0])) { return ((struct drm_framebuffer *)0); } else { } return (fb); } }
/* intel_modeset_setup_plane_state(): signature and body continue on the
 * next line. */
static int intel_modeset_setup_plane_state(struct
drm_atomic_state *state , struct drm_crtc *crtc , struct drm_display_mode *mode , struct drm_framebuffer *fb , int x , int y ) { struct drm_plane_state *plane_state ; int hdisplay ; int vdisplay ; int ret ; long tmp ; bool tmp___0 ; { plane_state = drm_atomic_get_plane_state(state, crtc->primary); tmp___0 = IS_ERR((void const *)plane_state); if ((int )tmp___0) { tmp = PTR_ERR((void const *)plane_state); return ((int )tmp); } else { } if ((unsigned long )mode != (unsigned long )((struct drm_display_mode *)0)) { drm_crtc_get_hv_timing((struct drm_display_mode const *)mode, & hdisplay, & vdisplay); } else { vdisplay = 0; hdisplay = vdisplay; } ret = drm_atomic_set_crtc_for_plane(plane_state, (unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0) ? crtc : (struct drm_crtc *)0); if (ret != 0) { return (ret); } else { } drm_atomic_set_fb_for_plane(plane_state, fb); plane_state->crtc_x = 0; plane_state->crtc_y = 0; plane_state->crtc_w = (uint32_t )hdisplay; plane_state->crtc_h = (uint32_t )vdisplay; plane_state->src_x = (uint32_t )(x << 16); plane_state->src_y = (uint32_t )(y << 16); plane_state->src_w = (uint32_t )(hdisplay << 16); plane_state->src_h = (uint32_t )(vdisplay << 16); return (0); } } bool intel_get_load_detect_pipe(struct drm_connector *connector , struct drm_display_mode *mode , struct intel_load_detect_pipe *old , struct drm_modeset_acquire_ctx *ctx ) { struct intel_crtc *intel_crtc ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; struct drm_crtc *possible_crtc ; struct drm_encoder *encoder ; struct drm_crtc *crtc ; struct drm_device *dev ; struct drm_framebuffer *fb ; struct drm_mode_config *config ; struct drm_atomic_state *state ; struct drm_connector_state *connector_state ; struct intel_crtc_state *crtc_state ; int ret ; int i ; long tmp___0 ; struct list_head const *__mptr ; struct drm_crtc const *__mptr___0 ; struct list_head const *__mptr___1 ; long tmp___1 ; struct drm_crtc const *__mptr___2 ; struct drm_connector 
const *__mptr___3 ; struct drm_crtc const *__mptr___4 ; long tmp___2 ; bool tmp___3 ; long tmp___4 ; bool tmp___5 ; bool tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; bool tmp___10 ; long tmp___11 ; int tmp___12 ; { tmp = intel_attached_encoder(connector); intel_encoder = tmp; encoder = & intel_encoder->base; crtc = (struct drm_crtc *)0; dev = encoder->dev; config = & dev->mode_config; state = (struct drm_atomic_state *)0; i = -1; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_get_load_detect_pipe", "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.id, encoder->name); } else { } retry: ret = drm_modeset_lock(& config->connection_mutex, ctx); if (ret != 0) { goto fail_unlock; } else { } if ((unsigned long )encoder->crtc != (unsigned long )((struct drm_crtc *)0)) { crtc = encoder->crtc; ret = drm_modeset_lock(& crtc->mutex, ctx); if (ret != 0) { goto fail_unlock; } else { } ret = drm_modeset_lock(& (crtc->primary)->mutex, ctx); if (ret != 0) { goto fail_unlock; } else { } old->dpms_mode = connector->dpms; old->load_detect_temp = 0; if (connector->dpms != 0) { (*((connector->funcs)->dpms))(connector, 0); } else { } return (1); } else { } __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; possible_crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_54662; ldv_54661: i = i + 1; if ((encoder->possible_crtcs & (uint32_t )(1 << i)) == 0U) { goto ldv_54657; } else { } if ((int )(possible_crtc->state)->enable) { goto ldv_54657; } else { } __mptr___0 = (struct drm_crtc const *)possible_crtc; if ((int )((struct intel_crtc *)__mptr___0)->new_enabled) { goto ldv_54657; } else { } crtc = possible_crtc; goto ldv_54660; ldv_54657: __mptr___1 = (struct list_head const *)possible_crtc->head.next; possible_crtc = (struct drm_crtc *)__mptr___1 + 0xfffffffffffffff0UL; ldv_54662: ; if ((unsigned long )(& possible_crtc->head) != (unsigned long )(& 
dev->mode_config.crtc_list)) { goto ldv_54661; } else { } ldv_54660: ; if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_get_load_detect_pipe", "no pipe available for load-detect\n"); } else { } goto fail_unlock; } else { } ret = drm_modeset_lock(& crtc->mutex, ctx); if (ret != 0) { goto fail_unlock; } else { } ret = drm_modeset_lock(& (crtc->primary)->mutex, ctx); if (ret != 0) { goto fail_unlock; } else { } __mptr___2 = (struct drm_crtc const *)crtc; intel_encoder->new_crtc = (struct intel_crtc *)__mptr___2; __mptr___3 = (struct drm_connector const *)connector; ((struct intel_connector *)__mptr___3)->new_encoder = intel_encoder; __mptr___4 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___4; intel_crtc->new_enabled = 1; old->dpms_mode = connector->dpms; old->load_detect_temp = 1; old->release_fb = (struct drm_framebuffer *)0; state = drm_atomic_state_alloc(dev); if ((unsigned long )state == (unsigned long )((struct drm_atomic_state *)0)) { return (0); } else { } state->acquire_ctx = ctx; connector_state = drm_atomic_get_connector_state(state, connector); tmp___3 = IS_ERR((void const *)connector_state); if ((int )tmp___3) { tmp___2 = PTR_ERR((void const *)connector_state); ret = (int )tmp___2; goto fail; } else { } connector_state->crtc = crtc; connector_state->best_encoder = & intel_encoder->base; crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); tmp___5 = IS_ERR((void const *)crtc_state); if ((int )tmp___5) { tmp___4 = PTR_ERR((void const *)crtc_state); ret = (int )tmp___4; goto fail; } else { } tmp___6 = 1; crtc_state->base.enable = tmp___6; crtc_state->base.active = tmp___6; if ((unsigned long )mode == (unsigned long )((struct drm_display_mode *)0)) { mode = & load_detect_mode; } else { } fb = mode_fits_in_fbdev(dev, mode); if ((unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { 
tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_get_load_detect_pipe", "creating tmp fb for load-detection\n"); } else { } fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); old->release_fb = fb; } else { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_get_load_detect_pipe", "reusing fbdev for load-detection framebuffer\n"); } else { } } tmp___10 = IS_ERR((void const *)fb); if ((int )tmp___10) { tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("intel_get_load_detect_pipe", "failed to allocate framebuffer for load-detection\n"); } else { } goto fail; } else { } ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); if (ret != 0) { goto fail; } else { } drm_mode_copy(& crtc_state->base.mode, (struct drm_display_mode const *)mode); tmp___12 = intel_set_mode(crtc, state, 1); if (tmp___12 != 0) { tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("intel_get_load_detect_pipe", "failed to set mode on load-detect pipe\n"); } else { } if ((unsigned long )old->release_fb != (unsigned long )((struct drm_framebuffer *)0)) { (*(((old->release_fb)->funcs)->destroy))(old->release_fb); } else { } goto fail; } else { } (crtc->primary)->crtc = crtc; intel_wait_for_vblank(dev, (int )intel_crtc->pipe); return (1); fail: intel_crtc->new_enabled = (crtc->state)->enable; fail_unlock: drm_atomic_state_free(state); state = (struct drm_atomic_state *)0; if (ret == -35) { drm_modeset_backoff(ctx); goto retry; } else { } return (0); } } void intel_release_load_detect_pipe(struct drm_connector *connector , struct intel_load_detect_pipe *old , struct drm_modeset_acquire_ctx *ctx ) { struct drm_device *dev ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; struct drm_encoder *encoder ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; 
struct drm_crtc const *__mptr ; struct drm_atomic_state *state ; struct drm_connector_state *connector_state ; struct intel_crtc_state *crtc_state ; int ret ; long tmp___0 ; bool tmp___1 ; bool tmp___2 ; struct drm_connector const *__mptr___0 ; bool tmp___3 ; long tmp___4 ; { dev = connector->dev; tmp = intel_attached_encoder(connector); intel_encoder = tmp; encoder = & intel_encoder->base; crtc = encoder->crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_release_load_detect_pipe", "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.id, encoder->name); } else { } if ((int )old->load_detect_temp) { state = drm_atomic_state_alloc(dev); if ((unsigned long )state == (unsigned long )((struct drm_atomic_state *)0)) { goto fail; } else { } state->acquire_ctx = ctx; connector_state = drm_atomic_get_connector_state(state, connector); tmp___1 = IS_ERR((void const *)connector_state); if ((int )tmp___1) { goto fail; } else { } crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); tmp___2 = IS_ERR((void const *)crtc_state); if ((int )tmp___2) { goto fail; } else { } __mptr___0 = (struct drm_connector const *)connector; ((struct intel_connector *)__mptr___0)->new_encoder = (struct intel_encoder *)0; intel_encoder->new_crtc = (struct intel_crtc *)0; intel_crtc->new_enabled = 0; connector_state->best_encoder = (struct drm_encoder *)0; connector_state->crtc = (struct drm_crtc *)0; tmp___3 = 0; crtc_state->base.active = tmp___3; crtc_state->base.enable = tmp___3; ret = intel_modeset_setup_plane_state(state, crtc, (struct drm_display_mode *)0, (struct drm_framebuffer *)0, 0, 0); if (ret != 0) { goto fail; } else { } ret = intel_set_mode(crtc, state, 1); if (ret != 0) { goto fail; } else { } if ((unsigned long )old->release_fb != (unsigned long )((struct drm_framebuffer *)0)) { 
drm_framebuffer_unregister_private(old->release_fb); drm_framebuffer_unreference(old->release_fb); } else { } return; } else { } if (old->dpms_mode != 0) { (*((connector->funcs)->dpms))(connector, old->dpms_mode); } else { } return; fail: tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_release_load_detect_pipe", "Couldn\'t release load detect pipe.\n"); } else { } drm_atomic_state_free(state); return; } } static int i9xx_pll_refclk(struct drm_device *dev , struct intel_crtc_state const *pipe_config ) { struct drm_i915_private *dev_priv ; u32 dpll ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dpll = pipe_config->dpll_hw_state.dpll; if ((dpll & 24576U) == 24576U) { return (dev_priv->vbt.lvds_ssc_freq); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 0U) { return (120000); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { return (96000); } else { return (48000); } } } } } static void i9xx_crtc_clock_get(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int pipe ; u32 dpll ; u32 fp ; intel_clock_t clock ; int refclk ; int tmp ; int tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp___1 ; struct drm_i915_private *__p___1 ; u32 lvds ; uint32_t tmp___2 ; uint32_t tmp___3 ; struct drm_i915_private *__p___3 ; bool is_lvds ; struct drm_i915_private *__p___4 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = (int )pipe_config->cpu_transcoder; dpll = pipe_config->dpll_hw_state.dpll; tmp = i9xx_pll_refclk(dev, (struct intel_crtc_state const *)pipe_config); refclk = tmp; if ((dpll & 256U) == 0U) { fp = pipe_config->dpll_hw_state.fp0; } else { fp = pipe_config->dpll_hw_state.fp1; 
} clock.m1 = (int )((fp & 16128U) >> 8); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { tmp___0 = ffs((int )((fp & 16711680U) >> 16)); clock.n = tmp___0 + -1; clock.m2 = (int )fp & 255; } else { clock.n = (int )((fp & 4128768U) >> 16); clock.m2 = (int )fp & 63; } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) != 2U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { clock.p1 = ffs((int )((dpll & 16744448U) >> 15)); } else { clock.p1 = ffs((int )((dpll & 16711680U) >> 16)); } switch (dpll & 201326592U) { case 67108864U: clock.p2 = (dpll & 16777216U) != 0U ? 5 : 10; goto ldv_54738; case 134217728U: clock.p2 = (dpll & 16777216U) != 0U ? 7 : 14; goto ldv_54738; default: tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("i9xx_crtc_clock_get", "Unknown DPLL mode %08x in programmed mode\n", (int )dpll & 201326592); } else { } return; } ldv_54738: __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { pineview_clock(refclk, & clock); } else { i9xx_clock(refclk, & clock); } } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___3->info.device_id) != 13687U) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397696L, 1); tmp___3 = tmp___2; } else { tmp___3 = 0U; } lvds = tmp___3; is_lvds = (bool )(pipe == 1 && (int )lvds < 0); if ((int )is_lvds) { clock.p1 = ffs((int )((dpll & 4128768U) >> 16)); if ((lvds & 48U) != 0U) { clock.p2 = 7; } else { clock.p2 = 14; } } else { if ((dpll & 2097152U) != 0U) { clock.p1 = 2; } else { clock.p1 = (int )(((dpll & 2031616U) >> 16) + 2U); } if ((dpll & 8388608U) != 0U) { clock.p2 = 4; } else { clock.p2 = 2; } } i9xx_clock(refclk, & clock); } pipe_config->port_clock = clock.dot; return; } } int 
intel_dotclock_calculate(int link_freq , struct intel_link_m_n const *m_n ) { u64 tmp ; { if ((unsigned int )m_n->link_n == 0U) { return (0); } else { } tmp = div_u64((unsigned long long )m_n->link_m * (unsigned long long )link_freq, m_n->link_n); return ((int )tmp); } } static void ironlake_pch_clock_get(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; u32 tmp ; { dev = crtc->base.dev; i9xx_crtc_clock_get(crtc, pipe_config); tmp = intel_fdi_link_freq(dev); pipe_config->base.adjusted_mode.crtc_clock = intel_dotclock_calculate((int )(tmp * 10000U), (struct intel_link_m_n const *)(& pipe_config->fdi_m_n)); return; } } struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev , struct drm_crtc *crtc ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum transcoder cpu_transcoder ; struct drm_display_mode *mode ; struct intel_crtc_state pipe_config ; int htot ; uint32_t tmp ; int hsync ; uint32_t tmp___0 ; int vtot ; uint32_t tmp___1 ; int vsync ; uint32_t tmp___2 ; enum pipe pipe ; void *tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393216U), 1); htot = (int )tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393224U), 1); hsync = (int )tmp___0; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - 
dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U), 1); vtot = (int )tmp___1; tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393236U), 1); vsync = (int )tmp___2; pipe = intel_crtc->pipe; tmp___3 = kzalloc(208UL, 208U); mode = (struct drm_display_mode *)tmp___3; if ((unsigned long )mode == (unsigned long )((struct drm_display_mode *)0)) { return ((struct drm_display_mode *)0); } else { } pipe_config.cpu_transcoder = (enum transcoder )pipe; pipe_config.pixel_multiplier = 1U; pipe_config.dpll_hw_state.dpll = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); pipe_config.dpll_hw_state.fp0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe + 3080) * 8), 1); pipe_config.dpll_hw_state.fp1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 8 + 24644), 1); i9xx_crtc_clock_get(intel_crtc, & pipe_config); mode->clock = (int )((unsigned int )pipe_config.port_clock / pipe_config.pixel_multiplier); mode->hdisplay = (htot & 65535) + 1; mode->htotal = (int )(((unsigned int )htot >> 16) + 1U); mode->hsync_start = (hsync & 65535) + 1; mode->hsync_end = (int )(((unsigned int )hsync >> 16) + 1U); mode->vdisplay = (vtot & 65535) + 1; mode->vtotal = (int )(((unsigned int )vtot >> 16) + 1U); mode->vsync_start = (vsync & 65535) + 1; mode->vsync_end = (int )(((unsigned int )vsync >> 16) + 1U); drm_mode_set_name(mode); return (mode); } } static void intel_decrease_pllclock(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; 
struct drm_crtc const *__mptr ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int pipe ; int dpll_reg ; int dpll ; long tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; struct drm_i915_private *__p___1 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return; } else { } } else { } if (! dev_priv->lvds_downclock_avail) { return; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) == 0U && (int )intel_crtc->lowfreq_avail) { pipe = intel_crtc->pipe; dpll_reg = (int )(pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : (pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)); tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_decrease_pllclock", "downclocking LVDS\n"); } else { } assert_panel_unlocked(dev_priv, (enum pipe )pipe); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dpll_reg, 1); dpll = (int )tmp___0; dpll = dpll | 256; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dpll_reg, (uint32_t )dpll, 1); intel_wait_for_vblank(dev, pipe); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dpll_reg, 1); dpll = (int )tmp___1; if ((dpll & 256) == 0) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_decrease_pllclock", "failed to downclock LVDS!\n"); } else { } } else { } } else { } return; } } void intel_mark_busy(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { 
/* (tail of intel_mark_busy(), continued from the previous physical line: takes a
 * runtime-PM reference, updates GPU frequency bookkeeping, and on gen6+ notifies
 * RPS that the GPU is busy before setting dev_priv->mm.busy.) */
dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )dev_priv->mm.busy) { return; } else { } intel_runtime_pm_get(dev_priv); i915_update_gfx_val(dev_priv); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { gen6_rps_busy(dev_priv); } else { } dev_priv->mm.busy = 1; return; } }
/* intel_mark_idle - mark the GPU idle.
 * No-op unless mm.busy was set. Clears mm.busy, walks every CRTC on
 * dev->mode_config.crtc_list and calls intel_decrease_pllclock() on those with a
 * primary framebuffer attached (LVDS downclocking), notifies RPS of idleness on
 * gen6+, and drops the runtime-PM reference taken by intel_mark_busy().
 * NOTE(review): the "__mptr + 0xfffffffffffffff0UL" arithmetic looks like CIL's
 * expansion of container_of() with a -16 byte offset from the embedded list_head
 * back to struct drm_crtc — confirm against the original macro. The ldv_548xx
 * labels/gotos are the CIL-normalized form of the list_for_each_entry() loop. */
void intel_mark_idle(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (! dev_priv->mm.busy) { return; } else { } dev_priv->mm.busy = 0; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_54832; ldv_54831: ; if ((unsigned long )(crtc->primary)->fb == (unsigned long )((struct drm_framebuffer *)0)) { goto ldv_54830; } else { } intel_decrease_pllclock(crtc); ldv_54830: __mptr___0 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_54832: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_54831; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 5U) { gen6_rps_idle((struct drm_i915_private *)dev->dev_private); } else { } intel_runtime_pm_put(dev_priv); return; } }
/* intel_crtc_destroy - drm_crtc .destroy hook (begins here; body continues on the
 * next physical line). Under dev->event_lock it detaches any pending
 * intel_unpin_work from the CRTC so the flip worker can be cancelled safely
 * before the CRTC memory is freed. */
static void intel_crtc_destroy(struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct intel_unpin_work *work ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; dev = crtc->dev; spin_lock_irq(& dev->event_lock); work = intel_crtc->unpin_work; intel_crtc->unpin_work = (struct intel_unpin_work *)0; spin_unlock_irq(& dev->event_lock); if ((unsigned long )work != (unsigned long )((struct intel_unpin_work *)0)) {
ldv_cancel_work_sync_550(& work->work); kfree((void const *)work); } else { } drm_crtc_cleanup(crtc); kfree((void const *)intel_crtc); return; } } static void intel_unpin_work_fn(struct work_struct *__work ) { struct intel_unpin_work *work ; struct work_struct const *__mptr ; struct drm_device *dev ; enum pipe pipe ; struct drm_crtc const *__mptr___0 ; struct drm_crtc const *__mptr___1 ; int tmp ; long tmp___0 ; struct drm_crtc const *__mptr___2 ; { __mptr = (struct work_struct const *)__work; work = (struct intel_unpin_work *)__mptr; dev = (work->crtc)->dev; __mptr___0 = (struct drm_crtc const *)work->crtc; pipe = ((struct intel_crtc *)__mptr___0)->pipe; mutex_lock_nested(& dev->struct_mutex, 0U); intel_unpin_fb_obj(work->old_fb, (struct drm_plane_state const *)((work->crtc)->primary)->state); drm_gem_object_unreference___12(& (work->pending_flip_obj)->base); intel_fbc_update(dev); if ((unsigned long )work->flip_queued_req != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_assign___3(& work->flip_queued_req, (struct drm_i915_gem_request *)0); } else { } mutex_unlock(& dev->struct_mutex); intel_frontbuffer_flip_complete(dev, (unsigned int )(1 << (int )pipe * 4)); drm_framebuffer_unreference(work->old_fb); __mptr___1 = (struct drm_crtc const *)work->crtc; tmp = atomic_read((atomic_t const *)(& ((struct intel_crtc *)__mptr___1)->unpin_work_count)); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (10501), "i" (12UL)); ldv_54860: ; goto ldv_54860; } else { } __mptr___2 = (struct drm_crtc const *)work->crtc; atomic_dec(& ((struct intel_crtc 
*)__mptr___2)->unpin_work_count); kfree((void const *)work); return; } }
/* do_intel_finish_page_flip - complete a finished page flip on @crtc.
 * Under dev->event_lock (irqsave) it reads intel_crtc->unpin_work and returns
 * early if @crtc has no mapping (NULL intel_crtc), no work is queued, or the
 * flip has not yet been marked pending (work->pending <= 1); otherwise it hands
 * off to page_flip_completed(). The empty "memory"-clobber asm is a
 * compiler-only barrier between reading unpin_work and checking ->pending —
 * NOTE(review): presumably CIL's expansion of smp_rmb(); confirm against the
 * original intel_display.c. */
static void do_intel_finish_page_flip(struct drm_device *dev , struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_unpin_work *work ; unsigned long flags ; raw_spinlock_t *tmp ; int tmp___0 ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if ((unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0)) { return; } else { } tmp = spinlock_check(& dev->event_lock); flags = _raw_spin_lock_irqsave(tmp); work = intel_crtc->unpin_work; __asm__ volatile ("": : : "memory"); if ((unsigned long )work == (unsigned long )((struct intel_unpin_work *)0)) { spin_unlock_irqrestore(& dev->event_lock, flags); return; } else { tmp___0 = atomic_read((atomic_t const *)(& work->pending)); if (tmp___0 <= 1) { spin_unlock_irqrestore(& dev->event_lock, flags); return; } else { } } page_flip_completed(intel_crtc); spin_unlock_irqrestore(& dev->event_lock, flags); return; } }
/* IRQ-side entry point: resolve the CRTC mapped to hardware pipe @pipe and
 * finish its page flip. */
void intel_finish_page_flip(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; { dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dev_priv->pipe_to_crtc_mapping[pipe]; do_intel_finish_page_flip(dev, crtc); return; } }
/* Same as intel_finish_page_flip() but keyed by display plane index @plane
 * (plane_to_crtc_mapping) instead of pipe. */
void intel_finish_page_flip_plane(struct drm_device *dev , int plane ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; { dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dev_priv->plane_to_crtc_mapping[plane]; do_intel_finish_page_flip(dev, crtc); return; } }
/* g4x_flip_count_after_eq - wrap-safe "a >= b" for 32-bit hardware flip
 * counters: true iff the signed difference (int)(a - b) is non-negative. */
static bool g4x_flip_count_after_eq(u32 a , u32 b ) { { return ((int )(a - b) >= 0); } }
/* page_flip_finished() begins here; only its declarations and first statement
 * appear on this physical line — the body continues on the next line. */
static bool page_flip_finished(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; bool tmp ; int tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; bool tmp___3 ; int tmp___4 ; { dev = crtc->base.dev;
dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = i915_reset_in_progress(& dev_priv->gpu_error); if ((int )tmp) { return (1); } else { tmp___0 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); if (crtc->reset_counter != (unsigned int )tmp___0) { return (1); } else { } } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) == 0U) { return (1); } else { } } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459180U), 1); if ((tmp___1 & 4294963200U) == (crtc->unpin_work)->gtt_offset) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )crtc->pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458820U), 1); tmp___3 = g4x_flip_count_after_eq(tmp___2, (crtc->unpin_work)->flip_count); if ((int )tmp___3) { tmp___4 = 1; } else { tmp___4 = 0; } } else { tmp___4 = 0; } return ((bool )tmp___4); } } void intel_prepare_page_flip(struct drm_device *dev , int plane ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; unsigned long flags ; raw_spinlock_t *tmp ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)dev_priv->plane_to_crtc_mapping[plane]; intel_crtc = (struct intel_crtc *)__mptr; tmp = spinlock_check(& dev->event_lock); flags = _raw_spin_lock_irqsave(tmp); if ((unsigned long )intel_crtc->unpin_work != (unsigned long )((struct intel_unpin_work *)0)) { tmp___0 = page_flip_finished(intel_crtc); if ((int )tmp___0) { atomic_add_unless___3(& (intel_crtc->unpin_work)->pending, 1, 0); } 
else { } } else { } spin_unlock_irqrestore(& dev->event_lock, flags); return; } } __inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc ) { { __asm__ volatile ("": : : "memory"); atomic_set(& (intel_crtc->unpin_work)->pending, 1); __asm__ volatile ("": : : "memory"); return; } } static int intel_gen2_queue_flip(struct drm_device *dev , struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , uint32_t flags ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u32 flip_mask ; int ret ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } if ((unsigned int )intel_crtc->plane != 0U) { flip_mask = 64U; } else { flip_mask = 4U; } intel_ring_emit(ring, flip_mask | 25165824U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, ((unsigned int )intel_crtc->plane << 20) | 167772162U); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, (intel_crtc->unpin_work)->gtt_offset); intel_ring_emit(ring, 0U); intel_mark_page_flip_active(intel_crtc); __intel_ring_advance(ring); return (0); } } static int intel_gen3_queue_flip(struct drm_device *dev , struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , uint32_t flags ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u32 flip_mask ; int ret ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } if ((unsigned int )intel_crtc->plane != 0U) { flip_mask = 64U; } else { flip_mask = 4U; } intel_ring_emit(ring, flip_mask | 25165824U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, ((unsigned int )intel_crtc->plane << 20) | 167772161U); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, (intel_crtc->unpin_work)->gtt_offset); 
intel_ring_emit(ring, 0U); intel_mark_page_flip_active(intel_crtc); __intel_ring_advance(ring); return (0); } } static int intel_gen4_queue_flip(struct drm_device *dev , struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , uint32_t flags ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; uint32_t pf ; uint32_t pipesrc ; int ret ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, ((unsigned int )intel_crtc->plane << 20) | 167772162U); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, (intel_crtc->unpin_work)->gtt_offset | (u32 )obj->tiling_mode); pf = 0U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )intel_crtc->pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), 1); pipesrc = tmp & 268374015U; intel_ring_emit(ring, pf | pipesrc); intel_mark_page_flip_active(intel_crtc); __intel_ring_advance(ring); return (0); } } static int intel_gen6_queue_flip(struct drm_device *dev , struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , uint32_t flags ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; uint32_t pf ; uint32_t pipesrc ; int ret ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, ((unsigned int )intel_crtc->plane << 20) | 167772162U); intel_ring_emit(ring, fb->pitches[0] | (unsigned int )obj->tiling_mode); 
intel_ring_emit(ring, (intel_crtc->unpin_work)->gtt_offset); pf = 0U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )intel_crtc->pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), 1); pipesrc = tmp & 268374015U; intel_ring_emit(ring, pf | pipesrc); intel_mark_page_flip_active(intel_crtc); __intel_ring_advance(ring); return (0); } } static int intel_gen7_queue_flip(struct drm_device *dev , struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , uint32_t flags ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; uint32_t plane_bit ; int len ; int ret ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; plane_bit = 0U; switch ((unsigned int )intel_crtc->plane) { case 0U: plane_bit = 0U; goto ldv_54994; case 1U: plane_bit = 524288U; goto ldv_54994; case 2U: plane_bit = 2097152U; goto ldv_54994; default: __ret_warn_once = 1; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 10795, "unknown plane in flip command\n"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return (-19); } ldv_54994: len = 4; if ((unsigned int )ring->id == 0U) { len = len + 6; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 8U) { len = len + 2; } else { } } else { } ret = intel_ring_cacheline_align(ring); if (ret != 0) { return (ret); } else { } ret = intel_ring_begin(ring, len); if (ret != 0) { return (ret); } else { } if ((unsigned int )ring->id == 0U) { intel_ring_emit(ring, 285212673U); intel_ring_emit(ring, 278608U); intel_ring_emit(ring, 4294934013U); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { intel_ring_emit(ring, 306184194U); } else { intel_ring_emit(ring, 306184193U); } intel_ring_emit(ring, 278608U); intel_ring_emit(ring, ring->scratch.gtt_offset + 256U); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); } else { } } else { } intel_ring_emit(ring, plane_bit | 167772161U); intel_ring_emit(ring, fb->pitches[0] | (unsigned int )obj->tiling_mode); intel_ring_emit(ring, (intel_crtc->unpin_work)->gtt_offset); intel_ring_emit(ring, 0U); intel_mark_page_flip_active(intel_crtc); __intel_ring_advance(ring); return (0); } } static bool use_mmio_flip(struct intel_engine_cs *ring , struct drm_i915_gem_object *obj ) { struct drm_i915_private *__p ; struct intel_engine_cs *tmp ; { if ((unsigned long 
)ring == (unsigned long )((struct intel_engine_cs *)0)) { return (1); } else { } __p = to_i915((struct drm_device const *)ring->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (0); } else { } if (i915.use_mmio_flip < 0) { return (0); } else if (i915.use_mmio_flip > 0) { return (1); } else if (i915.enable_execlists != 0) { return (1); } else { tmp = i915_gem_request_get_ring(obj->last_write_req); return ((unsigned long )tmp != (unsigned long )ring); } } } static void skl_do_mmio_flip(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_framebuffer *fb ; enum pipe pipe ; u32 ctl ; u32 stride ; int __ret_warn_on ; long tmp ; u32 tmp___0 ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; fb = (intel_crtc->base.primary)->fb; pipe = intel_crtc->pipe; ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 459136), 1); ctl = ctl & 4294960127U; switch (fb->modifier[0]) { case 0ULL: ; goto ldv_55041; case 1ULL: ctl = ctl | 1024U; goto ldv_55041; case 2ULL: ctl = ctl | 4096U; goto ldv_55041; case 3ULL: ctl = ctl | 5120U; goto ldv_55041; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 10918, "Missing switch case (%lu) in %s\n", (long )fb->modifier[0], "skl_do_mmio_flip"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } ldv_55041: tmp___0 = intel_fb_stride_alignment(dev, fb->modifier[0], fb->pixel_format); stride = fb->pitches[0] / tmp___0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459136), ctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459144), stride, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459164), (intel_crtc->unpin_work)->gtt_offset, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 459164), 0); return; } } static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_framebuffer *intel_fb ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *obj ; u32 dspcntr ; u32 reg ; { dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_framebuffer const *)(intel_crtc->base.primary)->fb; intel_fb = (struct intel_framebuffer *)__mptr; obj = intel_fb->obj; reg = ((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )intel_crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U; dspcntr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { dspcntr = dspcntr | 1024U; } else { dspcntr = dspcntr & 4294966271U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, dspcntr, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )intel_crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), (intel_crtc->unpin_work)->gtt_offset, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )intel_crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 0); return; } } static void intel_do_mmio_flip(struct intel_crtc *intel_crtc ) { struct drm_device *dev ; bool atomic_update ; u32 start_vbl_count ; struct drm_i915_private *__p ; { dev = intel_crtc->base.dev; intel_mark_page_flip_active(intel_crtc); atomic_update = 
intel_pipe_update_start(intel_crtc, & start_vbl_count); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { skl_do_mmio_flip(intel_crtc); } else { ilk_do_mmio_flip(intel_crtc); } if ((int )atomic_update) { intel_pipe_update_end(intel_crtc, start_vbl_count); } else { } return; } } static void intel_mmio_flip_work_func(struct work_struct *work ) { struct intel_mmio_flip *mmio_flip ; struct work_struct const *__mptr ; int __ret_warn_on ; int tmp ; long tmp___0 ; { __mptr = (struct work_struct const *)work; mmio_flip = (struct intel_mmio_flip *)__mptr; if ((unsigned long )mmio_flip->req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp = __i915_wait_request(mmio_flip->req, (mmio_flip->crtc)->reset_counter, 0, (s64 *)0LL, & (mmio_flip->i915)->rps.mmioflips); __ret_warn_on = tmp != 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 10999, "WARN_ON(__i915_wait_request(mmio_flip->req, mmio_flip->crtc->reset_counter, false, NULL, &mmio_flip->i915->rps.mmioflips))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } intel_do_mmio_flip(mmio_flip->crtc); i915_gem_request_unreference__unlocked___1(mmio_flip->req); kfree((void const *)mmio_flip); return; } } static int intel_queue_mmio_flip(struct drm_device *dev , struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , uint32_t flags ) { struct intel_mmio_flip *mmio_flip ; void *tmp ; struct drm_crtc const *__mptr ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; { tmp = kmalloc(104UL, 208U); mmio_flip = (struct intel_mmio_flip *)tmp; if ((unsigned long )mmio_flip == (unsigned long )((struct intel_mmio_flip 
*)0)) { return (-12); } else { } mmio_flip->i915 = to_i915((struct drm_device const *)dev); mmio_flip->req = i915_gem_request_reference___4(obj->last_write_req); __mptr = (struct drm_crtc const *)crtc; mmio_flip->crtc = (struct intel_crtc *)__mptr; __init_work(& mmio_flip->work, 0); __constr_expr_0___0.counter = 137438953408L; mmio_flip->work.data = __constr_expr_0___0; lockdep_init_map(& mmio_flip->work.lockdep_map, "(&mmio_flip->work)", & __key, 0); INIT_LIST_HEAD(& mmio_flip->work.entry); mmio_flip->work.func = & intel_mmio_flip_work_func; schedule_work___1(& mmio_flip->work); return (0); } } static int intel_default_queue_flip(struct drm_device *dev , struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , uint32_t flags ) { { return (-19); } } static bool __intel_pageflip_stall_check(struct drm_device *dev , struct drm_crtc *crtc ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_unpin_work *work ; u32 addr ; int tmp ; bool tmp___0 ; int tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; uint32_t tmp___4 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; work = intel_crtc->unpin_work; tmp = atomic_read((atomic_t const *)(& work->pending)); if (tmp > 1) { return (1); } else { } if (! 
work->enable_stall_check) { return (0); } else { } if (work->flip_ready_vblank == 0) { if ((unsigned long )work->flip_queued_req != (unsigned long )((struct drm_i915_gem_request *)0)) { tmp___0 = i915_gem_request_completed___3(work->flip_queued_req, 1); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } } else { } tmp___2 = drm_crtc_vblank_count(crtc); work->flip_ready_vblank = (int )tmp___2; } else { } tmp___3 = drm_crtc_vblank_count(crtc); if (tmp___3 - (u32 )work->flip_ready_vblank <= 2U) { return (0); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )intel_crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 1); addr = tmp___4 & 4294963200U; } else { addr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )intel_crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), 1); } return (work->gtt_offset == addr); } } void intel_check_page_flip(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_unpin_work *work ; int __ret_warn_on ; int tmp ; long tmp___0 ; bool __warned ; int __ret_warn_once ; int __ret_warn_on___0 ; u32 tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; bool tmp___5 ; u32 tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dev_priv->pipe_to_crtc_mapping[pipe]; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = preempt_count(); __ret_warn_on = ((unsigned long )tmp & 2096896UL) == 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) 
{ warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 11086, "WARN_ON(!in_interrupt())"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { return; } else { } spin_lock(& dev->event_lock); work = intel_crtc->unpin_work; if ((unsigned long )work != (unsigned long )((struct intel_unpin_work *)0)) { tmp___5 = __intel_pageflip_stall_check(dev, crtc); if ((int )tmp___5) { __ret_warn_once = 1; tmp___4 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___4 != 0L) { __ret_warn_on___0 = ! __warned; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { tmp___1 = drm_vblank_count(dev, pipe); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 11095, "Kicking stuck page flip: queued at %d, now %d\n", work->flip_queued_vblank, tmp___1); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); page_flip_completed(intel_crtc); work = (struct intel_unpin_work *)0; } else { } } else { } if ((unsigned long )work != (unsigned long )((struct intel_unpin_work *)0)) { tmp___6 = drm_vblank_count(dev, pipe); if (tmp___6 - (u32 )work->flip_queued_vblank > 1U) { intel_queue_rps_boost_for_request(dev, work->flip_queued_req); } else { } } else { } spin_unlock(& dev->event_lock); return; } } static int intel_crtc_page_flip(struct drm_crtc *crtc , struct drm_framebuffer *fb , struct drm_pending_vblank_event *event , uint32_t page_flip_flags ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct 
drm_framebuffer *old_fb ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; struct drm_plane *primary ; enum pipe pipe ; struct intel_unpin_work *work ; struct intel_engine_cs *ring ; bool mmio_flip ; int ret ; int __ret_warn_on ; struct drm_framebuffer const *__mptr___1 ; int tmp___0 ; long tmp___1 ; long tmp___2 ; struct drm_i915_private *__p ; bool tmp___3 ; void *tmp___4 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; long tmp___5 ; long tmp___6 ; bool tmp___7 ; int tmp___8 ; int tmp___9 ; uint32_t tmp___10 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_framebuffer const *__mptr___2 ; struct drm_i915_gem_object *tmp___11 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct intel_engine_cs *tmp___12 ; struct intel_engine_cs *tmp___13 ; struct drm_plane const *__mptr___3 ; unsigned long tmp___14 ; struct drm_i915_gem_request *tmp___15 ; u32 tmp___16 ; struct drm_framebuffer const *__mptr___4 ; struct drm_i915_gem_object *tmp___17 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; old_fb = (crtc->primary)->fb; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; primary = crtc->primary; pipe = intel_crtc->pipe; if ((unsigned long )old_fb == (unsigned long )((struct drm_framebuffer *)0)) { tmp___0 = 1; } else { __mptr___1 = (struct drm_framebuffer const *)old_fb; if ((unsigned long )((struct intel_framebuffer *)__mptr___1)->obj == (unsigned long )((struct drm_i915_gem_object *)0)) { tmp___0 = 1; } else { 
tmp___0 = 0; } } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 11127, "WARN_ON(intel_fb_obj(old_fb) == NULL)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return (-16); } else { } if (fb->pixel_format != ((crtc->primary)->fb)->pixel_format) { return (-22); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U && (fb->offsets[0] != ((crtc->primary)->fb)->offsets[0] || fb->pitches[0] != ((crtc->primary)->fb)->pitches[0])) { return (-22); } else { } tmp___3 = i915_terminally_wedged(& dev_priv->gpu_error); if ((int )tmp___3) { goto out_hang; } else { } tmp___4 = kzalloc(152UL, 208U); work = (struct intel_unpin_work *)tmp___4; if ((unsigned long )work == (unsigned long )((struct intel_unpin_work *)0)) { return (-12); } else { } work->event = event; work->crtc = crtc; work->old_fb = old_fb; __init_work(& work->work, 0); __constr_expr_0___0.counter = 137438953408L; work->work.data = __constr_expr_0___0; lockdep_init_map(& work->work.lockdep_map, "(&work->work)", & __key, 0); INIT_LIST_HEAD(& work->work.entry); work->work.func = & intel_unpin_work_fn; ret = drm_crtc_vblank_get(crtc); if (ret != 0) { goto free_work; } else { } spin_lock_irq(& dev->event_lock); if ((unsigned long )intel_crtc->unpin_work != (unsigned long )((struct intel_unpin_work *)0)) { tmp___7 = __intel_pageflip_stall_check(dev, crtc); if ((int )tmp___7) { tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_crtc_page_flip", "flip queue: previous flip completed, continuing\n"); } else { } page_flip_completed(intel_crtc); } else { tmp___6 = 
ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_crtc_page_flip", "flip queue: crtc already busy\n"); } else { } spin_unlock_irq(& dev->event_lock); drm_crtc_vblank_put(crtc); kfree((void const *)work); return (-16); } } else { } intel_crtc->unpin_work = work; spin_unlock_irq(& dev->event_lock); tmp___8 = atomic_read((atomic_t const *)(& intel_crtc->unpin_work_count)); if (tmp___8 > 1) { ldv_flush_workqueue_551(dev_priv->wq); } else { } drm_framebuffer_reference(work->old_fb); drm_gem_object_reference___5(& obj->base); (crtc->primary)->fb = fb; update_state_fb(crtc->primary); work->pending_flip_obj = obj; ret = i915_mutex_lock_interruptible(dev); if (ret != 0) { goto cleanup; } else { } atomic_inc(& intel_crtc->unpin_work_count); tmp___9 = atomic_read((atomic_t const *)(& dev_priv->gpu_error.reset_counter)); intel_crtc->reset_counter = (unsigned int )tmp___9; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 4U) { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458820U), 1); work->flip_count = tmp___10 + 1U; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458820U), 1); work->flip_count = tmp___10 + 1U; } else { } } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { ring = (struct intel_engine_cs *)(& dev_priv->ring) + 2UL; if ((unsigned long )work->old_fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___2 = (struct drm_framebuffer const 
*)work->old_fb; tmp___11 = ((struct intel_framebuffer *)__mptr___2)->obj; } else { tmp___11 = (struct drm_i915_gem_object *)0; } if ((int )obj->tiling_mode != (int )tmp___11->tiling_mode) { ring = (struct intel_engine_cs *)0; } else { } } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { ring = (struct intel_engine_cs *)(& dev_priv->ring) + 2UL; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { ring = (struct intel_engine_cs *)(& dev_priv->ring) + 2UL; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 6U) { ring = i915_gem_request_get_ring(obj->last_write_req); if ((unsigned long )ring == (unsigned long )((struct intel_engine_cs *)0) || (unsigned int )ring->id != 0U) { ring = (struct intel_engine_cs *)(& dev_priv->ring) + 2UL; } else { } } else { ring = (struct intel_engine_cs *)(& dev_priv->ring); } } } } mmio_flip = use_mmio_flip(ring, obj); if ((int )mmio_flip) { tmp___12 = i915_gem_request_get_ring(obj->last_write_req); tmp___13 = tmp___12; } else { tmp___13 = ring; } ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, (struct drm_plane_state const *)(crtc->primary)->state, tmp___13); if (ret != 0) { goto cleanup_pending; } else { } __mptr___3 = (struct drm_plane const *)primary; tmp___14 = intel_plane_obj_offset((struct intel_plane *)__mptr___3, obj); work->gtt_offset = (u32 )tmp___14 + (u32 )intel_crtc->dspaddr_offset; if ((int )mmio_flip) { ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, page_flip_flags); if (ret != 0) { goto cleanup_unpin; } else { } i915_gem_request_assign___3(& work->flip_queued_req, obj->last_write_req); } else { if ((unsigned long )obj->last_write_req != (unsigned long )((struct drm_i915_gem_request *)0)) { ret = i915_gem_check_olr(obj->last_write_req); if (ret != 0) { goto cleanup_unpin; } else { } } else { } ret = 
(*(dev_priv->display.queue_flip))(dev, crtc, fb, obj, ring, page_flip_flags); if (ret != 0) { goto cleanup_unpin; } else { } tmp___15 = intel_ring_get_request___2(ring); i915_gem_request_assign___3(& work->flip_queued_req, tmp___15); } tmp___16 = drm_crtc_vblank_count(crtc); work->flip_queued_vblank = (int )tmp___16; work->enable_stall_check = 1; if ((unsigned long )work->old_fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___4 = (struct drm_framebuffer const *)work->old_fb; tmp___17 = ((struct intel_framebuffer *)__mptr___4)->obj; } else { tmp___17 = (struct drm_i915_gem_object *)0; } i915_gem_track_fb(tmp___17, obj, (unsigned int )(1 << (int )pipe * 4)); intel_fbc_disable(dev); intel_frontbuffer_flip_prepare(dev, (unsigned int )(1 << (int )pipe * 4)); mutex_unlock(& dev->struct_mutex); trace_i915_flip_request((int )intel_crtc->plane, obj); return (0); cleanup_unpin: intel_unpin_fb_obj(fb, (struct drm_plane_state const *)(crtc->primary)->state); cleanup_pending: atomic_dec(& intel_crtc->unpin_work_count); mutex_unlock(& dev->struct_mutex); cleanup: (crtc->primary)->fb = old_fb; update_state_fb(crtc->primary); drm_gem_object_unreference_unlocked___2(& obj->base); drm_framebuffer_unreference(work->old_fb); spin_lock_irq(& dev->event_lock); intel_crtc->unpin_work = (struct intel_unpin_work *)0; spin_unlock_irq(& dev->event_lock); drm_crtc_vblank_put(crtc); free_work: kfree((void const *)work); if (ret == -5) { out_hang: ret = intel_plane_restore(primary); if (ret == 0 && (unsigned long )event != (unsigned long )((struct drm_pending_vblank_event *)0)) { spin_lock_irq(& dev->event_lock); drm_send_vblank_event(dev, (int )pipe, event); spin_unlock_irq(& dev->event_lock); } else { } } else { } return (ret); } } static struct drm_crtc_helper_funcs const intel_helper_funcs = {0, 0, 0, 0, 0, 0, 0, & intel_pipe_set_base_atomic, & intel_crtc_load_lut, 0, 0, 0, & intel_begin_crtc_commit, & intel_finish_crtc_commit}; static void 
intel_modeset_update_staged_output_state(struct drm_device *dev ) { struct intel_crtc *crtc ; struct intel_encoder *encoder ; struct intel_connector *connector ; struct list_head const *__mptr ; struct drm_encoder const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct drm_crtc const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; { __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_55229; ldv_55228: __mptr___0 = (struct drm_encoder const *)connector->base.encoder; connector->new_encoder = (struct intel_encoder *)__mptr___0; __mptr___1 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_55229: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_55228; } else { } __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_55238; ldv_55237: __mptr___3 = (struct drm_crtc const *)encoder->base.crtc; encoder->new_crtc = (struct intel_crtc *)__mptr___3; __mptr___4 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___4 + 0xfffffffffffffff8UL; ldv_55238: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_55237; } else { } __mptr___5 = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr___5 + 0xfffffffffffffff0UL; goto ldv_55245; ldv_55244: crtc->new_enabled = (crtc->base.state)->enable; __mptr___6 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___6 + 0xfffffffffffffff0UL; ldv_55245: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& 
dev->mode_config.crtc_list)) { goto ldv_55244; } else { } return; } } static void intel_modeset_update_connector_atomic_state(struct drm_device *dev ) { struct intel_connector *connector ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_55256; ldv_55255: ; if ((unsigned long )connector->base.encoder != (unsigned long )((struct drm_encoder *)0)) { (connector->base.state)->best_encoder = connector->base.encoder; (connector->base.state)->crtc = (connector->base.encoder)->crtc; } else { (connector->base.state)->best_encoder = (struct drm_encoder *)0; (connector->base.state)->crtc = (struct drm_crtc *)0; } __mptr___0 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_55256: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_55255; } else { } return; } } static void intel_modeset_fixup_state(struct drm_atomic_state *state ) { struct intel_crtc *crtc ; struct intel_encoder *encoder ; struct intel_connector *connector ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int num_connectors ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct drm_crtc_state const *__mptr___6 ; struct list_head const *__mptr___7 ; { __mptr = (struct list_head const *)(state->dev)->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_55269; ldv_55268: connector->base.encoder = (connector->base.state)->best_encoder; if ((unsigned long )connector->base.encoder != (unsigned long )((struct drm_encoder *)0)) { (connector->base.encoder)->crtc = 
(connector->base.state)->crtc; } else { } __mptr___0 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_55269: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& (state->dev)->mode_config.connector_list)) { goto ldv_55268; } else { } __mptr___1 = (struct list_head const *)(state->dev)->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; goto ldv_55284; ldv_55283: num_connectors = 0; __mptr___2 = (struct list_head const *)(state->dev)->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___2 + 0xffffffffffffffe8UL; goto ldv_55281; ldv_55280: ; if ((unsigned long )connector->base.encoder == (unsigned long )(& encoder->base)) { num_connectors = num_connectors + 1; } else { } __mptr___3 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___3 + 0xffffffffffffffe8UL; ldv_55281: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& (state->dev)->mode_config.connector_list)) { goto ldv_55280; } else { } if (num_connectors == 0) { encoder->base.crtc = (struct drm_crtc *)0; } else { } __mptr___4 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___4 + 0xfffffffffffffff8UL; ldv_55284: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& (state->dev)->mode_config.encoder_list)) { goto ldv_55283; } else { } __mptr___5 = (struct list_head const *)(state->dev)->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr___5 + 0xfffffffffffffff0UL; goto ldv_55293; ldv_55292: crtc->base.enabled = (crtc->base.state)->enable; __mptr___6 = (struct drm_crtc_state const *)crtc->base.state; crtc->config = (struct intel_crtc_state *)__mptr___6; __mptr___7 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___7 + 0xfffffffffffffff0UL; ldv_55293: ; if ((unsigned long 
)(& crtc->base.head) != (unsigned long )(& (state->dev)->mode_config.crtc_list)) { goto ldv_55292; } else { } return; } } static void connected_sink_compute_bpp(struct intel_connector *connector , struct intel_crtc_state *pipe_config ) { int bpp ; long tmp ; long tmp___0 ; long tmp___1 ; { bpp = pipe_config->pipe_bpp; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("connected_sink_compute_bpp", "[CONNECTOR:%d:%s] checking for sink bpp constrains\n", connector->base.base.id, connector->base.name); } else { } if (connector->base.display_info.bpc != 0U && connector->base.display_info.bpc * 3U < (unsigned int )bpp) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("connected_sink_compute_bpp", "clamping display bpp (was %d) to EDID reported max of %d\n", bpp, connector->base.display_info.bpc * 3U); } else { } pipe_config->pipe_bpp = (int )(connector->base.display_info.bpc * 3U); } else { } if (connector->base.display_info.bpc == 0U && bpp > 24) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("connected_sink_compute_bpp", "clamping display bpp (was %d) to default limit of 24\n", bpp); } else { } pipe_config->pipe_bpp = 24; } else { } return; } } static int compute_baseline_pipe_bpp(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_atomic_state *state ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; int bpp ; int i ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_connector const *__mptr ; { dev = crtc->base.dev; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { bpp = 30; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { bpp = 30; } else { 
__p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { bpp = 36; } else { bpp = 24; } } } pipe_config->pipe_bpp = bpp; state = pipe_config->base.state; i = 0; goto ldv_55333; ldv_55332: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )(& crtc->base)) { goto ldv_55329; } else { } __mptr = (struct drm_connector const *)connector; connected_sink_compute_bpp((struct intel_connector *)__mptr, pipe_config); } else { } ldv_55329: i = i + 1; ldv_55333: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_55332; } else { } return (bpp); } } static void intel_dump_crtc_timings(struct drm_display_mode const *mode ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dump_crtc_timings", "crtc timings: %d %d %d %d %d %d %d %d %d, type: 0x%x flags: 0x%x\n", mode->crtc_clock, mode->crtc_hdisplay, mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal, mode->crtc_vdisplay, mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); } else { } return; } } static void intel_dump_pipe_config(struct intel_crtc *crtc , struct intel_crtc_state *pipe_config , char const *context ) { struct drm_device *dev ; struct drm_plane *plane ; struct intel_plane *intel_plane ; struct intel_plane_state *state ; struct drm_framebuffer *fb ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; long tmp___13 ; long tmp___14 ; long tmp___15 ; long tmp___16 ; long tmp___17 ; long tmp___18 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct 
drm_i915_private *__p___2 ; long tmp___19 ; struct list_head const *__mptr ; struct drm_plane const *__mptr___0 ; struct drm_plane_state const *__mptr___1 ; unsigned int tmp___20 ; long tmp___21 ; unsigned int tmp___22 ; long tmp___23 ; long tmp___24 ; int tmp___25 ; int tmp___26 ; int tmp___27 ; int tmp___28 ; long tmp___29 ; struct list_head const *__mptr___2 ; { dev = crtc->base.dev; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, context, pipe_config, (int )crtc->pipe + 65); } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "cpu_transcoder: %c\n", (unsigned int )pipe_config->cpu_transcoder + 65U); } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "pipe bpp: %i, dithering: %i\n", pipe_config->pipe_bpp, (int )pipe_config->dither); } else { } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", (int )pipe_config->has_pch_encoder, pipe_config->fdi_lanes, pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, pipe_config->fdi_m_n.tu); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", (int )pipe_config->has_dp_encoder, pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, pipe_config->dp_m_n.tu); } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "dp: %i, 
gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", (int )pipe_config->has_dp_encoder, pipe_config->dp_m2_n2.gmch_m, pipe_config->dp_m2_n2.gmch_n, pipe_config->dp_m2_n2.link_m, pipe_config->dp_m2_n2.link_n, pipe_config->dp_m2_n2.tu); } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "audio: %i, infoframes: %i\n", (int )pipe_config->has_audio, (int )pipe_config->has_infoframe); } else { } tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "requested mode:\n"); } else { } drm_mode_debug_printmodeline((struct drm_display_mode const *)(& pipe_config->base.mode)); tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "adjusted mode:\n"); } else { } drm_mode_debug_printmodeline((struct drm_display_mode const *)(& pipe_config->base.adjusted_mode)); intel_dump_crtc_timings((struct drm_display_mode const *)(& pipe_config->base.adjusted_mode)); tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "port clock: %d\n", pipe_config->port_clock); } else { } tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "pipe src size: %dx%d\n", pipe_config->pipe_src_w, pipe_config->pipe_src_h); } else { } tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", crtc->num_scalers, pipe_config->scaler_state.scaler_users, pipe_config->scaler_state.scaler_id); } else { } tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 
pipe_config->gmch_pfit.control, pipe_config->gmch_pfit.pgm_ratios, pipe_config->gmch_pfit.lvds_border_bits); } else { } tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", pipe_config->pch_pfit.pos, pipe_config->pch_pfit.size, (int )pipe_config->pch_pfit.enabled ? (char *)"enabled" : (char *)"disabled"); } else { } tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "ips: %i\n", (int )pipe_config->ips_enabled); } else { } tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "double wide: %i\n", (int )pipe_config->double_wide); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { tmp___15 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___15 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n", pipe_config->ddi_pll_sel, pipe_config->dpll_hw_state.ebb0, pipe_config->dpll_hw_state.pll0, pipe_config->dpll_hw_state.pll1, pipe_config->dpll_hw_state.pll2, pipe_config->dpll_hw_state.pll3, pipe_config->dpll_hw_state.pll6, pipe_config->dpll_hw_state.pll8, pipe_config->dpll_hw_state.pcsdw12); } else { } } else { goto _L; } } else { _L: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { tmp___16 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___16 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "ddi_pll_sel: %u; dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", 
pipe_config->ddi_pll_sel, pipe_config->dpll_hw_state.ctrl1, pipe_config->dpll_hw_state.cfgcr1, pipe_config->dpll_hw_state.cfgcr2); } else { } } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { tmp___17 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___17 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n", pipe_config->ddi_pll_sel, pipe_config->dpll_hw_state.wrpll); } else { } } else { tmp___18 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___18 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, fp0: 0x%x, fp1: 0x%x\n", pipe_config->dpll_hw_state.dpll, pipe_config->dpll_hw_state.dpll_md, pipe_config->dpll_hw_state.fp0, pipe_config->dpll_hw_state.fp1); } else { } } } } tmp___19 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___19 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "planes on this crtc\n"); } else { } __mptr = (struct list_head const *)dev->mode_config.plane_list.next; plane = (struct drm_plane *)__mptr + 0xfffffffffffffff8UL; goto ldv_55384; ldv_55383: __mptr___0 = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr___0; if ((int )intel_plane->pipe != (int )crtc->pipe) { goto ldv_55380; } else { } __mptr___1 = (struct drm_plane_state const *)plane->state; state = (struct intel_plane_state *)__mptr___1; fb = state->base.fb; if ((unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { tmp___21 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___21 != 0L) { tmp___20 = drm_plane_index(plane); drm_ut_debug_printk("intel_dump_pipe_config", "%s PLANE:%d plane: %u.%u idx: %d disabled, scaler_id = %d\n", (unsigned int )plane->type == 2U ? (char *)"CURSOR" : (char *)"STANDARD", plane->base.id, (int )intel_plane->pipe, (unsigned long )crtc->base.primary != (unsigned long )plane ? 
/* Continuation (tail) of intel_dump_pipe_config(): remainder of the per-plane
   debug-dump loop (a CIL-flattened list_for_each over
   dev->mode_config.plane_list using the ldv_553xx goto/label web). Output is
   only emitted when drm_debug bit 2 is set; no driver state is modified. */
intel_plane->plane + 1 : 0, tmp___20, state->scaler_id); } else { } goto ldv_55380; } else { } tmp___23 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___23 != 0L) { tmp___22 = drm_plane_index(plane); drm_ut_debug_printk("intel_dump_pipe_config", "%s PLANE:%d plane: %u.%u idx: %d enabled", (unsigned int )plane->type == 2U ? (char *)"CURSOR" : (char *)"STANDARD", plane->base.id, (int )intel_plane->pipe, (unsigned long )crtc->base.primary != (unsigned long )plane ? intel_plane->plane + 1 : 0, tmp___22); } else { } tmp___24 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___24 != 0L) { drm_ut_debug_printk("intel_dump_pipe_config", "\tFB:%d, fb = %ux%u format = 0x%x", fb->base.id, fb->width, fb->height, fb->pixel_format); } else { } tmp___29 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___29 != 0L) { tmp___25 = drm_rect_height((struct drm_rect const *)(& state->dst)); tmp___26 = drm_rect_width((struct drm_rect const *)(& state->dst)); tmp___27 = drm_rect_height((struct drm_rect const *)(& state->src)); tmp___28 = drm_rect_width((struct drm_rect const *)(& state->src)); drm_ut_debug_printk("intel_dump_pipe_config", "\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", state->scaler_id, state->src.x1 >> 16, state->src.y1 >> 16, tmp___28 >> 16, tmp___27 >> 16, state->dst.x1, state->dst.y1, tmp___26, tmp___25); } else { } ldv_55380: __mptr___2 = (struct list_head const *)plane->head.next; plane = (struct drm_plane *)__mptr___2 + 0xfffffffffffffff8UL; ldv_55384: ; if ((unsigned long )(& plane->head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_55383; } else { } return; } }
/* encoders_cloneable(): two encoders may drive the same CRTC when they are
   literally the same encoder, or when each encoder's 'cloneable' bitmask
   contains the bit for the other encoder's type. Pure predicate; no side
   effects. */
static bool encoders_cloneable(struct intel_encoder const *a , struct intel_encoder const *b ) { { return ((bool )((unsigned long )a == (unsigned long )b || (((unsigned int )a->cloneable & (unsigned int )(1 << (int )b->type)) != 0U && ((unsigned int )b->cloneable & (unsigned int )(1 << (int )a->type)) != 0U))); } }
/* Start of check_single_encoder_cloning(); its parameter list and body
   continue on the following source line. */
static bool
/* check_single_encoder_cloning(): returns 0 (false) as soon as any connector
   in 'state' that is assigned to this crtc has a best_encoder that is not
   cloneable with 'encoder'; returns 1 otherwise. The ldv_55399/55402/55403
   goto-label triple is a CIL-flattened loop over
   state->connectors[0 .. state->num_connector). Connector slots may be NULL
   and are skipped. */
check_single_encoder_cloning(struct drm_atomic_state *state , struct intel_crtc *crtc , struct intel_encoder *encoder ) { struct intel_encoder *source_encoder ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; int i ; struct drm_encoder const *__mptr ; bool tmp ; int tmp___0 ; { i = 0; goto ldv_55403; ldv_55402: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )(& crtc->base)) { goto ldv_55399; } else { } __mptr = (struct drm_encoder const *)connector_state->best_encoder; source_encoder = (struct intel_encoder *)__mptr; tmp = encoders_cloneable((struct intel_encoder const *)encoder, (struct intel_encoder const *)source_encoder); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } } else { } ldv_55399: i = i + 1; ldv_55403: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_55402; } else { } return (1); } }
/* check_encoder_cloning(): same flattened connector-loop shape as above, but
   validates every encoder bound to 'crtc' in the atomic state against all
   the others via check_single_encoder_cloning(); returns 0 on the first
   invalid cloning combination, 1 when all combinations are cloneable. The
   loop tail (connector_state assignment) continues on the next line. */
static bool check_encoder_cloning(struct drm_atomic_state *state , struct intel_crtc *crtc ) { struct intel_encoder *encoder ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; int i ; struct drm_encoder const *__mptr ; bool tmp ; int tmp___0 ; { i = 0; goto ldv_55417; ldv_55416: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )(& crtc->base)) { goto ldv_55413; } else { } __mptr = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr; tmp = check_single_encoder_cloning(state, crtc, encoder); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } } else { } ldv_55413: i = i + 1; ldv_55417: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state =
*(state->connector_states + (unsigned long )i); goto ldv_55416; } else { } return (1); } }
/* check_digital_port_conflicts(): walks every connector in the atomic state
   and accumulates a bitmask ('used_ports') of the digital ports in use;
   returns 0 as soon as two connectors resolve to the same port, 1 otherwise.
   The switch case values (10U, falling through to 7U/6U/8U) are
   INTEL_OUTPUT_* type codes — confirm exact names against intel_drv.h. The
   10U arm WARNs unless the device has DDI: the byte test at offset 46UL of
   drm_i915_private mirrors the HAS_DDI(dev) macro (see the WARN_ON text),
   and on that warning the port check is skipped via goto ldv_55443. */
static bool check_digital_port_conflicts(struct drm_atomic_state *state ) { struct drm_device *dev ; struct intel_encoder *encoder ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; unsigned int used_ports ; int i ; struct drm_encoder const *__mptr ; int __ret_warn_on ; long tmp ; unsigned int port_mask ; int __ret_warn_on___0 ; struct drm_i915_private *__p ; long tmp___0 ; long tmp___1 ; struct intel_digital_port *tmp___2 ; { dev = state->dev; used_ports = 0U; i = 0; goto ldv_55449; ldv_55448: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->best_encoder == (unsigned long )((struct drm_encoder *)0)) { goto ldv_55428; } else { } __mptr = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr; __ret_warn_on = (unsigned long )connector_state->crtc == (unsigned long )((struct drm_crtc *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 11663, "WARN_ON(!connector_state->crtc)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); switch ((unsigned int )encoder->type) { case 10U: __p = to_i915((struct drm_device const *)dev); __ret_warn_on___0 = (unsigned int )*((unsigned char *)__p + 46UL) == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 11668, "WARN_ON(!HAS_DDI(dev))"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { goto ldv_55443; } else { } case 7U: ; case 6U: ; case 8U: tmp___2 = enc_to_dig_port(& encoder->base); port_mask = (unsigned int )(1 << (int )tmp___2->port); if ((used_ports & port_mask) != 0U) { return (0); } else { } used_ports = used_ports | port_mask; default: ; goto ldv_55443; } ldv_55443: ; } else { } ldv_55428: i = i + 1; ldv_55449: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_55448; } else { } return (1); } }
/* clear_intel_crtc_state(): wipes the whole intel_crtc_state (752UL is the
   CIL-computed sizeof(struct intel_crtc_state) for this build) while
   preserving the fields that must survive a config recompute: base,
   scaler_state, shared_dpll, dpll_hw_state and ddi_pll_sel are saved into
   locals and restored after the memset. */
static void clear_intel_crtc_state(struct intel_crtc_state *crtc_state ) { struct drm_crtc_state tmp_state ; struct intel_crtc_scaler_state scaler_state ; struct intel_dpll_hw_state dpll_hw_state ; enum intel_dpll_id shared_dpll ; uint32_t ddi_pll_sel ; { tmp_state = crtc_state->base; scaler_state = crtc_state->scaler_state; shared_dpll = crtc_state->shared_dpll; dpll_hw_state = crtc_state->dpll_hw_state; ddi_pll_sel = crtc_state->ddi_pll_sel; memset((void *)crtc_state, 0, 752UL); crtc_state->base = tmp_state; crtc_state->scaler_state = scaler_state; crtc_state->shared_dpll = shared_dpll; crtc_state->dpll_hw_state = dpll_hw_state; crtc_state->ddi_pll_sel = ddi_pll_sel; return; } }
/* intel_modeset_pipe_config(): computes the pipe configuration for 'crtc'
   from the atomic 'state' into 'pipe_config'; returns 0 on success or a
   negative errno (-22 == -EINVAL). Local declarations below; the function
   body continues on the following lines. */
static int intel_modeset_pipe_config(struct drm_crtc *crtc , struct drm_atomic_state *state , struct intel_crtc_state *pipe_config ) { struct intel_encoder *encoder ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; int base_bpp ; int ret ; int i ; bool retry ; long tmp ; struct drm_crtc const *__mptr ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; struct drm_crtc const *__mptr___0 ; struct drm_crtc const *__mptr___1 ; struct drm_encoder const *__mptr___2 ; long tmp___5 ; bool tmp___6 ; int tmp___7 ; struct drm_crtc const *__mptr___3 ; long tmp___8 ; int __ret_warn_on ; long tmp___9 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; {
/* intel_modeset_pipe_config() body: reject invalid encoder cloning and
   conflicting digital ports (both return -22/-EINVAL), reset the scratch
   pipe_config via clear_intel_crtc_state(), default missing HSYNC polarity
   (flags & 3U) to bit 2U and missing VSYNC polarity (flags & 12U) to bit 8U,
   compute the baseline bpp, then — from the encoder_retry label — reset
   port_clock/pixel_multiplier and run every matching encoder's
   compute_config() hook (flattened connector loop ldv_55480/55483/55484). */
ret = -22; retry = 1; __mptr = (struct drm_crtc const *)crtc; tmp___0 = check_encoder_cloning(state, (struct intel_crtc *)__mptr); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_modeset_pipe_config", "rejecting invalid cloning configuration\n"); } else { } return (-22); } else { } tmp___3 = check_digital_port_conflicts(state); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_modeset_pipe_config", "rejecting conflicting digital port configuration\n"); } else { } return (-22); } else { } clear_intel_crtc_state(pipe_config); __mptr___0 = (struct drm_crtc const *)crtc; pipe_config->cpu_transcoder = (enum transcoder )((struct intel_crtc *)__mptr___0)->pipe; if ((pipe_config->base.adjusted_mode.flags & 3U) == 0U) { pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | 2U; } else { } if ((pipe_config->base.adjusted_mode.flags & 12U) == 0U) { pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | 8U; } else { } __mptr___1 = (struct drm_crtc const *)crtc; base_bpp = compute_baseline_pipe_bpp((struct intel_crtc *)__mptr___1, pipe_config); if (base_bpp < 0) { goto fail; } else { } drm_crtc_get_hv_timing((struct drm_display_mode const *)(& pipe_config->base.mode), & pipe_config->pipe_src_w, & pipe_config->pipe_src_h); encoder_retry: pipe_config->port_clock = 0; pipe_config->pixel_multiplier = 1U; drm_mode_set_crtcinfo(& pipe_config->base.adjusted_mode, 2); i = 0; goto ldv_55484; ldv_55483: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )crtc) { goto ldv_55480; } else { } __mptr___2 = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr___2; tmp___6
/* After the encoder loop: if no encoder set port_clock, derive it from the
   adjusted mode's crtc_clock * pixel_multiplier; then let
   intel_crtc_compute_config() fix things up. ret == 1 means "bandwidth
   constrained, retry once" — handled on the next line ('! retry' WARN). */
= (*(encoder->compute_config))(encoder, pipe_config); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_modeset_pipe_config", "Encoder config failure\n"); } else { } goto fail; } else { } } else { } ldv_55480: i = i + 1; ldv_55484: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_55483; } else { } if (pipe_config->port_clock == 0) { pipe_config->port_clock = (int )((unsigned int )pipe_config->base.adjusted_mode.crtc_clock * pipe_config->pixel_multiplier); } else { } __mptr___3 = (struct drm_crtc const *)crtc; ret = intel_crtc_compute_config((struct intel_crtc *)__mptr___3, pipe_config); if (ret < 0) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_modeset_pipe_config", "CRTC fixup failed\n"); } else { } goto fail; } else { } if (ret == 1) { __ret_warn_on = ! 
retry; tmp___9 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 11816, "loop in pipe configuration computation\n"); } else { } tmp___10 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___10 != 0L) { ret = -22; goto fail; } else { } tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("intel_modeset_pipe_config", "CRTC bw constrained, retrying\n"); } else { } retry = 0; goto encoder_retry; } else { } pipe_config->dither = pipe_config->pipe_bpp != base_bpp; tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("intel_modeset_pipe_config", "plane bpp: %i, pipe bpp: %i, dithering: %i\n", base_bpp, pipe_config->pipe_bpp, (int )pipe_config->dither); } else { } return (0); fail: ; return (ret); } }
/* intel_crtc_in_use(): true when any encoder on the device currently points
   at this crtc (CIL-flattened list_for_each over
   dev->mode_config.encoder_list; the 0xff...f8UL offset is container_of
   arithmetic for the embedded list_head). */
static bool intel_crtc_in_use(struct drm_crtc *crtc ) { struct drm_encoder *encoder ; struct drm_device *dev ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = crtc->dev; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct drm_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_55500; ldv_55499: ; if ((unsigned long )encoder->crtc == (unsigned long )crtc) { return (1); } else { } __mptr___0 = (struct list_head const *)encoder->head.next; encoder = (struct drm_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_55500: ; if ((unsigned long )(& encoder->head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_55499; } else { } return (0); } }
/* needs_modeset(): a crtc state requires a full modeset when either its
   mode_changed or active_changed flag is set. */
static bool needs_modeset(struct drm_crtc_state *state ) { { return ((bool )((int )state->mode_changed || (int )state->active_changed)); } }
/* intel_modeset_update_state(): bookkeeping after an atomic state has been
   computed — declarations and body continue on the following lines. */
static void intel_modeset_update_state(struct drm_atomic_state *state ) { 
/* intel_modeset_update_state() locals and body: commit the shared-DPLL
   state, clear connectors_active on every encoder whose crtc is enabled and
   being modeset, swap the new atomic state in
   (drm_atomic_helper_swap_state + intel_modeset_fixup_state), then WARN if
   any crtc->state->enable disagrees with intel_crtc_in_use(), and finally
   force DPMS-on bookkeeping (connector->dpms = 0 == DRM_MODE_DPMS_ON,
   connectors_active = 1) for connectors whose crtc underwent a modeset.
   All list walks are CIL-flattened list_for_each loops with container_of
   offsets (0xff...f8UL / 0xff...f0UL / 0xff...e8UL). */
struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; struct drm_crtc *crtc ; struct drm_crtc_state *crtc_state ; struct drm_connector *connector ; int i ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int __ret_warn_on ; bool tmp___0 ; long tmp___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct drm_property *dpms_property ; struct drm_encoder const *__mptr___4 ; bool tmp___2 ; struct list_head const *__mptr___5 ; { dev = state->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_shared_dpll_commit(dev_priv); __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_55525; ldv_55524: ; if ((unsigned long )intel_encoder->base.crtc == (unsigned long )((struct drm_crtc *)0)) { goto ldv_55519; } else { } i = 0; goto ldv_55523; ldv_55522: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { if ((unsigned long )intel_encoder->base.crtc != (unsigned long )crtc) { goto ldv_55520; } else { } if ((int )crtc_state->enable) { tmp = needs_modeset(crtc_state); if ((int )tmp) { intel_encoder->connectors_active = 0; } else { } } else { } goto ldv_55521; } else { } ldv_55520: i = i + 1; ldv_55523: ; if ((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_55522; } else { } ldv_55521: ; ldv_55519: __mptr___0 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_55525: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_55524; } else { } drm_atomic_helper_swap_state(state->dev, state); intel_modeset_fixup_state(state); __mptr___1 = (struct list_head const 
*)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr___1 + 0xfffffffffffffff0UL; goto ldv_55534; ldv_55533: tmp___0 = intel_crtc_in_use(crtc); __ret_warn_on = (int )(crtc->state)->enable != (int )tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 11886, "WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __mptr___2 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___2 + 0xfffffffffffffff0UL; ldv_55534: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_55533; } else { } __mptr___3 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct drm_connector *)__mptr___3 + 0xffffffffffffffe8UL; goto ldv_55549; ldv_55548: ; if ((unsigned long )connector->encoder == (unsigned long )((struct drm_encoder *)0) || (unsigned long )(connector->encoder)->crtc == (unsigned long )((struct drm_crtc *)0)) { goto ldv_55540; } else { } i = 0; goto ldv_55547; ldv_55546: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { if ((unsigned long )(connector->encoder)->crtc != (unsigned long )crtc) { goto ldv_55541; } else { } if ((int )(crtc->state)->enable) { tmp___2 = needs_modeset(crtc->state); if ((int )tmp___2) { dpms_property = dev->mode_config.dpms_property; connector->dpms = 0; drm_object_property_set_value(& connector->base, dpms_property, 0ULL); __mptr___4 = (struct drm_encoder const *)connector->encoder; intel_encoder = (struct intel_encoder *)__mptr___4; intel_encoder->connectors_active = 1; } else { } } else { } goto ldv_55545; } else { } ldv_55541: i = i + 1; ldv_55547: ; if 
((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_55546; } else { } ldv_55545: ; ldv_55540: __mptr___5 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___5 + 0xffffffffffffffe8UL; ldv_55549: ; if ((unsigned long )(& connector->head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_55548; } else { } return; } }
/* intel_fuzzy_clock_check(): returns 1 when the two clocks are equal, or
   both are non-zero and close enough:
   ((diff + clock1 + clock2) * 100) / (clock1 + clock2) <= 104 is equivalent
   to |clock1 - clock2| < (clock1 + clock2) / 20, i.e. within roughly 10% of
   either clock. The abs is CIL's expansion of abs(clock1 - clock2).
   NOTE(review): assumes non-negative clock values — a pair like
   clock1 == -clock2 (both non-zero) would divide by zero; confirm callers
   only pass kHz clock values >= 0. */
static bool intel_fuzzy_clock_check(int clock1 , int clock2 ) { int diff ; long ret ; int __x___0 ; { if (clock1 == clock2) { return (1); } else { } if (clock1 == 0 || clock2 == 0) { return (0); } else { } __x___0 = clock1 - clock2; ret = (long )(__x___0 < 0 ? - __x___0 : __x___0); diff = (int )ret; if ((((diff + clock1) + clock2) * 100) / (clock1 + clock2) <= 104) { return (1); } else { } return (0); } }
/* Start of intel_pipe_config_compare(): field-by-field comparison of two
   intel_crtc_state structures, logging the first mismatch via drm_err() and
   returning 0; the function continues well past this chunk. */
static bool intel_pipe_config_compare(struct drm_device *dev , struct intel_crtc_state *current_config , struct intel_crtc_state *pipe_config ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { if ((unsigned int )current_config->cpu_transcoder != (unsigned int )pipe_config->cpu_transcoder) { drm_err("mismatch in cpu_transcoder (expected %i, found %i)\n", (unsigned int )current_config->cpu_transcoder, (unsigned int )pipe_config->cpu_transcoder); return (0); } else { } if ((int )current_config->has_pch_encoder != (int )pipe_config->has_pch_encoder) { drm_err("mismatch in has_pch_encoder (expected %i, found %i)\n", (int )current_config->has_pch_encoder, (int )pipe_config->has_pch_encoder); return (0); } else { } if (current_config->fdi_lanes != pipe_config->fdi_lanes) { drm_err("mismatch in fdi_lanes (expected %i, found 
%i)\n", current_config->fdi_lanes, pipe_config->fdi_lanes); return (0); } else { } if (current_config->fdi_m_n.gmch_m != pipe_config->fdi_m_n.gmch_m) { drm_err("mismatch in fdi_m_n.gmch_m (expected %i, found %i)\n", current_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_m); return (0); } else { } if (current_config->fdi_m_n.gmch_n != pipe_config->fdi_m_n.gmch_n) { drm_err("mismatch in fdi_m_n.gmch_n (expected %i, found %i)\n", current_config->fdi_m_n.gmch_n, pipe_config->fdi_m_n.gmch_n); return (0); } else { } if (current_config->fdi_m_n.link_m != pipe_config->fdi_m_n.link_m) { drm_err("mismatch in fdi_m_n.link_m (expected %i, found %i)\n", current_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_m); return (0); } else { } if (current_config->fdi_m_n.link_n != pipe_config->fdi_m_n.link_n) { drm_err("mismatch in fdi_m_n.link_n (expected %i, found %i)\n", current_config->fdi_m_n.link_n, pipe_config->fdi_m_n.link_n); return (0); } else { } if (current_config->fdi_m_n.tu != pipe_config->fdi_m_n.tu) { drm_err("mismatch in fdi_m_n.tu (expected %i, found %i)\n", current_config->fdi_m_n.tu, pipe_config->fdi_m_n.tu); return (0); } else { } if ((int )current_config->has_dp_encoder != (int )pipe_config->has_dp_encoder) { drm_err("mismatch in has_dp_encoder (expected %i, found %i)\n", (int )current_config->has_dp_encoder, (int )pipe_config->has_dp_encoder); return (0); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 7U) { if (current_config->dp_m_n.gmch_m != pipe_config->dp_m_n.gmch_m) { drm_err("mismatch in dp_m_n.gmch_m (expected %i, found %i)\n", current_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_m); return (0); } else { } if (current_config->dp_m_n.gmch_n != pipe_config->dp_m_n.gmch_n) { drm_err("mismatch in dp_m_n.gmch_n (expected %i, found %i)\n", current_config->dp_m_n.gmch_n, pipe_config->dp_m_n.gmch_n); return (0); } else { } if (current_config->dp_m_n.link_m != pipe_config->dp_m_n.link_m) { 
drm_err("mismatch in dp_m_n.link_m (expected %i, found %i)\n", current_config->dp_m_n.link_m, pipe_config->dp_m_n.link_m); return (0); } else { } if (current_config->dp_m_n.link_n != pipe_config->dp_m_n.link_n) { drm_err("mismatch in dp_m_n.link_n (expected %i, found %i)\n", current_config->dp_m_n.link_n, pipe_config->dp_m_n.link_n); return (0); } else { } if (current_config->dp_m_n.tu != pipe_config->dp_m_n.tu) { drm_err("mismatch in dp_m_n.tu (expected %i, found %i)\n", current_config->dp_m_n.tu, pipe_config->dp_m_n.tu); return (0); } else { } if ((int )current_config->has_drrs) { if (current_config->dp_m2_n2.gmch_m != pipe_config->dp_m2_n2.gmch_m) { drm_err("mismatch in dp_m2_n2.gmch_m (expected %i, found %i)\n", current_config->dp_m2_n2.gmch_m, pipe_config->dp_m2_n2.gmch_m); return (0); } else { } if (current_config->dp_m2_n2.gmch_n != pipe_config->dp_m2_n2.gmch_n) { drm_err("mismatch in dp_m2_n2.gmch_n (expected %i, found %i)\n", current_config->dp_m2_n2.gmch_n, pipe_config->dp_m2_n2.gmch_n); return (0); } else { } if (current_config->dp_m2_n2.link_m != pipe_config->dp_m2_n2.link_m) { drm_err("mismatch in dp_m2_n2.link_m (expected %i, found %i)\n", current_config->dp_m2_n2.link_m, pipe_config->dp_m2_n2.link_m); return (0); } else { } if (current_config->dp_m2_n2.link_n != pipe_config->dp_m2_n2.link_n) { drm_err("mismatch in dp_m2_n2.link_n (expected %i, found %i)\n", current_config->dp_m2_n2.link_n, pipe_config->dp_m2_n2.link_n); return (0); } else { } if (current_config->dp_m2_n2.tu != pipe_config->dp_m2_n2.tu) { drm_err("mismatch in dp_m2_n2.tu (expected %i, found %i)\n", current_config->dp_m2_n2.tu, pipe_config->dp_m2_n2.tu); return (0); } else { } } else { } } else { if (current_config->dp_m_n.gmch_m != pipe_config->dp_m_n.gmch_m && current_config->dp_m2_n2.gmch_m != pipe_config->dp_m_n.gmch_m) { drm_err("mismatch in dp_m_n.gmch_m (expected %i or %i, found %i)\n", current_config->dp_m_n.gmch_m, current_config->dp_m2_n2.gmch_m, pipe_config->dp_m_n.gmch_m); 
return (0); } else { } if (current_config->dp_m_n.gmch_n != pipe_config->dp_m_n.gmch_n && current_config->dp_m2_n2.gmch_n != pipe_config->dp_m_n.gmch_n) { drm_err("mismatch in dp_m_n.gmch_n (expected %i or %i, found %i)\n", current_config->dp_m_n.gmch_n, current_config->dp_m2_n2.gmch_n, pipe_config->dp_m_n.gmch_n); return (0); } else { } if (current_config->dp_m_n.link_m != pipe_config->dp_m_n.link_m && current_config->dp_m2_n2.link_m != pipe_config->dp_m_n.link_m) { drm_err("mismatch in dp_m_n.link_m (expected %i or %i, found %i)\n", current_config->dp_m_n.link_m, current_config->dp_m2_n2.link_m, pipe_config->dp_m_n.link_m); return (0); } else { } if (current_config->dp_m_n.link_n != pipe_config->dp_m_n.link_n && current_config->dp_m2_n2.link_n != pipe_config->dp_m_n.link_n) { drm_err("mismatch in dp_m_n.link_n (expected %i or %i, found %i)\n", current_config->dp_m_n.link_n, current_config->dp_m2_n2.link_n, pipe_config->dp_m_n.link_n); return (0); } else { } if (current_config->dp_m_n.tu != pipe_config->dp_m_n.tu && current_config->dp_m2_n2.tu != pipe_config->dp_m_n.tu) { drm_err("mismatch in dp_m_n.tu (expected %i or %i, found %i)\n", current_config->dp_m_n.tu, current_config->dp_m2_n2.tu, pipe_config->dp_m_n.tu); return (0); } else { } } if (current_config->base.adjusted_mode.crtc_hdisplay != pipe_config->base.adjusted_mode.crtc_hdisplay) { drm_err("mismatch in base.adjusted_mode.crtc_hdisplay (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_hdisplay, pipe_config->base.adjusted_mode.crtc_hdisplay); return (0); } else { } if (current_config->base.adjusted_mode.crtc_htotal != pipe_config->base.adjusted_mode.crtc_htotal) { drm_err("mismatch in base.adjusted_mode.crtc_htotal (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_htotal, pipe_config->base.adjusted_mode.crtc_htotal); return (0); } else { } if (current_config->base.adjusted_mode.crtc_hblank_start != pipe_config->base.adjusted_mode.crtc_hblank_start) { drm_err("mismatch 
in base.adjusted_mode.crtc_hblank_start (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_hblank_start, pipe_config->base.adjusted_mode.crtc_hblank_start); return (0); } else { } if (current_config->base.adjusted_mode.crtc_hblank_end != pipe_config->base.adjusted_mode.crtc_hblank_end) { drm_err("mismatch in base.adjusted_mode.crtc_hblank_end (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_hblank_end, pipe_config->base.adjusted_mode.crtc_hblank_end); return (0); } else { } if (current_config->base.adjusted_mode.crtc_hsync_start != pipe_config->base.adjusted_mode.crtc_hsync_start) { drm_err("mismatch in base.adjusted_mode.crtc_hsync_start (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_hsync_start, pipe_config->base.adjusted_mode.crtc_hsync_start); return (0); } else { } if (current_config->base.adjusted_mode.crtc_hsync_end != pipe_config->base.adjusted_mode.crtc_hsync_end) { drm_err("mismatch in base.adjusted_mode.crtc_hsync_end (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_hsync_end, pipe_config->base.adjusted_mode.crtc_hsync_end); return (0); } else { } if (current_config->base.adjusted_mode.crtc_vdisplay != pipe_config->base.adjusted_mode.crtc_vdisplay) { drm_err("mismatch in base.adjusted_mode.crtc_vdisplay (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_vdisplay, pipe_config->base.adjusted_mode.crtc_vdisplay); return (0); } else { } if (current_config->base.adjusted_mode.crtc_vtotal != pipe_config->base.adjusted_mode.crtc_vtotal) { drm_err("mismatch in base.adjusted_mode.crtc_vtotal (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_vtotal, pipe_config->base.adjusted_mode.crtc_vtotal); return (0); } else { } if (current_config->base.adjusted_mode.crtc_vblank_start != pipe_config->base.adjusted_mode.crtc_vblank_start) { drm_err("mismatch in base.adjusted_mode.crtc_vblank_start (expected %i, found %i)\n", 
current_config->base.adjusted_mode.crtc_vblank_start, pipe_config->base.adjusted_mode.crtc_vblank_start); return (0); } else { } if (current_config->base.adjusted_mode.crtc_vblank_end != pipe_config->base.adjusted_mode.crtc_vblank_end) { drm_err("mismatch in base.adjusted_mode.crtc_vblank_end (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_vblank_end, pipe_config->base.adjusted_mode.crtc_vblank_end); return (0); } else { } if (current_config->base.adjusted_mode.crtc_vsync_start != pipe_config->base.adjusted_mode.crtc_vsync_start) { drm_err("mismatch in base.adjusted_mode.crtc_vsync_start (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_vsync_start, pipe_config->base.adjusted_mode.crtc_vsync_start); return (0); } else { } if (current_config->base.adjusted_mode.crtc_vsync_end != pipe_config->base.adjusted_mode.crtc_vsync_end) { drm_err("mismatch in base.adjusted_mode.crtc_vsync_end (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_vsync_end, pipe_config->base.adjusted_mode.crtc_vsync_end); return (0); } else { } if (current_config->pixel_multiplier != pipe_config->pixel_multiplier) { drm_err("mismatch in pixel_multiplier (expected %i, found %i)\n", current_config->pixel_multiplier, pipe_config->pixel_multiplier); return (0); } else { } if ((int )current_config->has_hdmi_sink != (int )pipe_config->has_hdmi_sink) { drm_err("mismatch in has_hdmi_sink (expected %i, found %i)\n", (int )current_config->has_hdmi_sink, (int )pipe_config->has_hdmi_sink); return (0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 7U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { _L: /* CIL Label */ if ((int 
)current_config->limited_color_range != (int )pipe_config->limited_color_range) { drm_err("mismatch in limited_color_range (expected %i, found %i)\n", (int )current_config->limited_color_range, (int )pipe_config->limited_color_range); return (0); } else { } } else { } } if ((int )current_config->has_infoframe != (int )pipe_config->has_infoframe) { drm_err("mismatch in has_infoframe (expected %i, found %i)\n", (int )current_config->has_infoframe, (int )pipe_config->has_infoframe); return (0); } else { } if ((int )current_config->has_audio != (int )pipe_config->has_audio) { drm_err("mismatch in has_audio (expected %i, found %i)\n", (int )current_config->has_audio, (int )pipe_config->has_audio); return (0); } else { } if (((current_config->base.adjusted_mode.flags ^ pipe_config->base.adjusted_mode.flags) & 16U) != 0U) { drm_err("mismatch in base.adjusted_mode.flags(DRM_MODE_FLAG_INTERLACE) (expected %i, found %i)\n", current_config->base.adjusted_mode.flags & 16U, pipe_config->base.adjusted_mode.flags & 16U); return (0); } else { } if (((current_config->quirks | pipe_config->quirks) & 1UL) == 0UL) { if ((int )(current_config->base.adjusted_mode.flags ^ pipe_config->base.adjusted_mode.flags) & 1) { drm_err("mismatch in base.adjusted_mode.flags(DRM_MODE_FLAG_PHSYNC) (expected %i, found %i)\n", current_config->base.adjusted_mode.flags & 1U, pipe_config->base.adjusted_mode.flags & 1U); return (0); } else { } if (((current_config->base.adjusted_mode.flags ^ pipe_config->base.adjusted_mode.flags) & 2U) != 0U) { drm_err("mismatch in base.adjusted_mode.flags(DRM_MODE_FLAG_NHSYNC) (expected %i, found %i)\n", current_config->base.adjusted_mode.flags & 2U, pipe_config->base.adjusted_mode.flags & 2U); return (0); } else { } if (((current_config->base.adjusted_mode.flags ^ pipe_config->base.adjusted_mode.flags) & 4U) != 0U) { drm_err("mismatch in base.adjusted_mode.flags(DRM_MODE_FLAG_PVSYNC) (expected %i, found %i)\n", current_config->base.adjusted_mode.flags & 4U, 
pipe_config->base.adjusted_mode.flags & 4U); return (0); } else { } if (((current_config->base.adjusted_mode.flags ^ pipe_config->base.adjusted_mode.flags) & 8U) != 0U) { drm_err("mismatch in base.adjusted_mode.flags(DRM_MODE_FLAG_NVSYNC) (expected %i, found %i)\n", current_config->base.adjusted_mode.flags & 8U, pipe_config->base.adjusted_mode.flags & 8U); return (0); } else { } } else { } if (current_config->pipe_src_w != pipe_config->pipe_src_w) { drm_err("mismatch in pipe_src_w (expected %i, found %i)\n", current_config->pipe_src_w, pipe_config->pipe_src_w); return (0); } else { } if (current_config->pipe_src_h != pipe_config->pipe_src_h) { drm_err("mismatch in pipe_src_h (expected %i, found %i)\n", current_config->pipe_src_h, pipe_config->pipe_src_h); return (0); } else { } if (((current_config->quirks | pipe_config->quirks) & 2UL) == 0UL) { if (current_config->gmch_pfit.control != pipe_config->gmch_pfit.control) { drm_err("mismatch in gmch_pfit.control (expected %i, found %i)\n", current_config->gmch_pfit.control, pipe_config->gmch_pfit.control); return (0); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) <= 3U) { if (current_config->gmch_pfit.pgm_ratios != pipe_config->gmch_pfit.pgm_ratios) { drm_err("mismatch in gmch_pfit.pgm_ratios (expected %i, found %i)\n", current_config->gmch_pfit.pgm_ratios, pipe_config->gmch_pfit.pgm_ratios); return (0); } else { } } else { } if (current_config->gmch_pfit.lvds_border_bits != pipe_config->gmch_pfit.lvds_border_bits) { drm_err("mismatch in gmch_pfit.lvds_border_bits (expected %i, found %i)\n", current_config->gmch_pfit.lvds_border_bits, pipe_config->gmch_pfit.lvds_border_bits); return (0); } else { } } else { } if ((int )current_config->pch_pfit.enabled != (int )pipe_config->pch_pfit.enabled) { drm_err("mismatch in pch_pfit.enabled (expected %i, found %i)\n", (int )current_config->pch_pfit.enabled, (int )pipe_config->pch_pfit.enabled); return (0); } 
else { } if ((int )current_config->pch_pfit.enabled) { if (current_config->pch_pfit.pos != pipe_config->pch_pfit.pos) { drm_err("mismatch in pch_pfit.pos (expected %i, found %i)\n", current_config->pch_pfit.pos, pipe_config->pch_pfit.pos); return (0); } else { } if (current_config->pch_pfit.size != pipe_config->pch_pfit.size) { drm_err("mismatch in pch_pfit.size (expected %i, found %i)\n", current_config->pch_pfit.size, pipe_config->pch_pfit.size); return (0); } else { } } else { } if (current_config->scaler_state.scaler_id != pipe_config->scaler_state.scaler_id) { drm_err("mismatch in scaler_state.scaler_id (expected %i, found %i)\n", current_config->scaler_state.scaler_id, pipe_config->scaler_state.scaler_id); return (0); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { if ((int )current_config->ips_enabled != (int )pipe_config->ips_enabled) { drm_err("mismatch in ips_enabled (expected %i, found %i)\n", (int )current_config->ips_enabled, (int )pipe_config->ips_enabled); return (0); } else { } } else { } if ((int )current_config->double_wide != (int )pipe_config->double_wide) { drm_err("mismatch in double_wide (expected %i, found %i)\n", (int )current_config->double_wide, (int )pipe_config->double_wide); return (0); } else { } if (current_config->ddi_pll_sel != pipe_config->ddi_pll_sel) { drm_err("mismatch in ddi_pll_sel (expected 0x%08x, found 0x%08x)\n", current_config->ddi_pll_sel, pipe_config->ddi_pll_sel); return (0); } else { } if ((int )current_config->shared_dpll != (int )pipe_config->shared_dpll) { drm_err("mismatch in shared_dpll (expected %i, found %i)\n", (int )current_config->shared_dpll, (int )pipe_config->shared_dpll); return (0); } else { } if (current_config->dpll_hw_state.dpll != pipe_config->dpll_hw_state.dpll) { drm_err("mismatch in dpll_hw_state.dpll (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.dpll, pipe_config->dpll_hw_state.dpll); return (0); } 
else { } if (current_config->dpll_hw_state.dpll_md != pipe_config->dpll_hw_state.dpll_md) { drm_err("mismatch in dpll_hw_state.dpll_md (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.dpll_md, pipe_config->dpll_hw_state.dpll_md); return (0); } else { } if (current_config->dpll_hw_state.fp0 != pipe_config->dpll_hw_state.fp0) { drm_err("mismatch in dpll_hw_state.fp0 (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.fp0, pipe_config->dpll_hw_state.fp0); return (0); } else { } if (current_config->dpll_hw_state.fp1 != pipe_config->dpll_hw_state.fp1) { drm_err("mismatch in dpll_hw_state.fp1 (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.fp1, pipe_config->dpll_hw_state.fp1); return (0); } else { } if (current_config->dpll_hw_state.wrpll != pipe_config->dpll_hw_state.wrpll) { drm_err("mismatch in dpll_hw_state.wrpll (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.wrpll, pipe_config->dpll_hw_state.wrpll); return (0); } else { } if (current_config->dpll_hw_state.ctrl1 != pipe_config->dpll_hw_state.ctrl1) { drm_err("mismatch in dpll_hw_state.ctrl1 (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.ctrl1, pipe_config->dpll_hw_state.ctrl1); return (0); } else { } if (current_config->dpll_hw_state.cfgcr1 != pipe_config->dpll_hw_state.cfgcr1) { drm_err("mismatch in dpll_hw_state.cfgcr1 (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.cfgcr1, pipe_config->dpll_hw_state.cfgcr1); return (0); } else { } if (current_config->dpll_hw_state.cfgcr2 != pipe_config->dpll_hw_state.cfgcr2) { drm_err("mismatch in dpll_hw_state.cfgcr2 (expected 0x%08x, found 0x%08x)\n", current_config->dpll_hw_state.cfgcr2, pipe_config->dpll_hw_state.cfgcr2); return (0); } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 44UL) != 0U) { goto _L___1; } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char 
)__p___6->info.gen) > 4U) { _L___1: /* CIL Label */ if (current_config->pipe_bpp != pipe_config->pipe_bpp) { drm_err("mismatch in pipe_bpp (expected %i, found %i)\n", current_config->pipe_bpp, pipe_config->pipe_bpp); return (0); } else { } } else { } }
/* Tail of intel_pipe_config_compare(): fuzzy-compare crtc_clock and
 * port_clock (intel_fuzzy_clock_check tolerates small deviations), then
 * report overall success (1 = configs match). */
tmp = intel_fuzzy_clock_check(current_config->base.adjusted_mode.crtc_clock, pipe_config->base.adjusted_mode.crtc_clock); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { drm_err("mismatch in base.adjusted_mode.crtc_clock (expected %i, found %i)\n", current_config->base.adjusted_mode.crtc_clock, pipe_config->base.adjusted_mode.crtc_clock); return (0); } else { } tmp___1 = intel_fuzzy_clock_check(current_config->port_clock, pipe_config->port_clock); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { drm_err("mismatch in port_clock (expected %i, found %i)\n", current_config->port_clock, pipe_config->port_clock); return (0); } else { } return (1); } }
/* check_wm_state(): read the SKL DDB (display data buffer) allocation back
 * from hardware (skl_ddb_get_hw_state) and compare it against the
 * software-tracked allocation in dev_priv->wm, logging a drm_err for every
 * per-plane or per-cursor entry that disagrees.  Returns early (no-op) for
 * info.gen <= 8, i.e. only gen9+ hardware is checked.
 * NOTE(review): this is CIL-flattened output — the ldv_556xx labels/gotos
 * encode the original list_for_each_entry over crtc_list and the inner
 * for-each-plane loop (bounded by info.num_sprites[pipe] + 1). */
static void check_wm_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct skl_ddb_allocation hw_ddb ; struct skl_ddb_allocation *sw_ddb ; struct intel_crtc *intel_crtc ; int plane ; struct drm_i915_private *__p ; struct list_head const *__mptr ; struct skl_ddb_entry *hw_entry ; struct skl_ddb_entry *sw_entry ; enum pipe pipe ; bool tmp ; struct drm_i915_private *__p___0 ; bool tmp___0 ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { return; } else { } skl_ddb_get_hw_state(dev_priv, & hw_ddb); sw_ddb = & dev_priv->wm.__annonCompField83.skl_hw.ddb; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; intel_crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_55646; ldv_55645: pipe = intel_crtc->pipe; if (! 
intel_crtc->active) { goto ldv_55634; } else { } plane = 0; goto ldv_55643; ldv_55642: hw_entry = (struct skl_ddb_entry *)(& hw_ddb.plane) + ((unsigned long )pipe + (unsigned long )plane); sw_entry = (struct skl_ddb_entry *)(& sw_ddb->plane) + ((unsigned long )pipe + (unsigned long )plane); tmp = skl_ddb_entry_equal((struct skl_ddb_entry const *)hw_entry, (struct skl_ddb_entry const *)sw_entry); if ((int )tmp) { goto ldv_55641; } else { } drm_err("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", (int )pipe + 65, plane + 1, (int )sw_entry->start, (int )sw_entry->end, (int )hw_entry->start, (int )hw_entry->end); ldv_55641: plane = plane + 1; ldv_55643: __p___0 = dev_priv; if ((int )__p___0->info.num_sprites[(int )pipe] + 1 > plane) { goto ldv_55642; } else { } hw_entry = (struct skl_ddb_entry *)(& hw_ddb.cursor) + (unsigned long )pipe; sw_entry = (struct skl_ddb_entry *)(& sw_ddb->cursor) + (unsigned long )pipe; tmp___0 = skl_ddb_entry_equal((struct skl_ddb_entry const *)hw_entry, (struct skl_ddb_entry const *)sw_entry); if ((int )tmp___0) { goto ldv_55634; } else { } drm_err("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", (int )pipe + 65, (int )sw_entry->start, (int )sw_entry->end, (int )hw_entry->start, (int )hw_entry->end); ldv_55634: __mptr___0 = (struct list_head const *)intel_crtc->base.head.next; intel_crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_55646: ; if ((unsigned long )(& intel_crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_55645; } else { } return; } }
/* check_connector_state(): walk every connector on the device, run
 * intel_connector_check_state() on it, and complain (WARN when
 * i915.verbose_state_checks is set, plain drm_err otherwise) if the
 * connector's staged new_encoder disagrees with its current base.encoder. */
static void check_connector_state(struct drm_device *dev ) { struct intel_connector *connector ; struct list_head const *__mptr ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp ; long tmp___0 ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_55661; 
/* Loop body of check_connector_state() (labels ldv_55660/ldv_55661 encode
 * the original list_for_each_entry over connector_list). */
ldv_55660: intel_connector_check_state(connector); __ret_warn_on = (unsigned long )(& (connector->new_encoder)->base) != (unsigned long )connector->base.encoder; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12193, "connector\'s staged encoder doesn\'t match current encoder\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("connector\'s staged encoder doesn\'t match current encoder\n"); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __mptr___0 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_55661: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_55660; } else { } return; } }
/* check_encoder_state(): for every encoder, cross-check the software
 * bookkeeping against reality:
 *   - staged new_crtc must match the current base.crtc;
 *   - connectors_active implies a crtc is attached;
 *   - enabled/active recomputed from the attached connectors' DPMS state
 *     must match encoder->connectors_active and base.crtc presence
 *     (DVO encoders, encoder_type == 7, are exempt when disabled);
 *   - encoder->get_hw_state() must agree with connectors_active, and when a
 *     crtc is attached, the hw-reported pipe must match the crtc's pipe.
 * Each mismatch WARNs when i915.verbose_state_checks is set, else drm_err.
 * CIL-flattened: ldv_557xx labels encode the encoder/connector list loops. */
static void check_encoder_state(struct drm_device *dev ) { struct intel_encoder *encoder ; struct intel_connector *connector ; struct list_head const *__mptr ; bool enabled ; bool active ; enum pipe pipe ; enum pipe tracked_pipe ; long tmp ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___1 ; int __ret_warn_on___2 ; long tmp___2 ; long tmp___3 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int __ret_warn_on___3 ; int __ret_warn_on___4 ; long tmp___4 ; long tmp___5 ; int __ret_warn_on___5 ; int __ret_warn_on___6 ; long tmp___6 ; long tmp___7 ; int __ret_warn_on___7 ; int __ret_warn_on___8 ; long tmp___8 ; long tmp___9 ; int __ret_warn_on___9 ; int __ret_warn_on___10 ; long tmp___10 ; long tmp___11 ; struct drm_crtc const *__mptr___2 ; int 
__ret_warn_on___11 ; int __ret_warn_on___12 ; long tmp___12 ; long tmp___13 ; struct list_head const *__mptr___3 ; { __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_55717; ldv_55716: enabled = 0; active = 0; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("check_encoder_state", "[ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name); } else { } __ret_warn_on = (unsigned long )(& (encoder->new_crtc)->base) != (unsigned long )encoder->base.crtc; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12213, "encoder\'s stage crtc doesn\'t match current crtc\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("encoder\'s stage crtc doesn\'t match current crtc\n"); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___1 = (int )encoder->connectors_active && (unsigned long )encoder->base.crtc == (unsigned long )((struct drm_crtc *)0); tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12215, "encoder\'s active_connectors set, but no crtc\n"); } else { } 
ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("encoder\'s active_connectors set, but no crtc\n"); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); __mptr___0 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_55691; ldv_55690: ; if ((unsigned long )connector->base.encoder != (unsigned long )(& encoder->base)) { goto ldv_55689; } else { } enabled = 1; if (connector->base.dpms != 3) { active = 1; } else { } ldv_55689: __mptr___1 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_55691: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_55690; } else { } if (! enabled && encoder->base.encoder_type == 7) { goto ldv_55693; } else { } __ret_warn_on___3 = ((unsigned long )encoder->base.crtc != (unsigned long )((struct drm_crtc *)0)) != (int )enabled; tmp___5 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___5 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___4 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12235, "encoder\'s enabled state mismatch (expected %i, found %i)\n", (unsigned long )encoder->base.crtc != (unsigned long )((struct drm_crtc *)0), (int )enabled); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); } else { drm_err("encoder\'s enabled state mismatch (expected %i, found %i)\n", (unsigned long )encoder->base.crtc != (unsigned long )((struct drm_crtc *)0), (int )enabled); } } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); __ret_warn_on___5 = (int )active && (unsigned long 
)encoder->base.crtc == (unsigned long )((struct drm_crtc *)0); tmp___7 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___7 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___6 = 1; tmp___6 = ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12237, "active encoder with no crtc\n"); } else { } ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); } else { drm_err("active encoder with no crtc\n"); } } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); __ret_warn_on___7 = (int )encoder->connectors_active != (int )active; tmp___9 = ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); if (tmp___9 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___8 = 1; tmp___8 = ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12241, "encoder\'s computed active state doesn\'t match tracked active state (expected %i, found %i)\n", (int )active, (int )encoder->connectors_active); } else { } ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); } else { drm_err("encoder\'s computed active state doesn\'t match tracked active state (expected %i, found %i)\n", (int )active, (int )encoder->connectors_active); } } else { } ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); active = (*(encoder->get_hw_state))(encoder, & pipe); __ret_warn_on___9 = (int )encoder->connectors_active != (int )active; tmp___11 = ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); if (tmp___11 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___10 = 1; tmp___10 = 
ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); if (tmp___10 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12247, "encoder\'s hw state doesn\'t match sw tracking (expected %i, found %i)\n", (int )encoder->connectors_active, (int )active); } else { } ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); } else { drm_err("encoder\'s hw state doesn\'t match sw tracking (expected %i, found %i)\n", (int )encoder->connectors_active, (int )active); } } else { } ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); if ((unsigned long )encoder->base.crtc == (unsigned long )((struct drm_crtc *)0)) { goto ldv_55693; } else { } __mptr___2 = (struct drm_crtc const *)encoder->base.crtc; tracked_pipe = ((struct intel_crtc *)__mptr___2)->pipe; __ret_warn_on___11 = (int )active && (int )pipe != (int )tracked_pipe; tmp___13 = ldv__builtin_expect(__ret_warn_on___11 != 0, 0L); if (tmp___13 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___12 = 1; tmp___12 = ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); if (tmp___12 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12256, "active encoder\'s pipe doesn\'t match(expected %i, found %i)\n", (int )tracked_pipe, (int )pipe); } else { } ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); } else { drm_err("active encoder\'s pipe doesn\'t match(expected %i, found %i)\n", (int )tracked_pipe, (int )pipe); } } else { } ldv__builtin_expect(__ret_warn_on___11 != 0, 0L); ldv_55693: __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_55717: ; if ((unsigned long )(& 
encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_55716; } else { } return; } }
/* check_crtc_state(): for every CRTC, recompute 'enabled'/'active' from the
 * encoders currently attached to it and compare with the CRTC's tracked
 * state (crtc->active, crtc->base.state->enable).  Then read the pipe
 * configuration back from hardware (display.get_pipe_config plus each
 * active encoder's get_config) and run intel_pipe_config_compare() against
 * crtc->config, dumping both hw and sw configs on mismatch.  Pipes covered
 * by the force-quirk bits in dev_priv->quirks (pipe A: bit 0, pipe B:
 * bit 4) trust the software 'active' value instead of the hw readout.
 * All mismatches WARN when i915.verbose_state_checks is set, else drm_err.
 * CIL-flattened: ldv_557xx labels encode the crtc/encoder list loops. */
static void check_crtc_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct intel_encoder *encoder ; struct intel_crtc_state pipe_config ; struct list_head const *__mptr ; bool enabled ; bool active ; long tmp ; int __ret_warn_on ; int __ret_warn_on___0 ; long tmp___0 ; long tmp___1 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int __ret_warn_on___1 ; int __ret_warn_on___2 ; long tmp___2 ; long tmp___3 ; int __ret_warn_on___3 ; int __ret_warn_on___4 ; long tmp___4 ; long tmp___5 ; struct list_head const *__mptr___2 ; enum pipe pipe ; bool tmp___6 ; struct list_head const *__mptr___3 ; int __ret_warn_on___5 ; int __ret_warn_on___6 ; long tmp___7 ; long tmp___8 ; int __ret_warn_on___7 ; int __ret_warn_on___8 ; long tmp___9 ; long tmp___10 ; bool tmp___11 ; int tmp___12 ; struct list_head const *__mptr___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_55771; ldv_55770: enabled = 0; active = 0; memset((void *)(& pipe_config), 0, 752UL); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("check_crtc_state", "[CRTC:%d]\n", crtc->base.base.id); } else { } __ret_warn_on = (int )crtc->active && ! 
(crtc->base.state)->enable; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12279, "active crtc, but not enabled in sw tracking\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { drm_err("active crtc, but not enabled in sw tracking\n"); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_55743; ldv_55742: ; if ((unsigned long )encoder->base.crtc != (unsigned long )(& crtc->base)) { goto ldv_55741; } else { } enabled = 1; if ((int )encoder->connectors_active) { active = 1; } else { } ldv_55741: __mptr___1 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_55743: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_55742; } else { } __ret_warn_on___1 = (int )crtc->active != (int )active; tmp___3 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___3 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12291, "crtc\'s computed active state doesn\'t match tracked active state (expected %i, found %i)\n", (int )active, (int 
)crtc->active); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("crtc\'s computed active state doesn\'t match tracked active state (expected %i, found %i)\n", (int )active, (int )crtc->active); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); __ret_warn_on___3 = (int )(crtc->base.state)->enable != (int )enabled; tmp___5 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___5 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___4 = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12295, "crtc\'s computed enabled state doesn\'t match tracked enabled state (expected %i, found %i)\n", (int )enabled, (int )(crtc->base.state)->enable); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); } else { drm_err("crtc\'s computed enabled state doesn\'t match tracked enabled state (expected %i, found %i)\n", (int )enabled, (int )(crtc->base.state)->enable); } } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); active = (*(dev_priv->display.get_pipe_config))(crtc, & pipe_config); if (((int )crtc->pipe == 0 && (int )dev_priv->quirks & 1) || ((int )crtc->pipe == 1 && (dev_priv->quirks & 16UL) != 0UL)) { active = crtc->active; } else { } __mptr___2 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; goto ldv_55760; ldv_55759: ; if ((unsigned long )encoder->base.crtc != (unsigned long )(& crtc->base)) { goto ldv_55758; } else { } tmp___6 = (*(encoder->get_hw_state))(encoder, & pipe); if ((int )tmp___6) { (*(encoder->get_config))(encoder, & pipe_config); } else { } ldv_55758: __mptr___3 = (struct list_head const *)encoder->base.head.next; encoder = (struct 
intel_encoder *)__mptr___3 + 0xfffffffffffffff8UL; ldv_55760: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_55759; } else { } __ret_warn_on___5 = (int )crtc->active != (int )active; tmp___8 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___8 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___6 = 1; tmp___7 = ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); if (tmp___7 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12315, "crtc active state doesn\'t match with hw state (expected %i, found %i)\n", (int )crtc->active, (int )active); } else { } ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); } else { drm_err("crtc active state doesn\'t match with hw state (expected %i, found %i)\n", (int )crtc->active, (int )active); } } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if ((int )active) { tmp___11 = intel_pipe_config_compare(dev, crtc->config, & pipe_config); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { __ret_warn_on___7 = 1; tmp___10 = ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); if (tmp___10 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___8 = 1; tmp___9 = ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12319, "pipe state doesn\'t match!\n"); } else { } ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); } else { drm_err("pipe state doesn\'t match!\n"); } } else { } ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); intel_dump_pipe_config(crtc, & pipe_config, "[hw state]"); 
intel_dump_pipe_config(crtc, crtc->config, "[sw state]"); } else { } } else { } __mptr___4 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___4 + 0xfffffffffffffff0UL; ldv_55771: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_55770; } else { } return; } } static void check_shared_dpll_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct intel_dpll_hw_state dpll_hw_state ; int i ; struct intel_shared_dpll *pll ; int enabled_crtcs ; int active_crtcs ; bool active ; long tmp ; int __ret_warn_on ; unsigned int tmp___0 ; int __ret_warn_on___0 ; unsigned int tmp___1 ; long tmp___2 ; unsigned int tmp___3 ; long tmp___4 ; int __ret_warn_on___1 ; int __ret_warn_on___2 ; long tmp___5 ; long tmp___6 ; int __ret_warn_on___3 ; int __ret_warn_on___4 ; long tmp___7 ; long tmp___8 ; int __ret_warn_on___5 ; int __ret_warn_on___6 ; long tmp___9 ; long tmp___10 ; struct list_head const *__mptr ; struct intel_shared_dpll *tmp___11 ; struct intel_shared_dpll *tmp___12 ; struct list_head const *__mptr___0 ; int __ret_warn_on___7 ; int __ret_warn_on___8 ; long tmp___13 ; long tmp___14 ; int __ret_warn_on___9 ; unsigned int tmp___15 ; int __ret_warn_on___10 ; unsigned int tmp___16 ; long tmp___17 ; unsigned int tmp___18 ; long tmp___19 ; int __ret_warn_on___11 ; int tmp___20 ; int tmp___21 ; int __ret_warn_on___12 ; long tmp___22 ; long tmp___23 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_55821; ldv_55820: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; enabled_crtcs = 0; active_crtcs = 0; memset((void *)(& dpll_hw_state), 0, 68UL); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("check_shared_dpll_state", "%s\n", pll->name); } else { } active = (*(pll->get_hw_state))(dev_priv, pll, & dpll_hw_state); tmp___0 = 
__arch_hweight32(pll->config.crtc_mask); __ret_warn_on = (unsigned int )pll->active > tmp___0; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { tmp___1 = __arch_hweight32(pll->config.crtc_mask); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12349, "more active pll users than references: %i vs %i\n", pll->active, tmp___1); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___3 = __arch_hweight32(pll->config.crtc_mask); drm_err("more active pll users than references: %i vs %i\n", pll->active, tmp___3); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___1 = pll->active != 0 && ! pll->on; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___2 = 1; tmp___5 = ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12351, "pll in active use but not on in sw tracking\n"); } else { } ldv__builtin_expect(__ret_warn_on___2 != 0, 0L); } else { drm_err("pll in active use but not on in sw tracking\n"); } } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); __ret_warn_on___3 = (int )pll->on && pll->active == 0; tmp___8 = ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); if (tmp___8 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___4 = 1; tmp___7 = ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); if (tmp___7 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12353, "pll in on but not on in use in sw tracking\n"); } else { } ldv__builtin_expect(__ret_warn_on___4 != 0, 0L); } else { drm_err("pll in on but not on in use in sw tracking\n"); } } else { } ldv__builtin_expect(__ret_warn_on___3 != 0, 0L); __ret_warn_on___5 = (int )pll->on != (int )active; tmp___10 = ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); if (tmp___10 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___6 = 1; tmp___9 = ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12356, "pll on state mismatch (expected %i, found %i)\n", (int )pll->on, (int )active); } else { } ldv__builtin_expect(__ret_warn_on___6 != 0, 0L); } else { drm_err("pll on state mismatch (expected %i, found %i)\n", (int )pll->on, (int )active); } } else { } ldv__builtin_expect(__ret_warn_on___5 != 0, 0L); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_55806; ldv_55805: ; if ((int )(crtc->base.state)->enable) { tmp___11 = intel_crtc_to_shared_dpll(crtc); if ((unsigned long )tmp___11 == (unsigned long )pll) { enabled_crtcs = enabled_crtcs + 1; } else { } } else { } if ((int )crtc->active) { tmp___12 = intel_crtc_to_shared_dpll(crtc); if ((unsigned long )tmp___12 == (unsigned long )pll) { active_crtcs = active_crtcs + 1; } else { } } else { } __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_55806: ; if ((unsigned long )(& 
crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_55805; } else { } __ret_warn_on___7 = pll->active != active_crtcs; tmp___14 = ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); if (tmp___14 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___8 = 1; tmp___13 = ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); if (tmp___13 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12366, "pll active crtcs mismatch (expected %i, found %i)\n", pll->active, active_crtcs); } else { } ldv__builtin_expect(__ret_warn_on___8 != 0, 0L); } else { drm_err("pll active crtcs mismatch (expected %i, found %i)\n", pll->active, active_crtcs); } } else { } ldv__builtin_expect(__ret_warn_on___7 != 0, 0L); tmp___15 = __arch_hweight32(pll->config.crtc_mask); __ret_warn_on___9 = tmp___15 != (unsigned int )enabled_crtcs; tmp___19 = ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); if (tmp___19 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___10 = 1; tmp___17 = ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); if (tmp___17 != 0L) { tmp___16 = __arch_hweight32(pll->config.crtc_mask); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12369, "pll enabled crtcs mismatch (expected %i, found %i)\n", tmp___16, enabled_crtcs); } else { } ldv__builtin_expect(__ret_warn_on___10 != 0, 0L); } else { tmp___18 = __arch_hweight32(pll->config.crtc_mask); drm_err("pll enabled crtcs mismatch (expected %i, found %i)\n", tmp___18, enabled_crtcs); } } else { } ldv__builtin_expect(__ret_warn_on___9 != 0, 0L); if ((int )pll->on) { tmp___20 = memcmp((void const *)(& pll->config.hw_state), (void 
/* NOTE(review): CIL-flattened, machine-generated harness code (LDV). Comments
 * added for readability only; all code tokens are unchanged. */
/* --- tail of check_shared_dpll_state(): its definition opens on an earlier
 * line. While the PLL is on, memcmp()s the cached hw_state against the freshly
 * read dpll_hw_state (68 bytes) and WARNs (or drm_err()s when
 * i915.verbose_state_checks is clear) on mismatch, then advances the
 * shared-DPLL loop (ldv_55821/ldv_55820 are CIL loop labels). */
const *)(& dpll_hw_state), 68UL); if (tmp___20 != 0) { tmp___21 = 1; } else { tmp___21 = 0; } } else { tmp___21 = 0; } __ret_warn_on___11 = tmp___21; tmp___23 = ldv__builtin_expect(__ret_warn_on___11 != 0, 0L); if (tmp___23 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___12 = 1; tmp___22 = ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); if (tmp___22 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12373, "pll hw state mismatch\n"); } else { } ldv__builtin_expect(__ret_warn_on___12 != 0, 0L); } else { drm_err("pll hw state mismatch\n"); } } else { } ldv__builtin_expect(__ret_warn_on___11 != 0, 0L); i = i + 1; ldv_55821: ; if (dev_priv->num_shared_dpll > i) { goto ldv_55820; } else { } return; } }
/* Top-level modeset consistency checker: runs each per-domain checker
 * (watermarks, connectors, encoders, CRTCs, shared DPLLs) in sequence. */
void intel_modeset_check_state(struct drm_device *dev ) { { check_wm_state(dev); check_connector_state(dev); check_encoder_state(dev); check_crtc_state(dev); check_shared_dpll_state(dev); return; } }
/* WARNs when the FDI-derived crtc_clock and the encoder-reported dotclock
 * disagree beyond intel_fuzzy_clock_check()'s tolerance. Read-only check;
 * pipe_config is not modified. */
void ironlake_check_encoder_dotclock(struct intel_crtc_state const *pipe_config , int dotclock ) { int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; { tmp = intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12396, "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", pipe_config->base.adjusted_mode.crtc_clock, dotclock); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } }
/* --- head of update_scanline_offset(); signature and body continue on the
 * next line. */
static void update_scanline_offset(struct
intel_crtc *crtc ) { struct drm_device *dev ; struct drm_display_mode const *mode ; int vtotal ; struct drm_i915_private *__p ; bool tmp ; struct drm_i915_private *__p___0 ; { dev = crtc->base.dev; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 2U) { mode = (struct drm_display_mode const *)(& (crtc->config)->base.adjusted_mode); vtotal = mode->crtc_vtotal; if (((unsigned int )mode->flags & 16U) != 0U) { vtotal = vtotal / 2; } else { } crtc->scanline_offset = vtotal + -1; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { tmp = intel_pipe_has_type(crtc, 6); if ((int )tmp) { crtc->scanline_offset = 2; } else { crtc->scanline_offset = 1; } } else { crtc->scanline_offset = 1; } } return; } } static struct intel_crtc_state *intel_modeset_compute_config(struct drm_crtc *crtc , struct drm_atomic_state *state ) { struct intel_crtc_state *pipe_config ; int ret ; void *tmp ; void *tmp___0 ; struct drm_crtc const *__mptr ; bool tmp___1 ; void *tmp___2 ; struct drm_crtc const *__mptr___0 ; struct drm_crtc const *__mptr___1 ; void *tmp___3 ; { ret = 0; ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) { tmp = ERR_PTR((long )ret); return ((struct intel_crtc_state *)tmp); } else { } ret = drm_atomic_helper_check_modeset(state->dev, state); if (ret != 0) { tmp___0 = ERR_PTR((long )ret); return ((struct intel_crtc_state *)tmp___0); } else { } __mptr = (struct drm_crtc const *)crtc; pipe_config = intel_atomic_get_crtc_state(state, (struct intel_crtc *)__mptr); tmp___1 = IS_ERR((void const *)pipe_config); if ((int )tmp___1) { return (pipe_config); } else { } if (! 
pipe_config->base.enable) { return (pipe_config); } else { } ret = intel_modeset_pipe_config(crtc, state, pipe_config); if (ret != 0) { tmp___2 = ERR_PTR((long )ret); return ((struct intel_crtc_state *)tmp___2); } else { } __mptr___0 = (struct drm_crtc const *)crtc; if ((int )pipe_config->has_audio != (int )(((struct intel_crtc *)__mptr___0)->config)->has_audio) { pipe_config->base.mode_changed = 1; } else { } __mptr___1 = (struct drm_crtc const *)crtc; intel_dump_pipe_config((struct intel_crtc *)__mptr___1, pipe_config, "[modeset]"); ret = drm_atomic_helper_check_planes(state->dev, state); if (ret != 0) { tmp___3 = ERR_PTR((long )ret); return ((struct intel_crtc_state *)tmp___3); } else { } return (pipe_config); } } static int __intel_set_mode_setup_plls(struct drm_atomic_state *state ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; unsigned int clear_pipes ; struct intel_crtc *intel_crtc ; struct intel_crtc_state *intel_crtc_state ; struct drm_crtc *crtc ; struct drm_crtc_state *crtc_state ; int ret ; int i ; struct drm_crtc const *__mptr ; struct drm_crtc_state const *__mptr___0 ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; struct drm_crtc const *__mptr___1 ; struct drm_crtc_state const *__mptr___2 ; { dev = state->dev; tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; clear_pipes = 0U; ret = 0; if ((unsigned long )dev_priv->display.crtc_compute_clock == (unsigned long )((int (*)(struct intel_crtc * , struct intel_crtc_state * ))0)) { return (0); } else { } i = 0; goto ldv_55879; ldv_55878: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; __mptr___0 = (struct drm_crtc_state const *)crtc_state; intel_crtc_state = (struct intel_crtc_state *)__mptr___0; tmp___0 = needs_modeset(crtc_state); if ((int )tmp___0) { clear_pipes = (unsigned int )(1 << (int )intel_crtc->pipe) | clear_pipes; 
intel_crtc_state->shared_dpll = -1; } else { } } else { } i = i + 1; ldv_55879: ; if ((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_55878; } else { } ret = intel_shared_dpll_start_config(dev_priv, clear_pipes); if (ret != 0) { goto done; } else { } i = 0; goto ldv_55888; ldv_55887: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { tmp___1 = needs_modeset(crtc_state); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2 || ! crtc_state->enable) { goto ldv_55882; } else { } __mptr___1 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___1; __mptr___2 = (struct drm_crtc_state const *)crtc_state; intel_crtc_state = (struct intel_crtc_state *)__mptr___2; ret = (*(dev_priv->display.crtc_compute_clock))(intel_crtc, intel_crtc_state); if (ret != 0) { intel_shared_dpll_abort_config(dev_priv); goto done; } else { } } else { } ldv_55882: i = i + 1; ldv_55888: ; if ((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_55887; } else { } done: ; return (ret); } } static int __intel_set_mode_checks(struct drm_atomic_state *state ) { struct drm_device *dev ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = state->dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 9U) { _L: /* CIL Label */ ret = valleyview_modeset_global_pipes(state); if (ret != 0) { return (ret); } else { } } else { } } else { } } ret = __intel_set_mode_setup_plls(state); if (ret != 
0) { return (ret); } else { } return (0); } } static int __intel_set_mode(struct drm_crtc *modeset_crtc , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_atomic_state *state ; struct drm_crtc *crtc ; struct drm_crtc_state *crtc_state ; int ret ; int i ; bool tmp ; int tmp___0 ; bool tmp___1 ; bool tmp___2 ; int tmp___3 ; struct drm_crtc const *__mptr ; { dev = modeset_crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; state = pipe_config->base.state; ret = 0; ret = __intel_set_mode_checks(state); if (ret < 0) { return (ret); } else { } ret = drm_atomic_helper_prepare_planes(dev, state); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_55926; ldv_55925: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { tmp = needs_modeset(crtc_state); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_55924; } else { } if (! crtc_state->enable) { intel_crtc_disable(crtc); } else if ((int )(crtc->state)->enable) { intel_crtc_disable_planes(crtc); (*(dev_priv->display.crtc_disable))(crtc); } else { } } else { } ldv_55924: i = i + 1; ldv_55926: ; if ((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_55925; } else { } if ((int )pipe_config->base.enable) { tmp___1 = needs_modeset(& pipe_config->base); if ((int )tmp___1) { modeset_crtc->mode = pipe_config->base.mode; drm_calc_timestamping_constants(modeset_crtc, (struct drm_display_mode const *)(& pipe_config->base.adjusted_mode)); } else { } } else { } intel_modeset_update_state(state); modeset_update_crtc_power_domains(state); drm_atomic_helper_commit_planes(dev, state); i = 0; goto ldv_55932; ldv_55931: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { tmp___2 = needs_modeset(crtc->state); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3 || ! 
/* NOTE(review): CIL-flattened, machine-generated harness code (LDV). Comments
 * added for readability only; all code tokens are unchanged. */
/* --- tail of __intel_set_mode(): its definition opens on an earlier line.
 * For each CRTC state that needs a modeset and is enabled, refreshes the
 * scanline offset, calls the platform crtc_enable hook and re-enables planes
 * (ldv_55928/ldv_55932/ldv_55931 are CIL loop labels), then cleans up plane
 * state and frees the atomic state on success. */
(crtc->state)->enable) { goto ldv_55928; } else { } __mptr = (struct drm_crtc const *)crtc; update_scanline_offset((struct intel_crtc *)__mptr); (*(dev_priv->display.crtc_enable))(crtc); intel_crtc_enable_planes(crtc); } else { } ldv_55928: i = i + 1; ldv_55932: ; if ((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_55931; } else { } drm_atomic_helper_cleanup_planes(dev, state); drm_atomic_state_free(state); return (0); } }
/* Applies the computed pipe_config via __intel_set_mode(); on success with
 * force_restore set, re-stages output state and re-runs the full modeset
 * consistency checks. Returns 0 or the __intel_set_mode() error. */
static int intel_set_mode_with_config(struct drm_crtc *crtc , struct intel_crtc_state *pipe_config , bool force_restore ) { int ret ; { ret = __intel_set_mode(crtc, pipe_config); if (ret == 0 && (int )force_restore) { intel_modeset_update_staged_output_state(crtc->dev); intel_modeset_check_state(crtc->dev); } else { } return (ret); } }
/* Convenience wrapper: computes the pipe config from the atomic state, then
 * applies it. An ERR_PTR from intel_modeset_compute_config() is converted to
 * a negative errno return; the empty "if (ret != 0) { }" branch is a CIL
 * artifact of a compiled-out debug statement. */
static int intel_set_mode(struct drm_crtc *crtc , struct drm_atomic_state *state , bool force_restore ) { struct intel_crtc_state *pipe_config ; int ret ; long tmp ; bool tmp___0 ; { ret = 0; pipe_config = intel_modeset_compute_config(crtc, state); tmp___0 = IS_ERR((void const *)pipe_config); if ((int )tmp___0) { tmp = PTR_ERR((void const *)pipe_config); ret = (int )tmp; goto out; } else { } ret = intel_set_mode_with_config(crtc, pipe_config, (int )force_restore); if (ret != 0) { } else { } out: ; return (ret); } }
/* --- head of intel_crtc_restore_mode(): only local declarations are on this
 * line; the body continues on the next line. */
void intel_crtc_restore_mode(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_atomic_state *state ; struct intel_encoder *encoder ; struct intel_connector *connector ; struct drm_connector_state *connector_state ; struct intel_crtc_state *crtc_state ; int ret ; long tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; long tmp___0 ; long tmp___1 ; bool tmp___2 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct drm_crtc const *__mptr___3 ; long tmp___3 ; long tmp___4 ; bool tmp___5 ; bool tmp___6 ; struct drm_crtc const *__mptr___4 ; {
dev = crtc->dev; state = drm_atomic_state_alloc(dev); if ((unsigned long )state == (unsigned long )((struct drm_atomic_state *)0)) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_crtc_restore_mode", "[CRTC:%d] mode restore failed, out of memory", crtc->base.id); } else { } return; } else { } state->acquire_ctx = dev->mode_config.acquire_ctx; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_55973; ldv_55972: ; if ((unsigned long )(& (encoder->new_crtc)->base) != (unsigned long )crtc) { goto ldv_55963; } else { } __mptr___0 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_55970; ldv_55969: ; if ((unsigned long )connector->new_encoder != (unsigned long )encoder) { goto ldv_55968; } else { } connector_state = drm_atomic_get_connector_state(state, & connector->base); tmp___2 = IS_ERR((void const *)connector_state); if ((int )tmp___2) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)connector_state); drm_ut_debug_printk("intel_crtc_restore_mode", "Failed to add [CONNECTOR:%d:%s] to state: %ld\n", connector->base.base.id, connector->base.name, tmp___0); } else { } goto ldv_55968; } else { } connector_state->crtc = crtc; connector_state->best_encoder = & encoder->base; ldv_55968: __mptr___1 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_55970: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_55969; } else { } ldv_55963: __mptr___2 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; ldv_55973: ; if ((unsigned long )(& encoder->base.head) != (unsigned 
long )(& dev->mode_config.encoder_list)) { goto ldv_55972; } else { } __mptr___3 = (struct drm_crtc const *)crtc; crtc_state = intel_atomic_get_crtc_state(state, (struct intel_crtc *)__mptr___3); tmp___5 = IS_ERR((void const *)crtc_state); if ((int )tmp___5) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = PTR_ERR((void const *)crtc_state); drm_ut_debug_printk("intel_crtc_restore_mode", "Failed to add [CRTC:%d] to state: %ld\n", crtc->base.id, tmp___3); } else { } drm_atomic_state_free(state); return; } else { } __mptr___4 = (struct drm_crtc const *)crtc; tmp___6 = ((struct intel_crtc *)__mptr___4)->new_enabled; crtc_state->base.enable = tmp___6; crtc_state->base.active = tmp___6; drm_mode_copy(& crtc_state->base.mode, (struct drm_display_mode const *)(& crtc->mode)); intel_modeset_setup_plane_state(state, crtc, & crtc->mode, (crtc->primary)->fb, crtc->x, crtc->y); ret = intel_set_mode(crtc, state, 0); if (ret != 0) { drm_atomic_state_free(state); } else { } return; } } static bool intel_connector_in_mode_set(struct intel_connector *connector , struct drm_mode_set *set ) { int ro ; { ro = 0; goto ldv_55985; ldv_55984: ; if ((unsigned long )*(set->connectors + (unsigned long )ro) == (unsigned long )(& connector->base)) { return (1); } else { } ro = ro + 1; ldv_55985: ; if ((size_t )ro < set->num_connectors) { goto ldv_55984; } else { } return (0); } } static int intel_modeset_stage_output_state(struct drm_device *dev , struct drm_mode_set *set , struct drm_atomic_state *state ) { struct intel_connector *connector ; struct drm_connector *drm_connector ; struct drm_connector_state *connector_state ; struct drm_crtc *crtc ; struct drm_crtc_state *crtc_state ; int i ; int ret ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; long tmp___0 ; struct list_head const *__mptr ; bool in_mode_set ; bool tmp___1 ; long tmp___2 ; bool tmp___3 ; int pipe ; struct drm_crtc const *__mptr___0 ; struct intel_encoder *tmp___4 ; long 
tmp___5 ; struct list_head const *__mptr___1 ; struct drm_connector const *__mptr___2 ; struct drm_crtc *crtc___0 ; long tmp___6 ; bool tmp___7 ; bool tmp___8 ; bool tmp___9 ; int tmp___10 ; long tmp___11 ; struct drm_encoder const *__mptr___3 ; bool has_connectors ; int tmp___12 ; bool tmp___13 ; long tmp___14 ; bool tmp___15 ; { __ret_warn_on = (unsigned long )set->fb == (unsigned long )((struct drm_framebuffer *)0) && set->num_connectors != 0UL; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12777, "WARN_ON(!set->fb && (set->num_connectors != 0))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (unsigned long )set->fb != (unsigned long )((struct drm_framebuffer *)0) && set->num_connectors == 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12778, "WARN_ON(set->fb && (set->num_connectors == 0))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_56014; ldv_56013: tmp___1 = intel_connector_in_mode_set(connector, set); in_mode_set = tmp___1; if (! 
in_mode_set && (unsigned long )(connector->base.state)->crtc != (unsigned long )set->crtc) { goto ldv_56008; } else { } connector_state = drm_atomic_get_connector_state(state, & connector->base); tmp___3 = IS_ERR((void const *)connector_state); if ((int )tmp___3) { tmp___2 = PTR_ERR((void const *)connector_state); return ((int )tmp___2); } else { } if ((int )in_mode_set) { __mptr___0 = (struct drm_crtc const *)set->crtc; pipe = ((struct intel_crtc *)__mptr___0)->pipe; tmp___4 = intel_find_encoder(connector, pipe); connector_state->best_encoder = & tmp___4->base; } else { } if ((unsigned long )(connector->base.state)->crtc != (unsigned long )set->crtc) { goto ldv_56008; } else { } if ((unsigned long )set->fb == (unsigned long )((struct drm_framebuffer *)0) || ! in_mode_set) { connector_state->best_encoder = (struct drm_encoder *)0; tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_modeset_stage_output_state", "[CONNECTOR:%d:%s] to [NOCRTC]\n", connector->base.base.id, connector->base.name); } else { } } else { } ldv_56008: __mptr___1 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_56014: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_56013; } else { } i = 0; goto ldv_56023; ldv_56022: ; if ((unsigned long )drm_connector != (unsigned long )((struct drm_connector *)0)) { __mptr___2 = (struct drm_connector const *)drm_connector; connector = (struct intel_connector *)__mptr___2; if ((unsigned long )connector_state->best_encoder == (unsigned long )((struct drm_encoder *)0)) { ret = drm_atomic_set_crtc_for_connector(connector_state, (struct drm_crtc *)0); if (ret != 0) { return (ret); } else { } goto ldv_56018; } else { } tmp___8 = intel_connector_in_mode_set(connector, set); if ((int )tmp___8) { crtc___0 = (connector->base.state)->crtc; if ((unsigned long )crtc___0 
!= (unsigned long )((struct drm_crtc *)0)) { crtc_state = drm_atomic_get_crtc_state(state, crtc___0); tmp___7 = IS_ERR((void const *)crtc_state); if ((int )tmp___7) { tmp___6 = PTR_ERR((void const *)crtc_state); return ((int )tmp___6); } else { } } else { } ret = drm_atomic_set_crtc_for_connector(connector_state, set->crtc); if (ret != 0) { return (ret); } else { } } else { } tmp___9 = drm_encoder_crtc_ok(connector_state->best_encoder, connector_state->crtc); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { return (-22); } else { } tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("intel_modeset_stage_output_state", "[CONNECTOR:%d:%s] to [CRTC:%d]\n", connector->base.base.id, connector->base.name, (connector_state->crtc)->base.id); } else { } if ((unsigned long )connector_state->best_encoder != (unsigned long )(& (connector->encoder)->base)) { __mptr___3 = (struct drm_encoder const *)connector_state->best_encoder; connector->encoder = (struct intel_encoder *)__mptr___3; } else { } } else { } ldv_56018: i = i + 1; ldv_56023: ; if (state->num_connector > i) { drm_connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_56022; } else { } i = 0; goto ldv_56027; ldv_56026: ; if ((unsigned long )crtc_state != (unsigned long )((struct drm_crtc_state *)0)) { ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) { return (ret); } else { } tmp___12 = drm_atomic_connectors_for_crtc(state, crtc); has_connectors = tmp___12 != 0; if ((int )crtc_state->enable != (int )has_connectors) { tmp___13 = has_connectors; crtc_state->active = tmp___13; crtc_state->enable = tmp___13; } else { } } else { } i = i + 1; ldv_56027: ; if ((state->dev)->mode_config.num_crtc > i) { crtc = *(state->crtcs + (unsigned long )i); crtc_state = *(state->crtc_states + (unsigned long )i); goto ldv_56026; } else { } ret = 
intel_modeset_setup_plane_state(state, set->crtc, set->mode, set->fb, (int )set->x, (int )set->y); if (ret != 0) { return (ret); } else { } crtc_state = drm_atomic_get_crtc_state(state, set->crtc); tmp___15 = IS_ERR((void const *)crtc_state); if ((int )tmp___15) { tmp___14 = PTR_ERR((void const *)crtc_state); return ((int )tmp___14); } else { } if ((unsigned long )set->mode != (unsigned long )((struct drm_display_mode *)0)) { drm_mode_copy(& crtc_state->mode, (struct drm_display_mode const *)set->mode); } else { } if (set->num_connectors != 0UL) { crtc_state->active = 1; } else { } return (0); } } static bool primary_plane_visible(struct drm_crtc *crtc ) { struct intel_plane_state *plane_state ; struct drm_plane_state const *__mptr ; { __mptr = (struct drm_plane_state const *)(crtc->primary)->state; plane_state = (struct intel_plane_state *)__mptr; return (plane_state->visible); } } static int intel_crtc_set_config(struct drm_mode_set *set ) { struct drm_device *dev ; struct drm_atomic_state *state ; struct intel_crtc_state *pipe_config ; bool primary_plane_was_visible ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; bool tmp___7 ; struct drm_crtc const *__mptr ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; int __ret_warn_on ; long tmp___8 ; bool tmp___9 ; bool tmp___10 ; int tmp___11 ; long tmp___12 ; { state = (struct drm_atomic_state *)0; tmp = ldv__builtin_expect((unsigned long )set == (unsigned long )((struct drm_mode_set *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (12906), "i" (12UL)); ldv_56043: ; goto 
ldv_56043; } else { } tmp___0 = ldv__builtin_expect((unsigned long )set->crtc == (unsigned long )((struct drm_crtc *)0), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (12907), "i" (12UL)); ldv_56044: ; goto ldv_56044; } else { } tmp___1 = ldv__builtin_expect((unsigned long )(set->crtc)->helper_private == (unsigned long )((void const *)0), 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (12908), "i" (12UL)); ldv_56045: ; goto ldv_56045; } else { } tmp___2 = ldv__builtin_expect((long )((unsigned long )set->mode == (unsigned long )((struct drm_display_mode *)0) && (unsigned long )set->fb != (unsigned long )((struct drm_framebuffer *)0)), 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (12911), "i" (12UL)); ldv_56046: ; goto ldv_56046; } else { } tmp___3 = ldv__builtin_expect((long )((unsigned long )set->fb != (unsigned long )((struct drm_framebuffer *)0) && set->num_connectors == 0UL), 0L); if (tmp___3 != 0L) { __asm__ volatile 
("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (12912), "i" (12UL)); ldv_56047: ; goto ldv_56047; } else { } if ((unsigned long )set->fb != (unsigned long )((struct drm_framebuffer *)0)) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_crtc_set_config", "[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", (set->crtc)->base.id, (set->fb)->base.id, (int )set->num_connectors, set->x, set->y); } else { } } else { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_crtc_set_config", "[CRTC:%d] [NOFB]\n", (set->crtc)->base.id); } else { } } dev = (set->crtc)->dev; state = drm_atomic_state_alloc(dev); if ((unsigned long )state == (unsigned long )((struct drm_atomic_state *)0)) { return (-12); } else { } state->acquire_ctx = dev->mode_config.acquire_ctx; ret = intel_modeset_stage_output_state(dev, set, state); if (ret != 0) { goto out; } else { } pipe_config = intel_modeset_compute_config(set->crtc, state); tmp___7 = IS_ERR((void const *)pipe_config); if ((int )tmp___7) { tmp___6 = PTR_ERR((void const *)pipe_config); ret = (int )tmp___6; goto out; } else { } __mptr = (struct drm_crtc const *)set->crtc; intel_update_pipe_size((struct intel_crtc *)__mptr); primary_plane_was_visible = primary_plane_visible(set->crtc); ret = intel_set_mode_with_config(set->crtc, pipe_config, 1); if ((ret == 0 && (int )pipe_config->base.enable) && (int )pipe_config->base.planes_changed) { tmp___10 = needs_modeset(& pipe_config->base); if (tmp___10) { tmp___11 = 0; } else { tmp___11 = 1; } if (tmp___11) { __mptr___0 = (struct drm_crtc const *)set->crtc; intel_crtc = (struct 
/* NOTE(review): CIL-flattened, machine-generated harness code (LDV). Comments
 * added for readability only; all code tokens are unchanged.
 *
 * Register-literal decoding used below (arithmetic, verified):
 *   811028L = 0xC6014, 811032L = 0xC6018  — presumably _PCH_DPLL(A)/_PCH_DPLL(B); confirm against i915_reg.h
 *   811072L = 0xC6040, 811080L = 0xC6048  — presumably _PCH_FP0(A)/_PCH_FP0(B)
 *   811076L = 0xC6044, 811084L = 0xC604C  — presumably _PCH_FP1(A)/_PCH_FP1(B)
 *   2147483648U = 0x80000000 (bit 31)     — presumably DPLL_VCO_ENABLE */
/* --- tail of intel_crtc_set_config(): its definition opens on an earlier
 * line. If the primary plane just became visible on an active CRTC, WARNs on
 * an inactive CRTC and runs the post-enable hook; optionally re-checks state
 * when i915.fastboot is set; frees the atomic state on any error. */
intel_crtc *)__mptr___0; if (ret == 0 && ! primary_plane_was_visible) { tmp___9 = primary_plane_visible(set->crtc); if ((int )tmp___9) { __ret_warn_on = ! intel_crtc->active; tmp___8 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 12958, "WARN_ON(!intel_crtc->active)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); intel_post_enable_primary(set->crtc); } else { } } else { } if ((int )i915.fastboot && ret == 0) { intel_modeset_check_state((set->crtc)->dev); } else { } } else { } } else { } if (ret != 0) { tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("intel_crtc_set_config", "failed to set mode on [CRTC:%d], err = %d\n", (set->crtc)->base.id, ret); } else { } } else { } out: ; if (ret != 0) { drm_atomic_state_free(state); } else { } return (ret); } }
/* CRTC function table; the zero slots are drm_crtc_funcs callbacks this
 * driver leaves unset (positional initializer generated by CIL). */
static struct drm_crtc_funcs const intel_crtc_funcs = {0, 0, 0, 0, 0, 0, & intel_crtc_gamma_set, & intel_crtc_destroy, & intel_crtc_set_config, & intel_crtc_page_flip, 0, & intel_crtc_duplicate_state, & intel_crtc_destroy_state, 0, 0};
/* Reads DPLL, FP0 and FP1 for the given PCH shared DPLL into *hw_state and
 * returns whether the VCO-enable bit is set. Bails out returning false when
 * power domain 23 (enum intel_display_power_domain value; not visible in this
 * chunk — confirm) is not enabled. pll->id selects the A/B register bank. */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll , struct intel_dpll_hw_state *hw_state ) { uint32_t val ; bool tmp ; int tmp___0 ; { tmp = intel_display_power_is_enabled(dev_priv, 23); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 811028L : 811032L, 1); hw_state->dpll = val; hw_state->fp0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 811072L : 811080L, 1); hw_state->fp1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ?
811076L : 811084L, 1); return ((val & 2147483648U) != 0U); } }
/* Programs the cached FP0/FP1 divider values into the PLL's register bank. */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )pll->id == 0 ? 811072L : 811080L, pll->config.hw_state.fp0, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )pll->id == 0 ? 811076L : 811084L, pll->config.hw_state.fp1, 1); return; } }
/* Enables the PLL: asserts the PCH refclk is running, writes the cached DPLL
 * value, posts the write with a readback, waits, then writes it again.
 * 644250UL / 859000UL are __const_udelay() arguments = 150 * 0x10C7 and
 * 200 * 0x10C7, i.e. presumably udelay(150) and udelay(200) — confirm. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { { ibx_assert_pch_refclk_enabled(dev_priv); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )pll->id == 0 ? 811028L : 811032L, pll->config.hw_state.dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 811028L : 811032L, 0); __const_udelay(644250UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )pll->id == 0 ? 811028L : 811032L, pll->config.hw_state.dpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 811028L : 811032L, 0); __const_udelay(859000UL); return; } }
/* Disables the PLL: first asserts the PCH transcoder is off on every CRTC
 * that shares this PLL (ldv_56082/ldv_56083 are the CIL form of the CRTC list
 * walk), then zeroes the DPLL register, posts with a readback and waits. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { struct drm_device *dev ; struct intel_crtc *crtc ; struct list_head const *__mptr ; struct intel_shared_dpll *tmp ; struct list_head const *__mptr___0 ; { dev = dev_priv->dev; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_56083; ldv_56082: tmp = intel_crtc_to_shared_dpll(crtc); if ((unsigned long )tmp == (unsigned long )pll) { assert_pch_transcoder_disabled(dev_priv, crtc->pipe); } else { } __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_56083: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_56082; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )pll->id == 0 ?
811028L : 811032L, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 811028L : 811032L, 0); __const_udelay(859000UL); return; } }
/* Human-readable names for the two PCH shared DPLLs. */
static char *ibx_pch_dpll_names[2U] = { (char *)"PCH DPLL A", (char *)"PCH DPLL B"};
/* Registers exactly two shared DPLLs and wires them to the ibx_* callbacks
 * above (ldv_56091/ldv_56092 are the CIL loop labels). */
static void ibx_pch_dpll_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->num_shared_dpll = 2; i = 0; goto ldv_56092; ldv_56091: dev_priv->shared_dplls[i].id = (enum intel_dpll_id )i; dev_priv->shared_dplls[i].name = (char const *)ibx_pch_dpll_names[i]; dev_priv->shared_dplls[i].mode_set = & ibx_pch_dpll_mode_set; dev_priv->shared_dplls[i].enable = & ibx_pch_dpll_enable; dev_priv->shared_dplls[i].disable = & ibx_pch_dpll_disable; dev_priv->shared_dplls[i].get_hw_state = & ibx_pch_dpll_get_hw_state; i = i + 1; ldv_56092: ; if (dev_priv->num_shared_dpll > i) { goto ldv_56091; } else { } return; } }
/* --- head of intel_shared_dpll_init(): chooses intel_ddi_pll_init() vs
 * ibx_pch_dpll_init() vs no shared DPLLs by probing to_i915() feature/pch
 * bytes, then BUG()s (the inline ud2 asm below is the compiled BUG()
 * expansion) if more than 3 DPLLs were registered. The asm statement is cut
 * by the chunking and continues on the next line. */
static void intel_shared_dpll_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) != 0U) { intel_ddi_pll_init(dev); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { ibx_pch_dpll_init(dev); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { ibx_pch_dpll_init(dev); } else { dev_priv->num_shared_dpll = 0; } } } tmp = ldv__builtin_expect(dev_priv->num_shared_dpll > 3, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (13091), "i" (12UL)); ldv_56116: ; goto ldv_56116; } else { } return; } }
/*
 * intel_wm_need_update - decide whether a plane update requires
 * recomputing watermarks.  Returns 1 when either framebuffer pointer is
 * NULL, the fb tiling modifier[0] changed, or the plane rotation
 * changed; 0 otherwise.  Pure predicate, no side effects.
 */
bool intel_wm_need_update(struct drm_plane *plane , struct drm_plane_state *state ) { { if ((((unsigned long )(plane->state)->fb == (unsigned long )((struct drm_framebuffer *)0) || (unsigned long )state->fb == (unsigned long )((struct drm_framebuffer *)0)) || ((plane->state)->fb)->modifier[0] != (state->fb)->modifier[0]) || (plane->state)->rotation != state->rotation) { return (1); } else { } return (0); } }
/*
 * intel_prepare_plane_fb - pin a framebuffer object before a plane
 * update.  Computes the frontbuffer bit from pipe and plane->type
 * (primary/cursor/sprite encode as bit 0/1/2 within a 4-bit-per-pipe
 * group), then under dev->struct_mutex either attaches a physical
 * object (cursor planes on hardware flagged at byte offset 46 —
 * device id 13687U gets 16384-byte alignment, others 256) or pins and
 * fences the GEM object.  On success the frontbuffer tracking is moved
 * from the old object to the new one.  Returns 0 or a negative errno
 * from the pin/attach helpers.
 */
int intel_prepare_plane_fb(struct drm_plane *plane , struct drm_framebuffer *fb , struct drm_plane_state const *new_state ) { struct drm_device *dev ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; enum pipe pipe ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr___0 ; struct drm_i915_gem_object *tmp ; struct drm_i915_gem_object *old_obj ; struct drm_framebuffer const *__mptr___1 ; struct drm_i915_gem_object *tmp___0 ; unsigned int frontbuffer_bits ; int ret ; int align ; struct drm_i915_private *__p ; long tmp___1 ; struct drm_i915_private *__p___0 ; { dev = plane->dev; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; pipe = intel_plane->pipe; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___0 = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr___0)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; if ((unsigned long )plane->fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___1 = (struct drm_framebuffer const *)plane->fb; tmp___0 = ((struct intel_framebuffer *)__mptr___1)->obj; } else { tmp___0 = (struct drm_i915_gem_object *)0; } old_obj = tmp___0; frontbuffer_bits = 0U; ret = 0; if ((unsigned
long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { return (0); } else { } switch ((unsigned int )plane->type) { case 1U: frontbuffer_bits = (unsigned int )(1 << (int )pipe * 4); goto ldv_56140; case 2U: frontbuffer_bits = (unsigned int )(1 << ((int )pipe * 4 + 1)); goto ldv_56140; case 0U: frontbuffer_bits = (unsigned int )(1 << ((int )pipe * 4 + 2)); goto ldv_56140; } ldv_56140: mutex_lock_nested(& dev->struct_mutex, 0U); if ((unsigned int )plane->type == 2U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) != 0U) { __p = to_i915((struct drm_device const *)dev); align = (unsigned int )((unsigned short )__p->info.device_id) == 13687U ? 16384 : 256; ret = i915_gem_object_attach_phys(obj, align); if (ret != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_prepare_plane_fb", "failed to attach phys object\n"); } else { } } else { } } else { ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, (struct intel_engine_cs *)0); } } else { ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, (struct intel_engine_cs *)0); } if (ret == 0) { i915_gem_track_fb(old_obj, obj, frontbuffer_bits); } else { } mutex_unlock(& dev->struct_mutex); return (ret); } }
/*
 * intel_cleanup_plane_fb - release the framebuffer object pinned by
 * intel_prepare_plane_fb.  Warns (lowered WARN_ON(!obj)) and returns if
 * there is no backing object; otherwise unpins under dev->struct_mutex,
 * except for cursor planes on hardware where the byte flag at offset 46
 * of drm_i915_private is set (those used phys attach, not a pin).
 */
void intel_cleanup_plane_fb(struct drm_plane *plane , struct drm_framebuffer *fb , struct drm_plane_state const *old_state ) { struct drm_device *dev ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p ; { dev = plane->dev; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; __ret_warn_on = (unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0); tmp___0 =
ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 13191, "WARN_ON(!obj)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } if ((unsigned int )plane->type != 2U) { mutex_lock_nested(& dev->struct_mutex, 0U); intel_unpin_fb_obj(fb, old_state); mutex_unlock(& dev->struct_mutex); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { mutex_lock_nested(& dev->struct_mutex, 0U); intel_unpin_fb_obj(fb, old_state); mutex_unlock(& dev->struct_mutex); } else { } } return; } }
/*
 * skl_max_scale - maximum downscale factor for a Skylake-class plane in
 * 16.16 fixed point.  Returns 65536 (1.0) when crtc/state/clocks are
 * unavailable; otherwise min(196607, (cdclk << 8) / crtc_clock * 256),
 * i.e. capped just under 3.0 and limited by the cdclk/pixel-clock ratio.
 */
int skl_max_scale(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) { int max_scale ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int crtc_clock ; int cdclk ; int _min1 ; int _min2 ; { if ((unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0) || (unsigned long )crtc_state == (unsigned long )((struct intel_crtc_state *)0)) { return (65536); } else { } dev = intel_crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; cdclk = (*(dev_priv->display.get_display_clock_speed))(dev); if (crtc_clock == 0 || cdclk == 0) { return (65536); } else { } _min1 = 196607; _min2 = ((cdclk << 8) / crtc_clock) * 256; max_scale = _min1 < _min2 ?
_min1 : _min2; return (max_scale); } }
/*
 * intel_check_primary_plane - atomic check hook for the primary plane.
 * On gen > 8 scaling and positioning are allowed (min/max scale come
 * from skl_max_scale when the colorkey is enabled); otherwise the plane
 * must be 1:1 and fully covering.  Delegates geometry validation to
 * drm_plane_helper_check_update, then records follow-up work in
 * intel_crtc->atomic (wait_for_flips, disable_fbc, disable_ips,
 * wait_vblank, fb_bits, update_fbc, update_wm) based on visibility
 * transitions and hardware generation, and finally registers scaler
 * users on gen > 8.  Returns 0 or a negative errno.
 */
static int intel_check_primary_plane(struct drm_plane *plane , struct intel_plane_state *state ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct intel_crtc_state *crtc_state ; struct drm_framebuffer *fb ; struct drm_rect *dest ; struct drm_rect *src ; struct drm_rect const *clip ; bool can_position ; int max_scale ; int min_scale ; int ret ; struct drm_crtc const *__mptr ; struct intel_crtc_state *tmp ; struct drm_plane const *__mptr___0 ; struct drm_i915_private *__p ; struct intel_plane_state *old_state ; struct drm_plane_state const *__mptr___1 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; bool tmp___0 ; struct drm_plane const *__mptr___2 ; struct drm_i915_private *__p___4 ; { dev = plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = state->base.crtc; fb = state->base.fb; dest = & state->dst; src = & state->src; clip = (struct drm_rect const *)(& state->clip); can_position = 0; max_scale = 65536; min_scale = 65536; crtc = (unsigned long )crtc == (unsigned long )((struct drm_crtc *)0) ?
plane->crtc : crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if ((unsigned long )state->base.state != (unsigned long )((struct drm_atomic_state *)0)) { tmp = intel_atomic_get_crtc_state(state->base.state, intel_crtc); crtc_state = tmp; } else { crtc_state = (struct intel_crtc_state *)0; } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { __mptr___0 = (struct drm_plane const *)plane; if (((struct intel_plane *)__mptr___0)->ckey.flags == 1U) { min_scale = 1; max_scale = skl_max_scale(intel_crtc, crtc_state); } else { } can_position = 1; } else { } ret = drm_plane_helper_check_update(plane, crtc, fb, src, dest, clip, min_scale, max_scale, (int )can_position, 1, & state->visible); if (ret != 0) { return (ret); } else { } if ((int )intel_crtc->active) { __mptr___1 = (struct drm_plane_state const *)plane->state; old_state = (struct intel_plane_state *)__mptr___1; intel_crtc->atomic.wait_for_flips = 1; if ((int )state->visible) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 4U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) == 0U) { if ((unsigned long )dev_priv->fbc.crtc == (unsigned long )intel_crtc) { if (state->base.rotation != 1U) { intel_crtc->atomic.disable_fbc = 1; } else { } } else { } } else { } } else { } } else { } if ((int )state->visible && ! old_state->visible) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { intel_crtc->atomic.wait_vblank = 1; } else { } } else { } } else { } if (!
state->visible || (unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { intel_crtc->atomic.disable_ips = 1; } else { } intel_crtc->atomic.fb_bits = intel_crtc->atomic.fb_bits | (unsigned int )(1 << (int )intel_crtc->pipe * 4); intel_crtc->atomic.update_fbc = 1; tmp___0 = intel_wm_need_update(plane, & state->base); if ((int )tmp___0) { intel_crtc->atomic.update_wm = 1; } else { } } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 8U) { __mptr___2 = (struct drm_plane const *)plane; ret = skl_update_scaler_users(intel_crtc, crtc_state, (struct intel_plane *)__mptr___2, state, 0); if (ret != 0) { return (ret); } else { } } else { } return (0); } }
/*
 * intel_commit_primary_plane - atomic commit hook for the primary
 * plane: stores the new fb and src origin (16.16 fixed point shifted to
 * integer pixels) into the legacy crtc fields, and if the CRTC is
 * active updates the pipe size and calls the per-platform
 * update_primary_plane vfunc.
 */
static void intel_commit_primary_plane(struct drm_plane *plane , struct intel_plane_state *state ) { struct drm_crtc *crtc ; struct drm_framebuffer *fb ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_rect *src ; struct drm_crtc const *__mptr ; { crtc = state->base.crtc; fb = state->base.fb; dev = plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; src = & state->src; crtc = (unsigned long )crtc == (unsigned long )((struct drm_crtc *)0) ?
plane->crtc : crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; plane->fb = fb; crtc->x = src->x1 >> 16; crtc->y = src->y1 >> 16; if ((int )intel_crtc->active) { if ((int )state->visible) { intel_update_pipe_size(intel_crtc); } else { } (*(dev_priv->display.update_primary_plane))(crtc, plane->fb, crtc->x, crtc->y); } else { } return; } }
/*
 * intel_disable_primary_plane - turn the primary plane off by calling
 * the per-platform update_primary_plane vfunc with a NULL framebuffer.
 * The 'force' argument is ignored here.
 */
static void intel_disable_primary_plane(struct drm_plane *plane , struct drm_crtc *crtc , bool force ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { dev = plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->display.update_primary_plane))(crtc, (struct drm_framebuffer *)0, 0, 0); return; } }
/*
 * intel_begin_crtc_commit - pre-commit work for one CRTC.  The
 * ldv_56290/ldv_56291 goto pair walks dev->mode_config.plane_list; for
 * every plane marked in atomic.disabled_planes it computes the
 * frontbuffer bit (primary/cursor/sprite -> bit 0/1/2 of the pipe's
 * 4-bit group) and clears frontbuffer tracking to NULL under
 * dev->struct_mutex.  Then it performs the work flagged during check
 * (wait for flips, disable FBC/IPS, pre-disable primary, update
 * watermarks), grabs a runtime-PM reference, and if the CRTC is active
 * starts the vblank-evade critical section (atomic.evade /
 * start_vbl_count).
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_plane *intel_plane ; struct drm_plane *p ; unsigned int fb_bits ; struct list_head const *__mptr___0 ; struct drm_plane const *__mptr___1 ; struct drm_framebuffer const *__mptr___2 ; struct drm_i915_gem_object *tmp ; unsigned int tmp___0 ; struct list_head const *__mptr___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; fb_bits = 0U; __mptr___0 = (struct list_head const *)dev->mode_config.plane_list.next; p = (struct drm_plane *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_56291; ldv_56290: __mptr___1 = (struct drm_plane const *)p; intel_plane = (struct intel_plane *)__mptr___1; tmp___0 = drm_plane_index(p); if ((intel_crtc->atomic.disabled_planes & (unsigned int )(1 << (int )tmp___0)) != 0U) { switch ((unsigned int )p->type) { case 1U: fb_bits = (unsigned int )(1 << (int )intel_plane->pipe * 4); goto ldv_56285; case 2U: fb_bits = (unsigned int )(1 << ((int )intel_plane->pipe * 4 + 1)); goto ldv_56285; case 0U: fb_bits = (unsigned int )(1 << ((int
)intel_plane->pipe * 4 + 2)); goto ldv_56285; } ldv_56285: mutex_lock_nested(& dev->struct_mutex, 0U); if ((unsigned long )p->fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___2 = (struct drm_framebuffer const *)p->fb; tmp = ((struct intel_framebuffer *)__mptr___2)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } i915_gem_track_fb(tmp, (struct drm_i915_gem_object *)0, fb_bits); mutex_unlock(& dev->struct_mutex); } else { } __mptr___3 = (struct list_head const *)p->head.next; p = (struct drm_plane *)__mptr___3 + 0xfffffffffffffff8UL; ldv_56291: ; if ((unsigned long )(& p->head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_56290; } else { } if ((int )intel_crtc->atomic.wait_for_flips) { intel_crtc_wait_for_pending_flips(crtc); } else { } if ((int )intel_crtc->atomic.disable_fbc) { intel_fbc_disable(dev); } else { } if ((int )intel_crtc->atomic.disable_ips) { hsw_disable_ips(intel_crtc); } else { } if ((int )intel_crtc->atomic.pre_disable_primary) { intel_pre_disable_primary(crtc); } else { } if ((int )intel_crtc->atomic.update_wm) { intel_update_watermarks(crtc); } else { } intel_runtime_pm_get(dev_priv); if ((int )intel_crtc->active) { intel_crtc->atomic.evade = intel_pipe_update_start(intel_crtc, & intel_crtc->atomic.start_vbl_count); } else { } return; } }
/*
 * intel_finish_crtc_commit - post-commit counterpart of
 * intel_begin_crtc_commit: ends the vblank-evade section, drops the
 * runtime-PM reference, optionally waits a vblank, flips frontbuffer
 * tracking, updates FBC under struct_mutex, re-enables the primary
 * plane, refreshes sprite watermarks for flagged sprite planes
 * (goto-lowered plane_list walk), and finally zeroes the 32-byte
 * intel_crtc->atomic scratch state for the next commit.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_plane *p ; struct list_head const *__mptr___0 ; unsigned int tmp ; struct list_head const *__mptr___1 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if ((int )intel_crtc->atomic.evade) { intel_pipe_update_end(intel_crtc, intel_crtc->atomic.start_vbl_count); } else { } intel_runtime_pm_put(dev_priv); if ((int )intel_crtc->atomic.wait_vblank) { intel_wait_for_vblank(dev,
(int )intel_crtc->pipe); } else { } intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits); if ((int )intel_crtc->atomic.update_fbc) { mutex_lock_nested(& dev->struct_mutex, 0U); intel_fbc_update(dev); mutex_unlock(& dev->struct_mutex); } else { } if ((int )intel_crtc->atomic.post_enable_primary) { intel_post_enable_primary(crtc); } else { } __mptr___0 = (struct list_head const *)dev->mode_config.plane_list.next; p = (struct drm_plane *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_56307; ldv_56306: ; if ((unsigned int )p->type == 0U) { tmp = drm_plane_index(p); if ((intel_crtc->atomic.update_sprite_watermarks & tmp) != 0U) { intel_update_sprite_watermarks(p, crtc, 0U, 0U, 0, 0, 0); } else { } } else { } __mptr___1 = (struct list_head const *)p->head.next; p = (struct drm_plane *)__mptr___1 + 0xfffffffffffffff8UL; ldv_56307: ; if ((unsigned long )(& p->head) != (unsigned long )(& dev->mode_config.plane_list)) { goto ldv_56306; } else { } memset((void *)(& intel_crtc->atomic), 0, 32UL); return; } }
/*
 * intel_plane_destroy - drm_plane_funcs.destroy hook: tears down the
 * DRM plane and frees the containing intel_plane allocation.
 */
void intel_plane_destroy(struct drm_plane *plane ) { struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; { __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; drm_plane_cleanup(plane); kfree((void const *)intel_plane); return; } }
/*
 * intel_plane_funcs - drm_plane_funcs vtable shared by primary and
 * cursor planes: atomic helpers for update/disable/set_property, the
 * destroy hook above, and the intel atomic state/property callbacks.
 * The single 0 entry is the unused reset slot.
 */
struct drm_plane_funcs const intel_plane_funcs = {& drm_atomic_helper_update_plane, & drm_atomic_helper_disable_plane, & intel_plane_destroy, 0, & drm_atomic_helper_plane_set_property, & intel_plane_duplicate_state, & intel_plane_destroy_state, & intel_plane_atomic_set_property, & intel_plane_atomic_get_property};
/*
 * intel_primary_plane_create - allocate and register the primary plane
 * for one pipe.  Allocates intel_plane + state (kzalloc sizes 808/via
 * helper), enables scaling only on gen > 8 (scaler_id = -1 means
 * unassigned), applies the gen <= 3 FBC pipe/plane swap, picks the
 * format table by generation (skl: 8 formats, gen > 3: 6, else 4),
 * registers via drm_universal_plane_init, and attaches the rotation
 * property on gen > 3.  Returns the drm_plane or NULL on allocation
 * failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev , int pipe ) { struct intel_plane *primary ; struct intel_plane_state *state ; uint32_t const *intel_primary_formats ; int num_formats ; void *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private
*__p___3 ; struct drm_i915_private *__p___4 ; { tmp = kzalloc(808UL, 208U); primary = (struct intel_plane *)tmp; if ((unsigned long )primary == (unsigned long )((struct intel_plane *)0)) { return ((struct drm_plane *)0); } else { } state = intel_create_plane_state(& primary->base); if ((unsigned long )state == (unsigned long )((struct intel_plane_state *)0)) { kfree((void const *)primary); return ((struct drm_plane *)0); } else { } primary->base.state = & state->base; primary->can_scale = 0; primary->max_downscale = 1; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { primary->can_scale = 1; state->scaler_id = -1; } else { } primary->pipe = (enum pipe )pipe; primary->plane = pipe; primary->check_plane = & intel_check_primary_plane; primary->commit_plane = & intel_commit_primary_plane; primary->disable_plane = & intel_disable_primary_plane; primary->ckey.flags = 1U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 3U) { primary->plane = pipe == 0; } else { } } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) > 8U) { intel_primary_formats = (uint32_t const *)(& skl_primary_formats); num_formats = 8; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 3U) { intel_primary_formats = (uint32_t const *)(& i965_primary_formats); num_formats = 6; } else { intel_primary_formats = (uint32_t const *)(& i8xx_primary_formats); num_formats = 4; } } drm_universal_plane_init(dev, & primary->base, 0UL, & intel_plane_funcs, intel_primary_formats, (uint32_t )num_formats, 1); __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 3U) { intel_create_rotation_property(dev,
primary); } else { } drm_plane_helper_add(& primary->base, & intel_plane_helper_funcs); return (& primary->base); } }
/*
 * intel_create_rotation_property - lazily create the device-wide
 * rotation property (flags 5UL, extended with 10UL on gen > 8) and
 * attach it to the given plane with the plane's current rotation as the
 * initial value.
 */
void intel_create_rotation_property(struct drm_device *dev , struct intel_plane *plane ) { unsigned long flags ; struct drm_i915_private *__p ; { if ((unsigned long )dev->mode_config.rotation_property == (unsigned long )((struct drm_property *)0)) { flags = 5UL; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { flags = flags | 10UL; } else { } dev->mode_config.rotation_property = drm_mode_create_rotation_property(dev, (unsigned int )flags); } else { } if ((unsigned long )dev->mode_config.rotation_property != (unsigned long )((struct drm_property *)0)) { drm_object_attach_property(& plane->base.base, dev->mode_config.rotation_property, (uint64_t )(plane->base.state)->rotation); } else { } return; } }
/*
 * intel_check_cursor_plane - atomic check hook for the cursor plane:
 * validates geometry at fixed 1:1 scale via
 * drm_plane_helper_check_update, rejects unsupported cursor dimensions
 * (-22/EINVAL), undersized backing objects against crtc_h * 4 *
 * roundup_pow_of_two(crtc_w) (-12/ENOMEM) and tiled buffers
 * (modifier[0] != 0, -22), then flags watermark/frontbuffer updates on
 * an active CRTC.
 */
static int intel_check_cursor_plane(struct drm_plane *plane , struct intel_plane_state *state ) { struct drm_crtc *crtc ; struct drm_device *dev ; struct drm_framebuffer *fb ; struct drm_rect *dest ; struct drm_rect *src ; struct drm_rect const *clip ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct intel_crtc *intel_crtc ; unsigned int stride ; int ret ; struct drm_crtc const *__mptr___0 ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; unsigned long tmp___3 ; long tmp___4 ; long tmp___5 ; { crtc = state->base.crtc; dev = plane->dev; fb = state->base.fb; dest = & state->dst; src = & state->src; clip = (struct drm_rect const *)(& state->clip); if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; crtc = (unsigned long )crtc == (unsigned long )((struct drm_crtc *)0) ?
plane->crtc : crtc; __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; ret = drm_plane_helper_check_update(plane, crtc, fb, src, dest, clip, 65536, 65536, 1, 1, & state->visible); if (ret != 0) { return (ret); } else { } if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto finish; } else { } tmp___1 = cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_check_cursor_plane", "Cursor dimension %dx%d not supported\n", state->base.crtc_w, state->base.crtc_h); } else { } return (-22); } else { } tmp___3 = __roundup_pow_of_two((unsigned long )state->base.crtc_w); stride = (unsigned int )tmp___3 * 4U; if (obj->base.size < (size_t )(state->base.crtc_h * stride)) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_check_cursor_plane", "buffer is too small\n"); } else { } return (-12); } else { } if (fb->modifier[0] != 0ULL) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_check_cursor_plane", "cursor cannot be tiled\n"); } else { } ret = -22; } else { } finish: ; if ((int )intel_crtc->active) { if ((plane->state)->crtc_w != state->base.crtc_w) { intel_crtc->atomic.update_wm = 1; } else { } intel_crtc->atomic.fb_bits = intel_crtc->atomic.fb_bits | (unsigned int )(1 << ((int )intel_crtc->pipe * 4 + 1)); } else { } return (ret); } }
/*
 * intel_disable_cursor_plane - turn the cursor off.  Unless 'force' is
 * set, also drops the cached cursor fb/bo/address before calling
 * intel_crtc_update_cursor with on=0.
 */
static void intel_disable_cursor_plane(struct drm_plane *plane , struct drm_crtc *crtc , bool force ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if (!
force) { plane->fb = (struct drm_framebuffer *)0; intel_crtc->cursor_bo = (struct drm_i915_gem_object *)0; intel_crtc->cursor_addr = 0U; } else { } intel_crtc_update_cursor(crtc, 0); return; } }
/*
 * intel_commit_cursor_plane - atomic commit hook for the cursor:
 * records the new fb and cursor x/y, and when the backing object
 * changed recomputes the cursor address (GGTT offset normally, phys
 * handle busaddr when the byte flag at offset 46 of drm_i915_private is
 * set) before reprogramming the cursor on an active CRTC.
 */
static void intel_commit_cursor_plane(struct drm_plane *plane , struct intel_plane_state *state ) { struct drm_crtc *crtc ; struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; uint32_t addr ; struct drm_crtc const *__mptr___0 ; unsigned long tmp___0 ; struct drm_i915_private *__p ; { crtc = state->base.crtc; dev = plane->dev; if ((unsigned long )state->base.fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)state->base.fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; crtc = (unsigned long )crtc == (unsigned long )((struct drm_crtc *)0) ? plane->crtc : crtc; __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; plane->fb = state->base.fb; crtc->cursor_x = state->base.crtc_x; crtc->cursor_y = state->base.crtc_y; if ((unsigned long )intel_crtc->cursor_bo == (unsigned long )obj) { goto update; } else { } if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { addr = 0U; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { tmp___0 = i915_gem_obj_ggtt_offset(obj); addr = (uint32_t )tmp___0; } else { addr = (uint32_t )(obj->__annonCompField84.phys_handle)->busaddr; } } intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = obj; update: ; if ((int )intel_crtc->active) { intel_crtc_update_cursor(crtc, (int )state->visible); } else { } return; } }
/*
 * intel_cursor_plane_create - allocate and register the cursor plane
 * for one pipe: no scaling, single cursor format, rotation property
 * attached on gen > 3 (0/180 only, flags 5U), scaler_id = -1 on
 * gen > 8.  Returns the drm_plane or NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev , int pipe ) { struct intel_plane *cursor ; struct intel_plane_state *state ; void *tmp ; struct drm_i915_private *__p ; struct
drm_i915_private *__p___0 ; { tmp = kzalloc(808UL, 208U); cursor = (struct intel_plane *)tmp; if ((unsigned long )cursor == (unsigned long )((struct intel_plane *)0)) { return ((struct drm_plane *)0); } else { } state = intel_create_plane_state(& cursor->base); if ((unsigned long )state == (unsigned long )((struct intel_plane_state *)0)) { kfree((void const *)cursor); return ((struct drm_plane *)0); } else { } cursor->base.state = & state->base; cursor->can_scale = 0; cursor->max_downscale = 1; cursor->pipe = (enum pipe )pipe; cursor->plane = pipe; cursor->check_plane = & intel_check_cursor_plane; cursor->commit_plane = & intel_commit_cursor_plane; cursor->disable_plane = & intel_disable_cursor_plane; drm_universal_plane_init(dev, & cursor->base, 0UL, & intel_plane_funcs, (uint32_t const *)(& intel_cursor_formats), 1U, 2); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { if ((unsigned long )dev->mode_config.rotation_property == (unsigned long )((struct drm_property *)0)) { dev->mode_config.rotation_property = drm_mode_create_rotation_property(dev, 5U); } else { } if ((unsigned long )dev->mode_config.rotation_property != (unsigned long )((struct drm_property *)0)) { drm_object_attach_property(& cursor->base.base, dev->mode_config.rotation_property, (uint64_t )state->base.rotation); } else { } } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 8U) { state->scaler_id = -1; } else { } drm_plane_helper_add(& cursor->base, & intel_plane_helper_funcs); return (& cursor->base); } }
/*
 * skl_init_scalers - reset all scalers of a Skylake-class CRTC: mark
 * each of intel_crtc->num_scalers entries unused (goto-lowered for
 * loop) and set the CRTC's own scaler_id to -1 (none assigned).
 */
static void skl_init_scalers(struct drm_device *dev , struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) { int i ; struct intel_scaler *intel_scaler ; struct intel_crtc_scaler_state *scaler_state ; { scaler_state = & crtc_state->scaler_state; i = 0; goto ldv_56454; ldv_56453: intel_scaler = (struct intel_scaler *)(& scaler_state->scalers) + (unsigned
long )i; intel_scaler->in_use = 0; intel_scaler->id = i; intel_scaler->mode = 0U; i = i + 1; ldv_56454: ; if (intel_crtc->num_scalers > i) { goto ldv_56453; } else { } scaler_state->scaler_id = -1; return; } }
/*
 * intel_crtc_init - allocate and register one CRTC for the given pipe:
 * allocates intel_crtc (2416 bytes) and its state (752 bytes), sets up
 * scalers on gen > 8 (pipe 2 gets 1 scaler, others 2), creates the
 * primary and cursor planes, registers via drm_crtc_init_with_planes,
 * initializes an identity 256-entry gamma LUT, applies the gen <= 3 FBC
 * pipe/plane swap, seeds the cursor registers with all-ones sentinels,
 * and records the pipe/plane -> crtc mappings.  The ud2 inline asm is a
 * lowered BUG_ON(pipe out of range or plane slot already taken); the
 * warn_slowpath_fmt is a lowered WARN_ON(crtc index != pipe).  All
 * error paths funnel through the 'fail' label which frees everything
 * allocated so far.
 */
static void intel_crtc_init(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct intel_crtc_state *crtc_state ; struct drm_plane *primary ; struct drm_plane *cursor ; int i ; int ret ; void *tmp ; void *tmp___0 ; struct drm_i915_private *__p ; long tmp___1 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp___2 ; int __ret_warn_on ; unsigned int tmp___3 ; long tmp___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; crtc_state = (struct intel_crtc_state *)0; primary = (struct drm_plane *)0; cursor = (struct drm_plane *)0; tmp = kzalloc(2416UL, 208U); intel_crtc = (struct intel_crtc *)tmp; if ((unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0)) { return; } else { } tmp___0 = kzalloc(752UL, 208U); crtc_state = (struct intel_crtc_state *)tmp___0; if ((unsigned long )crtc_state == (unsigned long )((struct intel_crtc_state *)0)) { goto fail; } else { } intel_crtc->config = crtc_state; intel_crtc->base.state = & crtc_state->base; crtc_state->base.crtc = & intel_crtc->base; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { if (pipe == 2) { intel_crtc->num_scalers = 1; } else { intel_crtc->num_scalers = 2; } skl_init_scalers(dev, intel_crtc, crtc_state); } else { } primary = intel_primary_plane_create(dev, pipe); if ((unsigned long )primary == (unsigned long )((struct drm_plane *)0)) { goto fail; } else { } cursor = intel_cursor_plane_create(dev, pipe); if ((unsigned long )cursor == (unsigned long )((struct drm_plane *)0)) { goto fail; } else { } ret = drm_crtc_init_with_planes(dev, & intel_crtc->base, primary, cursor, & intel_crtc_funcs); if (ret != 0) { goto fail; } else { }
drm_mode_crtc_set_gamma_size(& intel_crtc->base, 256); i = 0; goto ldv_56475; ldv_56474: intel_crtc->lut_r[i] = (u8 )i; intel_crtc->lut_g[i] = (u8 )i; intel_crtc->lut_b[i] = (u8 )i; i = i + 1; ldv_56475: ; if (i <= 255) { goto ldv_56474; } else { } intel_crtc->pipe = (enum pipe )pipe; intel_crtc->plane = (enum plane )pipe; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 3U) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_crtc_init", "swapping pipes & planes for FBC\n"); } else { } intel_crtc->plane = pipe == 0; } else { } } else { } intel_crtc->cursor_base = 4294967295U; intel_crtc->cursor_cntl = 4294967295U; intel_crtc->cursor_size = 4294967295U; tmp___2 = ldv__builtin_expect((long )((unsigned int )pipe > 2U || (unsigned long )dev_priv->plane_to_crtc_mapping[(unsigned int )intel_crtc->plane] != (unsigned long )((struct drm_crtc *)0)), 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c"), "i" (13815), "i" (12UL)); ldv_56492: ; goto ldv_56492; } else { } dev_priv->plane_to_crtc_mapping[(unsigned int )intel_crtc->plane] = & intel_crtc->base; dev_priv->pipe_to_crtc_mapping[(int )intel_crtc->pipe] = & intel_crtc->base; drm_crtc_helper_add(& intel_crtc->base, & intel_helper_funcs); tmp___3 = drm_crtc_index(& intel_crtc->base); __ret_warn_on = tmp___3 != (unsigned int )intel_crtc->pipe; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 13821, "WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; fail: ; if ((unsigned long )primary != (unsigned long )((struct drm_plane *)0)) { drm_plane_cleanup(primary); } else { } if ((unsigned long )cursor != (unsigned long )((struct drm_plane *)0)) { drm_plane_cleanup(cursor); } else { } kfree((void const *)crtc_state); kfree((void const *)intel_crtc); return; } }
/*
 * intel_get_pipe_from_connector - return the pipe driven by the CRTC
 * behind a connector's encoder, or -1 (INVALID_PIPE) when the connector
 * has no encoder or the encoder has no CRTC.  Both the missing-lock and
 * missing-crtc cases are lowered WARN_ONs.
 */
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector ) { struct drm_encoder *encoder ; struct drm_device *dev ; int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; struct drm_crtc const *__mptr ; { encoder = connector->base.encoder; dev = connector->base.dev; tmp = drm_modeset_is_locked(& dev->mode_config.connection_mutex); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 13838, "WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned long )encoder == (unsigned long )((struct drm_encoder *)0)) { return (-1); } else { __ret_warn_on___0 = (unsigned long )encoder->crtc == (unsigned long )((struct drm_crtc *)0); tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 13840, "WARN_ON(!encoder->crtc)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { return (-1); } else { } } __mptr = (struct drm_crtc const *)encoder->crtc; return (((struct intel_crtc *)__mptr)->pipe); } } int intel_get_pipe_from_crtc_id(struct drm_device *dev , void *data , struct drm_file *file ) { struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id ; struct drm_crtc *drmmode_crtc ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; { pipe_from_crtc_id = (struct drm_i915_get_pipe_from_crtc_id *)data; drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); if ((unsigned long )drmmode_crtc == (unsigned long )((struct drm_crtc *)0)) { drm_err("no such CRTC id\n"); return (-2); } else { } __mptr = (struct drm_crtc const *)drmmode_crtc; crtc = (struct intel_crtc *)__mptr; pipe_from_crtc_id->pipe = (__u32 )crtc->pipe; return (0); } } static int intel_encoder_clones(struct intel_encoder *encoder ) { struct drm_device *dev ; struct intel_encoder *source_encoder ; int index_mask ; int entry ; struct list_head const *__mptr ; bool tmp ; struct list_head const *__mptr___0 ; { dev = encoder->base.dev; index_mask = 0; entry = 0; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; source_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_56528; ldv_56527: tmp = encoders_cloneable((struct intel_encoder const *)encoder, (struct intel_encoder const *)source_encoder); if ((int )tmp) { index_mask = (1 << entry) | index_mask; } else { } entry = entry + 1; __mptr___0 = (struct list_head const *)source_encoder->base.head.next; source_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_56528: ; if ((unsigned long )(& 
/* Continuation of intel_encoder_clones: loop-termination test (stop when the
 * iterator wraps back to the list head) and the final index_mask return. */
source_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_56527; } else { } return (index_mask); } }
/* has_edp_a - whether an eDP panel is present on port A. Reads byte +44 of
 * drm_i915_private (a CIL-flattened device_info bitfield -- presumably the
 * is_mobile flag, TODO confirm) and bit 2 of MMIO register 0x64000 (409600,
 * presumably DP_A detect -- confirm against i915 register headers); on gen 5
 * additionally rejects when bit 24 of register 0x42014 (270356) is set. */
static bool has_edp_a(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; uint32_t tmp ; struct drm_i915_private *__p___0 ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) == 0U) { return (0); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 1); if ((tmp & 4U) == 0U) { return (0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 5U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270356L, 1); if ((tmp___0 & 16777216U) != 0U) { return (0); } else { } } else { } return (1); } }
/* intel_crt_present - whether an analog CRT connector should be registered:
 * false on gen > 8 and for several device-id / flattened-bitfield
 * combinations (the "*((unsigned char *)__p + 45UL)" reads are CIL's view of
 * device_info flag bits; exact flag names cannot be confirmed from here).
 * Only the head is on this line; the body continues on the next. */
static bool intel_crt_present(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { return (0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if (((int )__p___1->info.device_id & 65280) == 2560) { return (0); } else { goto _L; } } else { _L: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const 
/* Continuation of intel_crt_present: gen-8 device-id low-nibble checks
 * (values 6, 11, 14 reject CRT) and the final flag tests before defaulting
 * to true. */
*)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { __p___4 = to_i915((struct drm_device const *)dev); if (((int )__p___4->info.device_id & 15) == 6) { return (0); } else { __p___5 = to_i915((struct drm_device const *)dev); if (((int )__p___5->info.device_id & 15) == 11) { return (0); } else { __p___6 = to_i915((struct drm_device const *)dev); if (((int )__p___6->info.device_id & 15) == 14) { return (0); } else { } } } } else { } } else { } } __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) != 0U) { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) == 8U) { return (0); } else { } } else { } __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) != 0U && (unsigned int )*((unsigned char *)dev_priv + 41280UL) == 0U) { return (0); } else { } return (1); } }
/* intel_setup_outputs - probe and register every display output (LVDS, CRT,
 * DDI, SDVO, HDMI, DP, DSI, DVO, TV) appropriate for this platform, then fix
 * up each encoder's possible_crtcs/possible_clones masks. This line holds
 * only the start of CIL's hoisted temporary declarations (every intermediate
 * value of the original function becomes an explicit local here); the list
 * and the body continue on the following lines. */
static void intel_setup_outputs(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_encoder *encoder ; bool dpd_is_edp ; bool tmp ; int found ; uint32_t tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp___1 ; int found___0 ; bool tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; uint32_t tmp___7 ; uint32_t tmp___8 ; uint32_t tmp___9 ; uint32_t tmp___10 ; bool tmp___11 ; int tmp___12 ; uint32_t tmp___13 ; bool tmp___14 ; uint32_t tmp___15 ; bool tmp___16 ; int tmp___17 ; uint32_t tmp___18 ; bool tmp___19 ; uint32_t tmp___20 ; uint32_t tmp___21 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; bool found___1 ; long tmp___22 ; long tmp___23 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; uint32_t tmp___24 ; long tmp___25 ; uint32_t tmp___26 ; long tmp___27 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct 
/* Continuation of intel_setup_outputs' hoisted temporary declarations. */
drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; uint32_t tmp___28 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; uint32_t tmp___29 ; struct drm_i915_private *__p___13 ; struct drm_i915_private *__p___14 ; struct drm_i915_private *__p___15 ; struct drm_i915_private *__p___16 ; struct drm_i915_private *__p___17 ; struct drm_i915_private *__p___18 ; struct drm_i915_private *__p___19 ; struct drm_i915_private *__p___20 ; struct drm_i915_private *__p___21 ; struct list_head const *__mptr ; int tmp___30 ; struct list_head const *__mptr___0 ;
/* Body: always try LVDS, then CRT when intel_crt_present() says so; the
 * large if/else ladder that follows dispatches on platform (gen 9 DDI ports
 * A-C; a DDI platform keyed off byte +46; PCH platforms; a platform keyed
 * off byte +45 with port registers around 0x1E1140/0x1E4140; pre-PCH legacy
 * SDVO/HDMI/DP; gen 2 DVO). Register offsets are raw MMIO addresses read via
 * uncore.funcs.mmio_readl; bit 2 (value 4U) is the port-detected test used
 * throughout. Exact register/flag names cannot be confirmed from this view. */
{ dev_priv = (struct drm_i915_private *)dev->dev_private; dpd_is_edp = 0; intel_lvds_init(dev); tmp = intel_crt_present(dev); if ((int )tmp) { intel_crt_init(dev); } else { } __p___19 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___19 + 45UL) == 0U) { __p___20 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___20->info.gen) == 9U) { intel_ddi_init(dev, 0); intel_ddi_init(dev, 1); intel_ddi_init(dev, 2); } else { goto _L___3; } } else { _L___3: /* CIL Label */ __p___18 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___18 + 46UL) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 1); found = (int )tmp___0 & 1; if (found != 0) { intel_ddi_init(dev, 0); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((__p___0->dev)->pdev)->revision <= 2U) { intel_ddi_init(dev, 0); } else { } } else { } } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794644L, 1); found = (int )tmp___1; if ((found & 4) != 0) { intel_ddi_init(dev, 1); } else { } if ((found & 2) != 0) { intel_ddi_init(dev, 2); } else { } if (found & 1) { intel_ddi_init(dev, 3); } else { } } else { __p___17 = to_i915((struct drm_device const *)dev); if ((unsigned int 
/* PCH platform branch: eDP on port A when has_edp_a(); SDVO-else-HDMI-else-DP
 * on port B; HDMI on ports C/D (D only when DP-D is not eDP); DP on ports
 * C/D. */
)__p___17->pch_type != 0U) { dpd_is_edp = intel_dp_is_edp(dev, 3); tmp___2 = has_edp_a(dev); if ((int )tmp___2) { intel_dp_init(dev, 409600, 0); } else { } tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 921920L, 1); if ((tmp___5 & 4U) != 0U) { tmp___3 = intel_sdvo_init(dev, 921920U, 1); found___0 = (int )tmp___3; if (found___0 == 0) { intel_hdmi_init(dev, 921920, 1); } else { } if (found___0 == 0) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 934144L, 1); if ((tmp___4 & 4U) != 0U) { intel_dp_init(dev, 934144, 1); } else { } } else { } } else { } tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 921936L, 1); if ((tmp___6 & 4U) != 0U) { intel_hdmi_init(dev, 921936, 2); } else { } if (! dpd_is_edp) { tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 921952L, 1); if ((tmp___7 & 4U) != 0U) { intel_hdmi_init(dev, 921952, 3); } else { } } else { } tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 934400L, 1); if ((tmp___8 & 4U) != 0U) { intel_dp_init(dev, 934400, 2); } else { } tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 934656L, 1); if ((tmp___9 & 4U) != 0U) { intel_dp_init(dev, 934656, 3); } else { } } else { __p___16 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___16 + 45UL) != 0U) { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1970496L, 1); if ((tmp___10 & 4U) != 0U) { tmp___11 = intel_dp_is_edp(dev, 1); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { intel_hdmi_init(dev, 1970496, 1); } else { } } else { } tmp___13 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1982720L, 1); if ((tmp___13 & 4U) != 0U) { intel_dp_init(dev, 1982720, 1); } else { tmp___14 = intel_dp_is_edp(dev, 1); if ((int )tmp___14) { intel_dp_init(dev, 1982720, 1); } else { } } tmp___15 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1970528L, 1); if ((tmp___15 & 4U) != 0U) { tmp___16 = intel_dp_is_edp(dev, 2); if (tmp___16) { tmp___17 = 0; } else { 
tmp___17 = 1; } if (tmp___17) { intel_hdmi_init(dev, 1970528, 2); } else { } } else { } tmp___18 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1982976L, 1); if ((tmp___18 & 4U) != 0U) { intel_dp_init(dev, 1982976, 2); } else { tmp___19 = intel_dp_is_edp(dev, 2); if ((int )tmp___19) { intel_dp_init(dev, 1982976, 2); } else { } } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { tmp___20 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1970540L, 1); if ((tmp___20 & 4U) != 0U) { intel_hdmi_init(dev, 1970540, 3); } else { } tmp___21 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1983232L, 1); if ((tmp___21 & 4U) != 0U) { intel_dp_init(dev, 1983232, 3); } else { } } else { } } else { } intel_dsi_init(dev); } else {
/* Pre-PCH legacy path (gen != 2, non-"+44UL" platforms): probe SDVOB/SDVOC
 * at 0x611C0/0x611E0 (397632/397664), falling back to HDMI and then DP on
 * the matching ports; gen 2 gets DVO instead. */
__p___14 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___14->info.gen) != 2U) { __p___15 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___15 + 44UL) == 0U) { found___1 = 0; tmp___24 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397632L, 1); if ((tmp___24 & 4U) != 0U) { tmp___22 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___22 != 0L) { drm_ut_debug_printk("intel_setup_outputs", "probing SDVOB\n"); } else { } found___1 = intel_sdvo_init(dev, 397632U, 1); if (! found___1) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 5U) { _L: /* CIL Label */ tmp___23 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___23 != 0L) { drm_ut_debug_printk("intel_setup_outputs", "probing HDMI on SDVOB\n"); } else { } intel_hdmi_init(dev, 397632, 1); } else { } } } else { } if (! 
found___1) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 44UL) != 0U) { intel_dp_init(dev, 409856, 1); } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 5U) { intel_dp_init(dev, 409856, 1); } else { } } } else { } } else { } tmp___26 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397632L, 1); if ((tmp___26 & 4U) != 0U) { tmp___25 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___25 != 0L) { drm_ut_debug_printk("intel_setup_outputs", "probing SDVOC\n"); } else { } found___1 = intel_sdvo_init(dev, 397664U, 0); } else { } if (! found___1) { tmp___28 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397664L, 1); if ((tmp___28 & 4U) != 0U) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 44UL) != 0U) { goto _L___0; } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) == 5U) { _L___0: /* CIL Label */ tmp___27 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___27 != 0L) { drm_ut_debug_printk("intel_setup_outputs", "probing HDMI on SDVOC\n"); } else { } intel_hdmi_init(dev, 397664, 2); } else { } } __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 44UL) != 0U) { intel_dp_init(dev, 410112, 2); } else { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___10->info.gen) == 5U) { intel_dp_init(dev, 410112, 2); } else { } } } else { } } else { } __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___11 + 44UL) != 0U) { goto _L___1; } else { __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___12->info.gen) == 5U) { _L___1: /* CIL Label */ tmp___29 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 410368L, 1); if ((tmp___29 & 4U) != 0U) { 
intel_dp_init(dev, 410368, 3); } else { } } else { } } } else { goto _L___2; } } else { _L___2: /* CIL Label */ __p___13 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___13->info.gen) == 2U) { intel_dvo_init(dev); } else { } } } } } } __p___21 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___21 + 46UL) != 0U) { intel_tv_init(dev); } else { } intel_psr_init(dev);
/* Final pass (CIL-flattened list_for_each_entry over encoder_list): set each
 * encoder's possible_crtcs from its crtc_mask and possible_clones from
 * intel_encoder_clones(), then refclk init and panel-connector reordering. */
__mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_56769; ldv_56768: encoder->base.possible_crtcs = (uint32_t )encoder->crtc_mask; tmp___30 = intel_encoder_clones(encoder); encoder->base.possible_clones = (uint32_t )tmp___30; __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_56769: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_56768; } else { } intel_init_pch_refclk(dev); drm_helper_move_panel_connectors_to_head(dev); return; } }
/* intel_user_framebuffer_destroy - drm_framebuffer_funcs.destroy: clean up
 * the framebuffer, drop (and WARN on underflow of) the backing GEM object's
 * framebuffer_references under struct_mutex, unreference the object and free
 * the wrapper. Only the head is on this line; it continues on the next. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb ) { struct drm_device *dev ; struct intel_framebuffer *intel_fb ; struct drm_framebuffer const *__mptr ; int __ret_warn_on ; unsigned long tmp ; long tmp___0 ; { dev = fb->dev; __mptr = (struct drm_framebuffer const *)fb; intel_fb = (struct intel_framebuffer *)__mptr; drm_framebuffer_cleanup(fb); mutex_lock_nested(& dev->struct_mutex, 0U); tmp = (intel_fb->obj)->framebuffer_references; (intel_fb->obj)->framebuffer_references = (intel_fb->obj)->framebuffer_references - 1UL; __ret_warn_on = tmp == 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
/* Continuation of intel_user_framebuffer_destroy: WARN message, GEM object
 * unreference, unlock and free. */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 14086, "WARN_ON(!intel_fb->obj->framebuffer_references--)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); drm_gem_object_unreference___12(& (intel_fb->obj)->base); mutex_unlock(& dev->struct_mutex); kfree((void const *)intel_fb); return; } }
/* intel_user_framebuffer_create_handle - drm_framebuffer_funcs.create_handle:
 * hand out a GEM handle for the framebuffer's backing object. */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb , struct drm_file *file , unsigned int *handle ) { struct intel_framebuffer *intel_fb ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *obj ; int tmp ; { __mptr = (struct drm_framebuffer const *)fb; intel_fb = (struct intel_framebuffer *)__mptr; obj = intel_fb->obj; tmp = drm_gem_handle_create(file, & obj->base, handle); return (tmp); } }
/* Framebuffer vtable: destroy, create_handle, no dirty callback. */
static struct drm_framebuffer_funcs const intel_fb_funcs = {& intel_user_framebuffer_destroy, & intel_user_framebuffer_create_handle, 0};
/* intel_fb_pitch_limit - maximum allowed framebuffer pitch in bytes, chosen
 * by hardware generation and tiling modifier (72057594037927937ULL is a
 * tiling modifier constant -- presumably I915_FORMAT_MOD_X_TILED, confirm
 * against drm_fourcc headers). Head only; the gen<=8 branches continue on
 * the next line. */
static u32 intel_fb_pitch_limit(struct drm_device *dev , uint64_t fb_modifier , uint32_t pixel_format ) { u32 gen ; struct drm_i915_private *__p ; int _min1 ; int tmp ; int _min2 ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); gen = (u32 )__p->info.gen; if (gen > 8U) { tmp = drm_format_plane_cpp(pixel_format, 0); _min1 = tmp * 8192; _min2 = 32768; return ((u32 )(_min1 < _min2 ? 
/* Continuation of intel_fb_pitch_limit: per-generation pitch caps (32768,
 * 16384 or 8192 bytes; X-tiled surfaces get the smaller cap on gen 3/4). */
_min1 : _min2)); } else if (gen > 4U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return (32768U); } else { goto _L; } } else _L: /* CIL Label */ if (gen > 3U) { if (fb_modifier == 72057594037927937ULL) { return (16384U); } else { return (32768U); } } else if (gen > 2U) { if (fb_modifier == 72057594037927937ULL) { return (8192U); } else { return (16384U); } } else { return (8192U); } } }
/* intel_framebuffer_init - validate a userspace framebuffer request
 * (mode_cmd) against hardware limits and initialize @intel_fb around @obj.
 * Checks, in order: struct_mutex held (WARN), tiling modifier vs the GEM
 * object's tiling bits (byte +409 of the object is a CIL-flattened tiling
 * bitfield), stride alignment, pitch limit, X-tiled pitch == obj->stride,
 * per-generation pixel-format support (the case constants are fourcc codes),
 * offsets[0] == 0, and that the object is large enough. Returns -22 (-EINVAL
 * expanded) on any violation. */
static int intel_framebuffer_init(struct drm_device *dev , struct intel_framebuffer *intel_fb , struct drm_mode_fb_cmd2 *mode_cmd , struct drm_i915_gem_object *obj ) { unsigned int aligned_height ; int ret ; u32 pitch_limit ; u32 stride_alignment ; int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; struct drm_i915_private *__p ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; char const *tmp___8 ; long tmp___9 ; struct drm_i915_private *__p___0 ; char const *tmp___10 ; long tmp___11 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; char const *tmp___12 ; long tmp___13 ; struct drm_i915_private *__p___3 ; char const *tmp___14 ; long tmp___15 ; struct drm_i915_private *__p___4 ; char const *tmp___16 ; long tmp___17 ; struct drm_i915_private *__p___5 ; char const *tmp___18 ; long tmp___19 ; { tmp = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 14145, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((mode_cmd->flags & 2U) != 0U) { if (((unsigned int )*((unsigned char *)obj + 409UL) == 64U) ^ (mode_cmd->modifier[0] == 72057594037927937ULL)) { tmp___1 = 
ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_framebuffer_init", "tiling_mode doesn\'t match fb modifier\n"); } else { } return (-22); } else { } } else if ((unsigned int )*((unsigned char *)obj + 409UL) == 64U) { mode_cmd->modifier[0] = 72057594037927937ULL; } else if ((unsigned int )*((unsigned char *)obj + 409UL) == 128U) { tmp___2 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_framebuffer_init", "No Y tiling for legacy addfb\n"); } else { } return (-22); } else { }
/* Modifier switch: Y/Yf tiling (cases 2/3) is rejected below gen 9; linear
 * (0) and X-tiled (1) are accepted; everything else is rejected. Note cases
 * 2/3 intentionally fall through to 0/1 when the gen check passes. */
switch (mode_cmd->modifier[0]) { case 2ULL: ; case 3ULL: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { tmp___3 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_framebuffer_init", "Unsupported tiling 0x%llx!\n", mode_cmd->modifier[0]); } else { } return (-22); } else { } case 0ULL: ; case 1ULL: ; goto ldv_56834; default: tmp___4 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_framebuffer_init", "Unsupported fb modifier 0x%llx!\n", mode_cmd->modifier[0]); } else { } return (-22); } ldv_56834: stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0], mode_cmd->pixel_format); if ((mode_cmd->pitches[0] & (stride_alignment - 1U)) != 0U) { tmp___5 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_framebuffer_init", "pitch (%d) must be at least %u byte aligned\n", mode_cmd->pitches[0], stride_alignment); } else { } return (-22); } else { } pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0], mode_cmd->pixel_format); if (mode_cmd->pitches[0] > pitch_limit) { tmp___6 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_framebuffer_init", "%s pitch (%u) must be at less than %d\n", 
mode_cmd->modifier[0] != 0ULL ? (char *)"tiled" : (char *)"linear", mode_cmd->pitches[0], pitch_limit); } else { } return (-22); } else { } if (mode_cmd->modifier[0] == 72057594037927937ULL && mode_cmd->pitches[0] != obj->stride) { tmp___7 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_framebuffer_init", "pitch (%d) must match tiling stride (%d)\n", mode_cmd->pitches[0], obj->stride); } else { } return (-22); } else { }
/* Pixel-format switch: the case values are little-endian fourcc codes
 * (e.g. 875713112U = 'XR24'); several are gated on minimum generation or on
 * the +45UL device flag. */
switch (mode_cmd->pixel_format) { case 538982467U: ; case 909199186U: ; case 875713112U: ; case 875713089U: ; goto ldv_56840; case 892424792U: __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 3U) { tmp___9 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___9 != 0L) { tmp___8 = drm_get_format_name(mode_cmd->pixel_format); drm_ut_debug_printk("intel_framebuffer_init", "unsupported pixel format: %s\n", tmp___8); } else { } return (-22); } else { } goto ldv_56840; case 875708993U: __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 8U) { tmp___11 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___11 != 0L) { tmp___10 = drm_get_format_name(mode_cmd->pixel_format); drm_ut_debug_printk("intel_framebuffer_init", "unsupported pixel format: %s\n", tmp___10); } else { } return (-22); } else { } } else { } goto ldv_56840; case 875709016U: ; case 808669784U: ; case 808665688U: __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) <= 3U) { tmp___13 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___13 != 0L) { tmp___12 = drm_get_format_name(mode_cmd->pixel_format); drm_ut_debug_printk("intel_framebuffer_init", "unsupported pixel format: %s\n", tmp___12); } else { } return (-22); 
/* Continuation of intel_framebuffer_init's pixel-format switch (remaining
 * fourcc cases), the final offset/size checks, and the actual
 * drm_framebuffer_init() registration which bumps the GEM object's
 * framebuffer_references on success. */
} else { } goto ldv_56840; case 808665665U: __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { tmp___15 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___15 != 0L) { tmp___14 = drm_get_format_name(mode_cmd->pixel_format); drm_ut_debug_printk("intel_framebuffer_init", "unsupported pixel format: %s\n", tmp___14); } else { } return (-22); } else { } goto ldv_56840; case 1448695129U: ; case 1498831189U: ; case 1431918169U: ; case 1498765654U: __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) <= 4U) { tmp___17 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___17 != 0L) { tmp___16 = drm_get_format_name(mode_cmd->pixel_format); drm_ut_debug_printk("intel_framebuffer_init", "unsupported pixel format: %s\n", tmp___16); } else { } return (-22); } else { } goto ldv_56840; default: tmp___19 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___19 != 0L) { tmp___18 = drm_get_format_name(mode_cmd->pixel_format); drm_ut_debug_printk("intel_framebuffer_init", "unsupported pixel format: %s\n", tmp___18); } else { } return (-22); } ldv_56840: ; if (mode_cmd->offsets[0] != 0U) { return (-22); } else { } aligned_height = intel_fb_align_height(dev, mode_cmd->height, mode_cmd->pixel_format, mode_cmd->modifier[0]); if (obj->base.size < (size_t )(mode_cmd->pitches[0] * aligned_height)) { return (-22); } else { } drm_helper_mode_fill_fb_struct(& intel_fb->base, mode_cmd); intel_fb->obj = obj; (intel_fb->obj)->framebuffer_references = (intel_fb->obj)->framebuffer_references + 1UL; ret = drm_framebuffer_init(dev, & intel_fb->base, & intel_fb_funcs); if (ret != 0) { drm_err("framebuffer init failed %d\n", ret); return (ret); } else { } return (0); } }
/* intel_user_framebuffer_create - mode_config.fb_create callback: look up the
 * GEM object named by handles[0] and wrap it in an intel framebuffer;
 * returns ERR_PTR(-2) (-ENOENT expanded) when the handle does not resolve. */
static struct drm_framebuffer *intel_user_framebuffer_create(struct drm_device *dev , struct drm_file *filp , struct drm_mode_fb_cmd2 *mode_cmd ) { struct drm_i915_gem_object *obj ; struct drm_gem_object const *__mptr ; struct drm_gem_object *tmp ; void *tmp___0 ; struct drm_framebuffer *tmp___1 ; { tmp = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]); __mptr = (struct drm_gem_object const *)tmp; obj = (struct drm_i915_gem_object *)__mptr; if ((unsigned long )(& obj->base) == (unsigned long )((struct drm_gem_object *)0)) { tmp___0 = ERR_PTR(-2L); return ((struct drm_framebuffer *)tmp___0); } else { } tmp___1 = intel_framebuffer_create(dev, mode_cmd, obj); return (tmp___1); } }
/* Mode-config vtable: fb_create, output_poll_changed, atomic check/commit. */
static struct drm_mode_config_funcs const intel_mode_funcs = {& intel_user_framebuffer_create, & intel_fbdev_output_poll_changed, & intel_atomic_check, & intel_atomic_commit, 0, 0, 0};
/* intel_init_display - populate dev_priv->display function pointers per
 * platform. Only the head and part of the hoisted temporary declarations are
 * on these lines; the definition continues beyond this chunk. */
static void intel_init_display(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; struct drm_i915_private *__p___13 ; struct drm_i915_private *__p___14 ; struct drm_i915_private *__p___15 ; struct drm_i915_private *__p___16 ; struct drm_i915_private *__p___17 ; struct drm_i915_private *__p___18 ; struct drm_i915_private *__p___19 ; struct drm_i915_private *__p___20 ; struct drm_i915_private *__p___21 ; struct drm_i915_private *__p___22 ; struct drm_i915_private *__p___23 ; struct drm_i915_private *__p___24 ; struct drm_i915_private *__p___25 ; struct drm_i915_private *__p___26 ; struct drm_i915_private *__p___27 ; struct drm_i915_private *__p___28 ; struct drm_i915_private *__p___29 ; struct drm_i915_private *__p___30 ; struct drm_i915_private *__p___31 ; struct drm_i915_private 
*__p___32 ; struct drm_i915_private *__p___33 ; struct drm_i915_private *__p___34 ; struct drm_i915_private *__p___35 ; struct drm_i915_private *__p___36 ; struct drm_i915_private *__p___37 ; struct lock_class_key __key ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type != 0U) { dev_priv->display.find_dpll = & g4x_find_best_dpll; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 44UL) != 0U) { dev_priv->display.find_dpll = & g4x_find_best_dpll; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { dev_priv->display.find_dpll = & chv_find_best_dpll; } else { goto _L; } } else { _L: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { dev_priv->display.find_dpll = & vlv_find_best_dpll; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { dev_priv->display.find_dpll = & pnv_find_best_dpll; } else { dev_priv->display.find_dpll = & i9xx_find_best_dpll; } } } } } __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) > 8U) { dev_priv->display.get_pipe_config = & haswell_get_pipe_config; dev_priv->display.get_initial_plane_config = & skylake_get_initial_plane_config; dev_priv->display.crtc_compute_clock = & haswell_crtc_compute_clock; dev_priv->display.crtc_enable = & haswell_crtc_enable; dev_priv->display.crtc_disable = & haswell_crtc_disable; dev_priv->display.off = & ironlake_crtc_off; dev_priv->display.update_primary_plane = & skylake_update_primary_plane; } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned 
char *)__p___7 + 46UL) != 0U) { dev_priv->display.get_pipe_config = & haswell_get_pipe_config; dev_priv->display.get_initial_plane_config = & ironlake_get_initial_plane_config; dev_priv->display.crtc_compute_clock = & haswell_crtc_compute_clock; dev_priv->display.crtc_enable = & haswell_crtc_enable; dev_priv->display.crtc_disable = & haswell_crtc_disable; dev_priv->display.off = & ironlake_crtc_off; dev_priv->display.update_primary_plane = & ironlake_update_primary_plane; } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___6->pch_type != 0U) { dev_priv->display.get_pipe_config = & ironlake_get_pipe_config; dev_priv->display.get_initial_plane_config = & ironlake_get_initial_plane_config; dev_priv->display.crtc_compute_clock = & ironlake_crtc_compute_clock; dev_priv->display.crtc_enable = & ironlake_crtc_enable; dev_priv->display.crtc_disable = & ironlake_crtc_disable; dev_priv->display.off = & ironlake_crtc_off; dev_priv->display.update_primary_plane = & ironlake_update_primary_plane; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { dev_priv->display.get_pipe_config = & i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = & i9xx_get_initial_plane_config; dev_priv->display.crtc_compute_clock = & i9xx_crtc_compute_clock; dev_priv->display.crtc_enable = & valleyview_crtc_enable; dev_priv->display.crtc_disable = & i9xx_crtc_disable; dev_priv->display.off = & i9xx_crtc_off; dev_priv->display.update_primary_plane = & i9xx_update_primary_plane; } else { dev_priv->display.get_pipe_config = & i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = & i9xx_get_initial_plane_config; dev_priv->display.crtc_compute_clock = & i9xx_crtc_compute_clock; dev_priv->display.crtc_enable = & i9xx_crtc_enable; dev_priv->display.crtc_disable = & i9xx_crtc_disable; dev_priv->display.off = & i9xx_crtc_off; dev_priv->display.update_primary_plane = & 
i9xx_update_primary_plane; } } } } __p___27 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___27 + 45UL) != 0U) { dev_priv->display.get_display_clock_speed = & skylake_get_display_clock_speed; } else { __p___25 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___25 + 45UL) == 0U) { __p___26 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___26->info.gen) == 8U) { dev_priv->display.get_display_clock_speed = & broadwell_get_display_clock_speed; } else { goto _L___1; } } else { _L___1: /* CIL Label */ __p___24 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___24 + 45UL) != 0U) { dev_priv->display.get_display_clock_speed = & haswell_get_display_clock_speed; } else { __p___23 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___23 + 45UL) != 0U) { dev_priv->display.get_display_clock_speed = & valleyview_get_display_clock_speed; } else { __p___22 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___22->info.gen) == 5U) { dev_priv->display.get_display_clock_speed = & ilk_get_display_clock_speed; } else { __p___16 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___16->info.device_id) == 10098U) { dev_priv->display.get_display_clock_speed = & i945_get_display_clock_speed; } else { __p___17 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___17 + 45UL) != 0U) { dev_priv->display.get_display_clock_speed = & i945_get_display_clock_speed; } else { __p___18 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___18->info.gen) == 6U) { dev_priv->display.get_display_clock_speed = & i945_get_display_clock_speed; } else { __p___19 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___19 + 45UL) != 0U) { dev_priv->display.get_display_clock_speed = & 
i945_get_display_clock_speed; } else { __p___20 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___20 + 44UL) != 0U) { __p___21 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___21->info.device_id) != 40977U) { dev_priv->display.get_display_clock_speed = & i945_get_display_clock_speed; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___15 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___15 + 44UL) != 0U) { dev_priv->display.get_display_clock_speed = & i915_get_display_clock_speed; } else { __p___13 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___13 + 44UL) != 0U) { dev_priv->display.get_display_clock_speed = & i9xx_misc_get_display_clock_speed; } else { __p___14 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___14->info.device_id) == 9570U) { dev_priv->display.get_display_clock_speed = & i9xx_misc_get_display_clock_speed; } else { __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___12 + 44UL) != 0U) { dev_priv->display.get_display_clock_speed = & pnv_get_display_clock_speed; } else { __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___11->info.device_id) == 9618U) { dev_priv->display.get_display_clock_speed = & i915gm_get_display_clock_speed; } else { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___10->info.device_id) == 9586U) { dev_priv->display.get_display_clock_speed = & i865_get_display_clock_speed; } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 44UL) != 0U) { dev_priv->display.get_display_clock_speed = & i855_get_display_clock_speed; } else { dev_priv->display.get_display_clock_speed = & i830_get_display_clock_speed; } } } } } } } } } } } } } } } } } __p___36 = 
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___36->info.gen) == 5U) { dev_priv->display.fdi_link_train = & ironlake_fdi_link_train; } else { __p___35 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___35->info.gen) == 6U) { dev_priv->display.fdi_link_train = & gen6_fdi_link_train; } else { __p___34 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___34 + 45UL) != 0U) { dev_priv->display.fdi_link_train = & ivb_manual_fdi_link_train; } else { __p___31 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___31 + 45UL) != 0U) { dev_priv->display.fdi_link_train = & hsw_fdi_link_train; } else { __p___32 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___32 + 45UL) == 0U) { __p___33 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___33->info.gen) == 8U) { dev_priv->display.fdi_link_train = & hsw_fdi_link_train; } else { goto _L___2; } } else { _L___2: /* CIL Label */ __p___30 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___30 + 45UL) != 0U) { dev_priv->display.modeset_global_resources = & valleyview_modeset_global_resources; } else { __p___28 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___28 + 45UL) == 0U) { __p___29 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___29->info.gen) == 9U) { dev_priv->display.modeset_global_resources = & broxton_modeset_global_resources; } else { } } else { } } } } } } } __p___37 = to_i915((struct drm_device const *)dev); switch ((int )__p___37->info.gen) { case 2: dev_priv->display.queue_flip = & intel_gen2_queue_flip; goto ldv_57136; case 3: dev_priv->display.queue_flip = & intel_gen3_queue_flip; goto ldv_57136; case 4: ; case 5: dev_priv->display.queue_flip = & intel_gen4_queue_flip; goto ldv_57136; case 6: 
dev_priv->display.queue_flip = & intel_gen6_queue_flip; goto ldv_57136; case 7: ; case 8: dev_priv->display.queue_flip = & intel_gen7_queue_flip; goto ldv_57136; case 9: ; default: dev_priv->display.queue_flip = & intel_default_queue_flip; } ldv_57136: intel_panel_init_backlight_funcs(dev); __mutex_init(& dev_priv->pps_mutex, "&dev_priv->pps_mutex", & __key); return; } } static void quirk_pipea_force(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->quirks = dev_priv->quirks | 1UL; printk("\016[drm] applying pipe a force quirk\n"); return; } } static void quirk_pipeb_force(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->quirks = dev_priv->quirks | 16UL; printk("\016[drm] applying pipe b force quirk\n"); return; } } static void quirk_ssc_force_disable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->quirks = dev_priv->quirks | 2UL; printk("\016[drm] applying lvds SSC disable quirk\n"); return; } } static void quirk_invert_brightness(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->quirks = dev_priv->quirks | 4UL; printk("\016[drm] applying inverted panel brightness quirk\n"); return; } } static void quirk_backlight_present(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->quirks = dev_priv->quirks | 8UL; printk("\016[drm] applying backlight present quirk\n"); return; } } static int intel_dmi_reverse_brightness(struct dmi_system_id const *id ) { { printk("\016[drm] Backlight polarity reversed on %s\n", id->ident); return (1); } } static struct dmi_system_id const __constr_expr_0[2] = { {& intel_dmi_reverse_brightness, "NCR Corporation", {{4U, (unsigned char)0, {'N', 
/* (continuation of the "NCR Corporation" DMI ident character array begun on the previous line) */
'C', 'R', ' ', 'C', 'o', 'r', 'p', 'o', 'r', 'a', 't', 'i', 'o', 'n', '\000'}}, {5U, (unsigned char)0, {'\000'}}}, 0}};
/* One-entry DMI quirk table: invert backlight polarity on the matching NCR systems. */
static struct intel_dmi_quirk const intel_dmi_quirks[1U] = { {& quirk_invert_brightness, (struct dmi_system_id const *)(& __constr_expr_0)}};
/* PCI-id quirk table: {device, subsystem_vendor, subsystem_device, hook};
 * -1 acts as a wildcard (see the matching logic in intel_init_quirks()). */
static struct intel_quirk intel_quirks[18U] = { {9618, 4473, 1, & quirk_pipea_force}, {10114, 6058, 8218, & quirk_pipea_force}, {13687, -1, -1, & quirk_pipea_force}, {13687, -1, -1, & quirk_pipeb_force}, {70, 6058, 14624, & quirk_ssc_force_disable}, {70, 4173, 36982, & quirk_ssc_force_disable}, {10818, 4133, 1113, & quirk_invert_brightness}, {10818, 4133, 528, & quirk_invert_brightness}, {10818, 4133, 530, & quirk_invert_brightness}, {10818, 4133, 843, & quirk_invert_brightness}, {10818, 4133, 608, & quirk_invert_brightness}, {10818, 4133, 1162, & quirk_invert_brightness}, {2566, 4133, 2577, & quirk_backlight_present}, {2582, 4133, 2577, & quirk_backlight_present}, {10146, 32902, 29296, & quirk_backlight_present}, {2566, 4473, 2696, & quirk_backlight_present}, {2566, 4156, 8685, & quirk_backlight_present}, {2566, 4136, 2613, & quirk_backlight_present}};
/* intel_init_quirks: run every intel_quirks[] hook whose PCI ids match this
 * device (loop 1, indices 0..17), then every DMI-matched hook in
 * intel_dmi_quirks[] (loop 2).  The second loop's `if (i == 0)` guard is the
 * CIL rendering of `i < ARRAY_SIZE(intel_dmi_quirks)` for a 1-element table,
 * so it executes exactly one iteration -- odd-looking but correct. */
static void intel_init_quirks(struct drm_device *dev ) { struct pci_dev *d ; int i ; struct intel_quirk *q ; int tmp ; { d = dev->pdev; i = 0; goto ldv_57191; ldv_57190: q = (struct intel_quirk *)(& intel_quirks) + (unsigned long )i; if (((int )d->device == q->device && ((int )d->subsystem_vendor == q->subsystem_vendor || q->subsystem_vendor == -1)) && ((int )d->subsystem_device == q->subsystem_device || q->subsystem_device == -1)) { (*(q->hook))(dev); } else { } i = i + 1; ldv_57191: ; if ((unsigned int )i <= 17U) { goto ldv_57190; } else { } i = 0; goto ldv_57196; ldv_57195: tmp = dmi_check_system((struct dmi_system_id const *)intel_dmi_quirks[i].dmi_id_list); if (tmp != 0) { (*(intel_dmi_quirks[i].hook))(dev); } else { } i = i + 1; ldv_57196: ; if (i == 0) { goto ldv_57195; } else { } return; } }
/* i915_disable_vga: grab the VGA arbiter, poke legacy VGA I/O ports
 * (0x3C4/0x3C5 = 964/965: set SR01 screen-off bit 5), release the arbiter,
 * then write the disable bit (0x80000000) to the VGA control register via
 * the uncore mmio hooks and post the write with a readback. */
static void i915_disable_vga(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u8 sr1 ; u32 vga_reg ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = i915_vgacntrl_reg(dev); vga_reg = tmp; vga_get_uninterruptible(dev->pdev, 1U); outb(1, 964); sr1 = inb(965); outb((int )((unsigned int )sr1 | 32U), 965); vga_put(dev->pdev, 1U); __const_udelay(1288500UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )vga_reg, 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )vga_reg, 0); return; } }
/* intel_modeset_init_hw: hardware-side modeset init -- DDI prep, cdclk update
 * on the +45UL-flag platform (presumably Valleyview -- cannot confirm from
 * here), clock gating, and GT powersave enable. */
void intel_modeset_init_hw(struct drm_device *dev ) { struct drm_i915_private *__p ; { intel_prepare_ddi(dev); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { vlv_update_cdclk(dev); } else { } intel_init_clock_gating(dev); intel_enable_gt_powersave(dev); return; } }
/* intel_modeset_init: software-side modeset init.  Sets up drm mode_config
 * limits (max fb size by gen, cursor size by device id), applies quirks,
 * creates CRTCs and sprite planes (the ldv_572xx labels are CIL loop
 * translations), initializes DPIO/shared DPLLs, disables VGA, sets up
 * outputs and reads back the initial hardware state.  Returns early if the
 * byte-38 device-info flag is clear (presumably "has display" -- TODO confirm). */
void intel_modeset_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int sprite ; int ret ; enum pipe pipe ; struct intel_crtc *crtc ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; long tmp ; struct drm_i915_private *__p___8 ; long tmp___0 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; drm_mode_config_init(dev); dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; dev->mode_config.preferred_depth = 24U; dev->mode_config.prefer_shadow = 1U; dev->mode_config.allow_fb_modifiers = 1; dev->mode_config.funcs = & intel_mode_funcs; intel_init_quirks(dev); intel_init_pm(dev); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 38UL) == 0U) { return; } else { } 
intel_init_display(dev); intel_init_audio(dev); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 2U) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 3U) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; } } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___4->info.device_id) == 9570U) { __p___2 = to_i915((struct drm_device const *)dev); dev->mode_config.cursor_width = (unsigned int )((unsigned short )__p___2->info.device_id) == 9570U ? 64U : 512U; dev->mode_config.cursor_height = 1023U; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___5->info.device_id) == 9586U) { __p___2 = to_i915((struct drm_device const *)dev); dev->mode_config.cursor_width = (unsigned int )((unsigned short )__p___2->info.device_id) == 9570U ? 64U : 512U; dev->mode_config.cursor_height = 1023U; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 2U) { dev->mode_config.cursor_width = 64U; dev->mode_config.cursor_height = 64U; } else { dev->mode_config.cursor_width = 256U; dev->mode_config.cursor_height = 256U; } } } dev->mode_config.fb_base = dev_priv->gtt.mappable_base; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { __p___6 = to_i915((struct drm_device const *)dev); __p___7 = to_i915((struct drm_device const *)dev); drm_ut_debug_printk("intel_modeset_init", "%d display pipe%s available.\n", (int )__p___7->info.num_pipes, (int )__p___6->info.num_pipes > 1 ? 
(char *)"s" : (char *)""); } else { } pipe = 0; goto ldv_57298; ldv_57297: intel_crtc_init(dev, (int )pipe); sprite = 0; goto ldv_57295; ldv_57294: ret = intel_plane_init(dev, pipe, sprite); if (ret != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { __p___8 = to_i915((struct drm_device const *)dev); drm_ut_debug_printk("intel_modeset_init", "pipe %c sprite %c init failed: %d\n", (int )pipe + 65, ((int )__p___8->info.num_sprites[(int )pipe] * (int )pipe + sprite) + 65, ret); } else { } } else { } sprite = sprite + 1; ldv_57295: __p___9 = dev_priv; if ((int )__p___9->info.num_sprites[(int )pipe] > sprite) { goto ldv_57294; } else { } pipe = (enum pipe )((int )pipe + 1); ldv_57298: __p___10 = dev_priv; if ((int )__p___10->info.num_pipes > (int )pipe) { goto ldv_57297; } else { } intel_init_dpio(dev); intel_shared_dpll_init(dev); i915_disable_vga(dev); intel_setup_outputs(dev); intel_fbc_disable(dev); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, 0); drm_modeset_unlock_all(dev); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_57306; ldv_57305: ; if (! 
/* (continuation of intel_modeset_init(): for each active CRTC, read out the
 * initial plane configuration and take over the pre-existing framebuffer) */
crtc->active) { goto ldv_57304; } else { } if ((unsigned long )dev_priv->display.get_initial_plane_config != (unsigned long )((void (*)(struct intel_crtc * , struct intel_initial_plane_config * ))0)) { (*(dev_priv->display.get_initial_plane_config))(crtc, & crtc->plane_config); intel_find_initial_plane_obj(crtc, & crtc->plane_config); } else { } ldv_57304: __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_57306: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_57305; } else { } return; } }
/* intel_enable_pipe_a: find the CRT connector (encoder->type == 1) on the
 * connector list and, if present, use the load-detect machinery to briefly
 * enable a pipe through it (helper for the pipe A force quirk). */
static void intel_enable_pipe_a(struct drm_device *dev ) { struct intel_connector *connector ; struct drm_connector *crt ; struct intel_load_detect_pipe load_detect_temp ; struct drm_modeset_acquire_ctx *ctx ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; bool tmp ; { crt = (struct drm_connector *)0; ctx = dev->mode_config.acquire_ctx; __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_57321; ldv_57320: ; if ((unsigned int )(connector->encoder)->type == 1U) { crt = & connector->base; goto ldv_57319; } else { } __mptr___0 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_57321: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_57320; } else { } ldv_57319: ; if ((unsigned long )crt == (unsigned long )((struct drm_connector *)0)) { return; } else { } tmp = intel_get_load_detect_pipe(crt, (struct drm_display_mode *)0, & load_detect_temp, ctx); if ((int )tmp) { intel_release_load_detect_pipe(crt, & load_detect_temp, ctx); } else { } return; } }
/* intel_check_plane_mapping: on pre-gen4-style hardware (byte-38 flag set it
 * trivially returns true), read the other plane's control register (mmio
 * offset computed from pipe_offsets + 459136U) and report false when that
 * plane is enabled (top bit set) but points at this CRTC's pipe -- i.e. the
 * BIOS left a crossed plane->pipe mapping. */
static bool intel_check_plane_mapping(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 reg ; u32 val ; struct drm_i915_private *__p ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 38UL) == 1U) { return (1); } else { } reg = ((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )crtc->plane == 0U] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((int )val < 0 && ((val & 50331648U) != 0U) == (int )crtc->pipe) { return (0); } else { } return (1); } }
/* intel_sanitize_crtc: bring one CRTC's software state in line with the
 * hardware left by BIOS/previous owner.  Steps visible below: mask off bits
 * in a per-transcoder register (offset + 458760U), reset/enable vblank,
 * fix a crossed plane mapping on gen<=3 by disabling the CRTC and detaching
 * its connectors, apply the pipe-A force quirk, resync crtc->active with
 * state->enable (detaching stale encoders), and pessimistically mark FIFO
 * underrun reporting disabled where the hardware cannot be trusted. */
static void intel_sanitize_crtc(struct intel_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 reg ; uint32_t tmp ; struct intel_connector *connector ; bool plane ; long tmp___0 ; struct drm_plane_state const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; int __ret_warn_on ; long tmp___1 ; struct drm_i915_private *__p ; bool tmp___2 ; int tmp___3 ; struct intel_encoder *encoder ; long tmp___4 ; int __ret_warn_on___0 ; long tmp___5 ; struct list_head const *__mptr___4 ; int __ret_warn_on___1 ; long tmp___6 ; struct list_head const *__mptr___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; reg = ((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )(crtc->config)->cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, tmp & 3892314111U, 1); drm_crtc_vblank_reset(& crtc->base); if ((int )crtc->active) { update_scanline_offset(crtc); drm_crtc_vblank_on(& crtc->base); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned 
int )((unsigned char )__p->info.gen) <= 3U) { tmp___2 = intel_check_plane_mapping(crtc); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_sanitize_crtc", "[CRTC:%d] wrong plane connection detected!\n", crtc->base.base.id); } else { } plane = (unsigned int )crtc->plane != 0U; __mptr = (struct drm_plane_state const *)(crtc->base.primary)->state; ((struct intel_plane_state *)__mptr)->visible = 1; crtc->plane = (enum plane )(! plane); intel_crtc_disable_planes(& crtc->base); (*(dev_priv->display.crtc_disable))(& crtc->base); crtc->plane = (enum plane )plane; __mptr___0 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_57358; ldv_57357: ; if ((unsigned long )(connector->encoder)->base.crtc != (unsigned long )(& crtc->base)) { goto ldv_57356; } else { } connector->base.dpms = 3; connector->base.encoder = (struct drm_encoder *)0; ldv_57356: __mptr___1 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_57358: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_57357; } else { } __mptr___2 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___2 + 0xffffffffffffffe8UL; goto ldv_57365; ldv_57364: ; if ((unsigned long )(connector->encoder)->base.crtc == (unsigned long )(& crtc->base)) { (connector->encoder)->base.crtc = (struct drm_crtc *)0; (connector->encoder)->connectors_active = 0; } else { } __mptr___3 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___3 + 0xffffffffffffffe8UL; ldv_57365: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto 
ldv_57364; } else { } __ret_warn_on = (int )crtc->active; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 14870, "WARN_ON(crtc->active)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (crtc->base.state)->enable = 0; (crtc->base.state)->active = 0; crtc->base.enabled = 0; } else { } } else { } if (((int )dev_priv->quirks & 1 && (int )crtc->pipe == 0) && ! crtc->active) { intel_enable_pipe_a(dev); } else { } intel_crtc_update_dpms(& crtc->base); if ((int )crtc->active != (int )(crtc->base.state)->enable) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_sanitize_crtc", "[CRTC:%d] hw state adjusted, was %s, now %s\n", crtc->base.base.id, (int )(crtc->base.state)->enable ? (char *)"enabled" : (char *)"disabled", (int )crtc->active ? 
(char *)"enabled" : (char *)"disabled"); } else { } (crtc->base.state)->enable = crtc->active; (crtc->base.state)->active = crtc->active; crtc->base.enabled = crtc->active; __ret_warn_on___0 = (int )crtc->active; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 14909, "WARN_ON(crtc->active)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); __mptr___4 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___4 + 0xfffffffffffffff8UL; goto ldv_57379; ldv_57378: ; if ((unsigned long )encoder->base.crtc == (unsigned long )(& crtc->base)) { __ret_warn_on___1 = (int )encoder->connectors_active; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_display.c", 14912, "WARN_ON(encoder->connectors_active)"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); encoder->base.crtc = (struct drm_crtc *)0; } else { } __mptr___5 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___5 + 0xfffffffffffffff8UL; ldv_57379: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_57378; } else { } } else { } if ((int )crtc->active) { crtc->cpu_fifo_underrun_disabled = 1; crtc->pch_fifo_underrun_disabled = 1; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 4U) { crtc->cpu_fifo_underrun_disabled = 1; crtc->pch_fifo_underrun_disabled = 1; } else { 
__p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { crtc->cpu_fifo_underrun_disabled = 1; crtc->pch_fifo_underrun_disabled = 1; } else { } } } return; } }
/* intel_sanitize_encoder: if an encoder claims active connectors but has no
 * active CRTC behind it, manually run its disable/post_disable hooks and
 * detach it, then reset dpms on every connector that pointed at it.
 * (The function's connector-detach loop continues on the next line.) */
static void intel_sanitize_encoder(struct intel_encoder *encoder ) { struct intel_connector *connector ; struct drm_device *dev ; bool has_active_crtc ; struct drm_crtc const *__mptr ; int tmp ; long tmp___0 ; long tmp___1 ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { dev = encoder->base.dev; if ((unsigned long )encoder->base.crtc != (unsigned long )((struct drm_crtc *)0)) { __mptr = (struct drm_crtc const *)encoder->base.crtc; if ((int )((struct intel_crtc *)__mptr)->active) { tmp = 1; } else { tmp = 0; } } else { tmp = 0; } has_active_crtc = (bool )tmp; if ((int )encoder->connectors_active && ! has_active_crtc) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_sanitize_encoder", "[ENCODER:%d:%s] has active connectors but no active pipe!\n", encoder->base.base.id, encoder->base.name); } else { } if ((unsigned long )encoder->base.crtc != (unsigned long )((struct drm_crtc *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_sanitize_encoder", "[ENCODER:%d:%s] manually disabled\n", encoder->base.base.id, encoder->base.name); } else { } (*(encoder->disable))(encoder); if ((unsigned long )encoder->post_disable != (unsigned long )((void (*)(struct intel_encoder * ))0)) { (*(encoder->post_disable))(encoder); } else { } } else { } encoder->base.crtc = (struct drm_crtc *)0; encoder->connectors_active = 0; __mptr___0 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_57408; ldv_57407: ; if ((unsigned long )connector->encoder != (unsigned long )encoder) { goto ldv_57406; } else { } connector->base.dpms = 3; 
/* (continuation of intel_sanitize_encoder(): finish detaching connectors) */
connector->base.encoder = (struct drm_encoder *)0; ldv_57406: __mptr___1 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_57408: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_57407; } else { } } else { } return; } }
/* i915_redisable_vga_power_on: if the VGA control register's disable bit
 * (top bit) is clear -- something re-enabled the VGA plane -- log it and
 * disable VGA again via i915_disable_vga(). */
void i915_redisable_vga_power_on(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 vga_reg ; uint32_t tmp ; long tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = i915_vgacntrl_reg(dev); vga_reg = tmp; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )vga_reg, 1); if ((int )tmp___1 >= 0) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i915_redisable_vga_power_on", "Something enabled VGA plane, disabling it\n"); } else { } i915_disable_vga(dev); } else { } return; } }
/* i915_redisable_vga: same as above, but only when display power well 21 is
 * up (register access would be unsafe/pointless otherwise). */
void i915_redisable_vga(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; bool tmp ; int tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_display_power_is_enabled(dev_priv, 21); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } i915_redisable_vga_power_on(dev); return; } }
/* primary_get_hw_state: report whether this CRTC's primary plane is enabled
 * in hardware -- false for inactive CRTCs, otherwise the enable (top) bit of
 * the plane control register at pipe_offsets[plane] + 459136U. */
static bool primary_get_hw_state(struct intel_crtc *crtc ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)(crtc->base.dev)->dev_private; if (! 
crtc->active) { return (0); } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )crtc->plane] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U), 1); return ((tmp & 2147483648U) != 0U); } }
/* intel_modeset_readout_hw_state: rebuild software state from hardware.
 * Pass 1: per CRTC, zero crtc->config, call display.get_pipe_config and
 * primary_get_hw_state.  Pass 2: per shared DPLL, read hw state and count the
 * CRTCs referencing it (taking a power-domain ref when in use).  Pass 3: per
 * encoder, get_hw_state/get_config and attach to its CRTC.  Pass 4: per
 * connector, sync dpms and the connector->encoder link.
 * (Function body continues onto the next line, which is left untouched.) */
static void intel_modeset_readout_hw_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; struct intel_crtc *crtc ; struct intel_encoder *encoder ; struct intel_connector *connector ; int i ; struct list_head const *__mptr ; struct drm_plane *primary ; struct intel_plane_state *plane_state ; struct drm_plane_state const *__mptr___0 ; long tmp ; struct list_head const *__mptr___1 ; struct intel_shared_dpll *pll ; struct list_head const *__mptr___2 ; struct intel_shared_dpll *tmp___0 ; struct list_head const *__mptr___3 ; long tmp___1 ; struct list_head const *__mptr___4 ; struct drm_crtc const *__mptr___5 ; bool tmp___2 ; long tmp___3 ; struct list_head const *__mptr___6 ; struct list_head const *__mptr___7 ; bool tmp___4 ; long tmp___5 ; struct list_head const *__mptr___8 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_57443; ldv_57442: primary = crtc->base.primary; memset((void *)crtc->config, 0, 752UL); (crtc->config)->quirks = (crtc->config)->quirks | 2UL; crtc->active = (*(dev_priv->display.get_pipe_config))(crtc, crtc->config); (crtc->base.state)->enable = crtc->active; (crtc->base.state)->active = crtc->active; crtc->base.enabled = crtc->active; __mptr___0 = (struct drm_plane_state const *)primary->state; plane_state = (struct intel_plane_state *)__mptr___0; plane_state->visible = primary_get_hw_state(crtc); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_modeset_readout_hw_state", "[CRTC:%d] hw state readout: %s\n", 
crtc->base.base.id, (int )crtc->active ? (char *)"enabled" : (char *)"disabled"); } else { } __mptr___1 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___1 + 0xfffffffffffffff0UL; ldv_57443: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_57442; } else { } i = 0; goto ldv_57454; ldv_57453: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; pll->on = (*(pll->get_hw_state))(dev_priv, pll, & pll->config.hw_state); pll->active = 0; pll->config.crtc_mask = 0U; __mptr___2 = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr___2 + 0xfffffffffffffff0UL; goto ldv_57451; ldv_57450: ; if ((int )crtc->active) { tmp___0 = intel_crtc_to_shared_dpll(crtc); if ((unsigned long )tmp___0 == (unsigned long )pll) { pll->active = pll->active + 1; pll->config.crtc_mask = pll->config.crtc_mask | (unsigned int )(1 << (int )crtc->pipe); } else { } } else { } __mptr___3 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___3 + 0xfffffffffffffff0UL; ldv_57451: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_57450; } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_modeset_readout_hw_state", "%s hw state readout: crtc_mask 0x%08x, on %i\n", pll->name, pll->config.crtc_mask, (int )pll->on); } else { } if (pll->config.crtc_mask != 0U) { intel_display_power_get(dev_priv, 23); } else { } i = i + 1; ldv_57454: ; if (dev_priv->num_shared_dpll > i) { goto ldv_57453; } else { } __mptr___4 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___4 + 0xfffffffffffffff8UL; goto ldv_57463; ldv_57462: pipe = 0; tmp___2 = (*(encoder->get_hw_state))(encoder, & pipe); if ((int )tmp___2) { __mptr___5 = (struct drm_crtc const 
*)dev_priv->pipe_to_crtc_mapping[(int )pipe]; crtc = (struct intel_crtc *)__mptr___5; encoder->base.crtc = & crtc->base; (*(encoder->get_config))(encoder, crtc->config); } else { encoder->base.crtc = (struct drm_crtc *)0; } encoder->connectors_active = 0; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_modeset_readout_hw_state", "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", encoder->base.base.id, encoder->base.name, (unsigned long )encoder->base.crtc != (unsigned long )((struct drm_crtc *)0) ? (char *)"enabled" : (char *)"disabled", (int )pipe + 65); } else { } __mptr___6 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___6 + 0xfffffffffffffff8UL; ldv_57463: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_57462; } else { } __mptr___7 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___7 + 0xffffffffffffffe8UL; goto ldv_57470; ldv_57469: tmp___4 = (*(connector->get_hw_state))(connector); if ((int )tmp___4) { connector->base.dpms = 0; (connector->encoder)->connectors_active = 1; connector->base.encoder = & (connector->encoder)->base; } else { connector->base.dpms = 3; connector->base.encoder = (struct drm_encoder *)0; } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_modeset_readout_hw_state", "[CONNECTOR:%d:%s] hw state readout: %s\n", connector->base.base.id, connector->base.name, (unsigned long )connector->base.encoder != (unsigned long )((struct drm_encoder *)0) ? 
(char *)"enabled" : (char *)"disabled"); } else { } __mptr___8 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___8 + 0xffffffffffffffe8UL; ldv_57470: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_57469; } else { } return; } } void intel_modeset_setup_hw_state(struct drm_device *dev , bool force_restore ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; struct intel_crtc *crtc ; struct intel_encoder *encoder ; int i ; struct list_head const *__mptr ; long tmp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct drm_crtc const *__mptr___3 ; struct drm_i915_private *__p ; struct intel_shared_dpll *pll ; long tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_crtc *crtc___0 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_modeset_readout_hw_state(dev); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_57487; ldv_57486: ; if ((int )crtc->active && (int )i915.fastboot) { intel_mode_from_pipe_config(& crtc->base.mode, crtc->config); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_modeset_setup_hw_state", "[CRTC:%d] found active mode: ", crtc->base.base.id); } else { } drm_mode_debug_printmodeline((struct drm_display_mode const *)(& crtc->base.mode)); } else { } __mptr___0 = (struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_57487: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_57486; } else { } __mptr___1 = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; 
goto ldv_57494; ldv_57493: intel_sanitize_encoder(encoder); __mptr___2 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___2 + 0xfffffffffffffff8UL; ldv_57494: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_57493; } else { } pipe = 0; goto ldv_57505; ldv_57504: __mptr___3 = (struct drm_crtc const *)dev_priv->pipe_to_crtc_mapping[(int )pipe]; crtc = (struct intel_crtc *)__mptr___3; intel_sanitize_crtc(crtc); intel_dump_pipe_config(crtc, crtc->config, "[setup_hw_state]"); pipe = (enum pipe )((int )pipe + 1); ldv_57505: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_57504; } else { } intel_modeset_update_connector_atomic_state(dev); i = 0; goto ldv_57510; ldv_57509: pll = (struct intel_shared_dpll *)(& dev_priv->shared_dplls) + (unsigned long )i; if (! pll->on || pll->active != 0) { goto ldv_57508; } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_modeset_setup_hw_state", "%s enabled but not in use, disabling\n", pll->name); } else { } (*(pll->disable))(dev_priv, pll); pll->on = 0; ldv_57508: i = i + 1; ldv_57510: ; if (dev_priv->num_shared_dpll > i) { goto ldv_57509; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 9U) { skl_wm_get_hw_state(dev); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 0U) { ilk_wm_get_hw_state(dev); } else { } } if ((int )force_restore) { i915_redisable_vga(dev); pipe = 0; goto ldv_57532; ldv_57531: crtc___0 = dev_priv->pipe_to_crtc_mapping[(int )pipe]; intel_crtc_restore_mode(crtc___0); pipe = (enum pipe )((int )pipe + 1); ldv_57532: __p___2 = dev_priv; if ((int )__p___2->info.num_pipes > (int )pipe) { goto ldv_57531; } else { } } else { intel_modeset_update_staged_output_state(dev); } intel_modeset_check_state(dev); return; 
} } void intel_modeset_gem_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_crtc *c ; struct drm_i915_gem_object *obj ; int ret ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct list_head const *__mptr ; struct drm_framebuffer const *__mptr___0 ; struct drm_crtc const *__mptr___1 ; struct list_head const *__mptr___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev->struct_mutex, 0U); intel_init_gt_powersave(dev); mutex_unlock(& dev->struct_mutex); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 1); dev_priv->vbt.lvds_use_ssc = (tmp & 2U) != 0U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811520L, 1); dev_priv->vbt.lvds_use_ssc = (tmp & 2U) != 0U; } else { } } intel_modeset_init_hw(dev); intel_setup_overlay(dev); __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; c = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_57563; ldv_57562: ; if ((unsigned long )(c->primary)->fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___0 = (struct drm_framebuffer const *)(c->primary)->fb; obj = ((struct intel_framebuffer *)__mptr___0)->obj; } else { obj = (struct drm_i915_gem_object *)0; } if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { goto ldv_57559; } else { } mutex_lock_nested(& dev->struct_mutex, 0U); ret = intel_pin_and_fence_fb_obj(c->primary, (c->primary)->fb, (struct drm_plane_state const *)(c->primary)->state, (struct intel_engine_cs *)0); mutex_unlock(& dev->struct_mutex); if (ret != 0) { __mptr___1 = (struct drm_crtc const *)c; drm_err("failed to pin boot fb on pipe %d\n", (int )((struct intel_crtc *)__mptr___1)->pipe); drm_framebuffer_unreference((c->primary)->fb); 
(c->primary)->fb = (struct drm_framebuffer *)0; update_state_fb(c->primary); } else { } ldv_57559: __mptr___2 = (struct list_head const *)c->head.next; c = (struct drm_crtc *)__mptr___2 + 0xfffffffffffffff0UL; ldv_57563: ; if ((unsigned long )(& c->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_57562; } else { } intel_backlight_register(dev); return; } } void intel_connector_unregister(struct intel_connector *intel_connector ) { struct drm_connector *connector ; { connector = & intel_connector->base; intel_panel_destroy_backlight(connector); drm_connector_unregister(connector); return; } } void intel_modeset_cleanup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_connector *connector ; struct list_head const *__mptr ; struct intel_connector *intel_connector ; struct drm_connector const *__mptr___0 ; struct list_head const *__mptr___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_disable_gt_powersave(dev); intel_backlight_unregister(dev); intel_irq_uninstall(dev_priv); drm_kms_helper_poll_fini(dev); mutex_lock_nested(& dev->struct_mutex, 0U); intel_unregister_dsm_handler(); intel_fbc_disable(dev); mutex_unlock(& dev->struct_mutex); flush_scheduled_work(); __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_57582; ldv_57581: __mptr___0 = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr___0; (*(intel_connector->unregister))(intel_connector); __mptr___1 = (struct list_head const *)connector->head.next; connector = (struct drm_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_57582: ; if ((unsigned long )(& connector->head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_57581; } else { } drm_mode_config_cleanup(dev); intel_cleanup_overlay(dev); mutex_lock_nested(& dev->struct_mutex, 0U); intel_cleanup_gt_powersave(dev); mutex_unlock(& 
dev->struct_mutex); return; } }
/*
 * intel_best_encoder - drm_connector helper: return the DRM encoder object
 * embedded in the intel_encoder currently attached to @connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector ) { struct intel_encoder *tmp ;
  {
  /* intel_attached_encoder() resolves connector -> intel_encoder; hand back
   * the embedded drm_encoder base object. */
  tmp = intel_attached_encoder(connector);
  return (& tmp->base);
} }
/*
 * intel_connector_attach_encoder - record @encoder as @connector's fixed
 * encoder, both on the intel_connector and on the underlying DRM objects.
 */
void intel_connector_attach_encoder(struct intel_connector *connector , struct intel_encoder *encoder ) { {
  connector->encoder = encoder;
  drm_mode_connector_attach_encoder(& connector->base, & encoder->base);
  return;
} }
/*
 * intel_modeset_vga_set_state - enable/disable legacy VGA decode for the GPU
 * by flipping a bit in the bridge device's PCI config space.
 * Returns 0 on success, -5 (-EIO) if the config-space read or write fails.
 * (The function body continues on the next source line; only its leading
 * part is visible here.)
 */
int intel_modeset_vga_set_state(struct drm_device *dev , bool state ) { struct drm_i915_private *dev_priv ; unsigned int reg ; struct drm_i915_private *__p ; u16 gmch_ctrl ; int tmp ; int tmp___0 ;
  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  __p = to_i915((struct drm_device const *)dev);
  /* Config-word offset: 80 (0x50) on gen > 5, else 82 (0x52) — presumably
   * SNB_GMCH_CTRL vs. INTEL_GMCH_CTRL; confirm against the i915 register
   * definitions. */
  reg = (unsigned int )((unsigned char )__p->info.gen) > 5U ? 80U : 82U;
  tmp = pci_read_config_word((struct pci_dev const *)dev_priv->bridge_dev, (int )reg, & gmch_ctrl);
  if (tmp != 0) { drm_err("failed to read control word\n"); return (-5); } else { }
  /* Mask 2 looks like the VGA-disable bit: if the current setting already
   * matches the requested @state, the continuation on the next line returns
   * 0 without writing. NOTE(review): bit meaning inferred from the
   * set/clear logic below — confirm against INTEL_GMCH_VGA_DISABLE. */
  if ((((int )gmch_ctrl & 2) != 0) == !
state) { return (0); } else { } if ((int )state) { gmch_ctrl = (unsigned int )gmch_ctrl & 65533U; } else { gmch_ctrl = (u16 )((unsigned int )gmch_ctrl | 2U); } tmp___0 = pci_write_config_word((struct pci_dev const *)dev_priv->bridge_dev, (int )reg, (int )gmch_ctrl); if (tmp___0 != 0) { drm_err("failed to write control word\n"); return (-5); } else { } return (0); } } struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_display_error_state *error ; int transcoders[4U] ; int i ; struct drm_i915_private *__p ; void *tmp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; enum transcoder cpu_transcoder ; { dev_priv = (struct drm_i915_private *)dev->dev_private; transcoders[0] = 0; transcoders[1] = 1; transcoders[2] = 2; transcoders[3] = 3; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 38UL) == 0U) { return ((struct intel_display_error_state *)0); } else { } tmp = kzalloc(320UL, 32U); error = (struct intel_display_error_state *)tmp; if ((unsigned long )error == (unsigned long )((struct intel_display_error_state *)0)) { return ((struct intel_display_error_state *)0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { error->power_well_driver = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int 
)((unsigned char )__p___2->info.gen) == 8U) { error->power_well_driver = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 283652L, 1); } else { } } else { } } i = 0; goto ldv_57712; ldv_57711: error->pipe[i].power_domain_on = __intel_display_power_is_enabled(dev_priv, (enum intel_display_power_domain )i); if (! error->pipe[i].power_domain_on) { goto ldv_57674; } else { } error->cursor[i].control = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[i] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458880U), 1); error->cursor[i].position = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[i] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458888U), 1); error->cursor[i].base = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.cursor_offsets[i] - dev_priv->info.cursor_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458884U), 1); error->plane[i].control = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459136U), 1); error->plane[i].stride = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459144U), 1); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) <= 3U) { error->plane[i].size = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459152U), 1); error->plane[i].pos = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned 
int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459148U), 1); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) <= 7U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { error->plane[i].addr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459140U), 1); } else { } } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 3U) { error->plane[i].surface = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459164U), 1); error->plane[i].tile_offset = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 459172U), 1); } else { } error->pipe[i].source = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[i] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393244U), 1); __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) <= 4U) { error->pipe[i].stat = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { error->pipe[i].stat = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[i] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U), 1); } else { } } ldv_57674: i = i + 1; ldv_57712: __p___9 = dev_priv; if ((int )__p___9->info.num_pipes > i) { goto ldv_57711; } else { } __p___10 = to_i915((struct drm_device const *)dev); error->num_transcoders = (int )__p___10->info.num_pipes; __p___11 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___11 + 46UL) != 0U) { error->num_transcoders = error->num_transcoders + 1; } else { } i = 0; goto ldv_57729; ldv_57728: cpu_transcoder = (enum transcoder )transcoders[i]; error->transcoder[i].power_domain_on = __intel_display_power_is_enabled(dev_priv, (unsigned int )cpu_transcoder != 3U ? (enum intel_display_power_domain )((unsigned int )cpu_transcoder + 6U) : 9); if (! error->transcoder[i].power_domain_on) { goto ldv_57727; } else { } error->transcoder[i].cpu_transcoder = cpu_transcoder; error->transcoder[i].conf = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); error->transcoder[i].htotal = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393216U), 1); error->transcoder[i].hblank = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393220U), 1); error->transcoder[i].hsync = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned 
int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393224U), 1); error->transcoder[i].vtotal = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U), 1); error->transcoder[i].vblank = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393232U), 1); error->transcoder[i].vsync = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393236U), 1); ldv_57727: i = i + 1; ldv_57729: ; if (error->num_transcoders > i) { goto ldv_57728; } else { } return (error); } } void intel_display_print_error_state(struct drm_i915_error_state_buf *m , struct drm_device *dev , struct intel_display_error_state *error ) { struct drm_i915_private *dev_priv ; int i ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )error == (unsigned long )((struct intel_display_error_state *)0)) { return; } else { } __p = to_i915((struct drm_device const *)dev); i915_error_printf(m, "Num Pipes: %d\n", (int )__p->info.num_pipes); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { i915_error_printf(m, "PWR_WELL_CTL2: %08x\n", 
error->power_well_driver); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { i915_error_printf(m, "PWR_WELL_CTL2: %08x\n", error->power_well_driver); } else { } } else { } } i = 0; goto ldv_57793; ldv_57792: i915_error_printf(m, "Pipe [%d]:\n", i); i915_error_printf(m, " Power: %s\n", (int )error->pipe[i].power_domain_on ? (char *)"on" : (char *)"off"); i915_error_printf(m, " SRC: %08x\n", error->pipe[i].source); i915_error_printf(m, " STAT: %08x\n", error->pipe[i].stat); i915_error_printf(m, "Plane [%d]:\n", i); i915_error_printf(m, " CNTR: %08x\n", error->plane[i].control); i915_error_printf(m, " STRIDE: %08x\n", error->plane[i].stride); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) <= 3U) { i915_error_printf(m, " SIZE: %08x\n", error->plane[i].size); i915_error_printf(m, " POS: %08x\n", error->plane[i].pos); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) <= 7U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { i915_error_printf(m, " ADDR: %08x\n", error->plane[i].addr); } else { } } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 3U) { i915_error_printf(m, " SURF: %08x\n", error->plane[i].surface); i915_error_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); } else { } i915_error_printf(m, "Cursor [%d]:\n", i); i915_error_printf(m, " CNTR: %08x\n", error->cursor[i].control); i915_error_printf(m, " POS: %08x\n", error->cursor[i].position); i915_error_printf(m, " BASE: %08x\n", error->cursor[i].base); i = i + 1; ldv_57793: __p___7 = dev_priv; if ((int )__p___7->info.num_pipes > i) { goto ldv_57792; } else { } 
i = 0;
/* Transcoder dump loop (the function header is on an earlier source line):
 * print the captured per-transcoder registers for each of the
 * error->num_transcoders entries recorded by the capture routine. */
goto ldv_57796;
ldv_57795:
/* "+ 65U" renders the transcoder id as a letter: 'A' + index. */
i915_error_printf(m, "CPU transcoder: %c\n", (unsigned int )error->transcoder[i].cpu_transcoder + 65U);
i915_error_printf(m, " Power: %s\n", (int )error->transcoder[i].power_domain_on ? (char *)"on" : (char *)"off");
i915_error_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
i915_error_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
i915_error_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
i915_error_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
i915_error_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
i915_error_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
i915_error_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
i = i + 1;
ldv_57796: ;
if (error->num_transcoders > i) {
goto ldv_57795;
} else {
}
return;
}
}
/*
 * intel_modeset_preclose - run when a DRM file handle is closed: walk every
 * CRTC and, under dev->event_lock, free any pending page-flip completion
 * event (crtc->unpin_work->event) that was requested by the closing @file,
 * so the event is never delivered to the dead client.
 */
void intel_modeset_preclose(struct drm_device *dev , struct drm_file *file ) { struct intel_crtc *crtc ; struct list_head const *__mptr ; struct intel_unpin_work *work ; struct list_head const *__mptr___0 ;
{
/* container_of-style walk of dev->mode_config.crtc_list: the wraparound
 * constant adds -16, presumably the offset of base.head inside
 * struct intel_crtc as emitted by CIL — confirm against the struct layout. */
__mptr = (struct list_head const *)dev->mode_config.crtc_list.next;
crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL;
goto ldv_57809;
ldv_57808:
spin_lock_irq(& dev->event_lock);
work = crtc->unpin_work;
/* Only drop the event when a flip is pending, it carries an event, and that
 * event belongs to the file being closed. */
if (((unsigned long )work != (unsigned long )((struct intel_unpin_work *)0) && (unsigned long )work->event != (unsigned long )((struct drm_pending_vblank_event *)0)) && (unsigned long )(work->event)->base.file_priv == (unsigned long )file) {
kfree((void const *)work->event);
work->event = (struct drm_pending_vblank_event *)0;
} else {
}
spin_unlock_irq(& dev->event_lock);
__mptr___0 = (struct list_head const *)crtc->base.head.next;
crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL;
ldv_57809: ;
if ((unsigned long )(& crtc->base.head) != (unsigned long )(& dev->mode_config.crtc_list)) {
goto ldv_57808;
} else {
}
return;
}
}
/* LDV harness probe hooks, defined elsewhere in the generated file. */
extern int ldv_probe_69(void) ;
extern int ldv_probe_71(void) ;
extern int ldv_probe_70(void) ;
void
ldv_initialize_drm_crtc_funcs_71(void) { void *tmp ; { tmp = ldv_init_zalloc(1160UL); intel_crtc_funcs_group0 = (struct drm_crtc *)tmp; return; } } void call_and_disable_all_14(int state ) { { if (ldv_work_14_0 == state) { call_and_disable_work_14(ldv_work_struct_14_0); } else { } if (ldv_work_14_1 == state) { call_and_disable_work_14(ldv_work_struct_14_1); } else { } if (ldv_work_14_2 == state) { call_and_disable_work_14(ldv_work_struct_14_2); } else { } if (ldv_work_14_3 == state) { call_and_disable_work_14(ldv_work_struct_14_3); } else { } return; } } void activate_work_13(struct work_struct *work , int state ) { { if (ldv_work_13_0 == 0) { ldv_work_struct_13_0 = work; ldv_work_13_0 = state; return; } else { } if (ldv_work_13_1 == 0) { ldv_work_struct_13_1 = work; ldv_work_13_1 = state; return; } else { } if (ldv_work_13_2 == 0) { ldv_work_struct_13_2 = work; ldv_work_13_2 = state; return; } else { } if (ldv_work_13_3 == 0) { ldv_work_struct_13_3 = work; ldv_work_13_3 = state; return; } else { } return; } } void call_and_disable_work_14(struct work_struct *work ) { { if ((ldv_work_14_0 == 2 || ldv_work_14_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_14_0) { intel_unpin_work_fn(work); ldv_work_14_0 = 1; return; } else { } if ((ldv_work_14_1 == 2 || ldv_work_14_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_14_1) { intel_unpin_work_fn(work); ldv_work_14_1 = 1; return; } else { } if ((ldv_work_14_2 == 2 || ldv_work_14_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_14_2) { intel_unpin_work_fn(work); ldv_work_14_2 = 1; return; } else { } if ((ldv_work_14_3 == 2 || ldv_work_14_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_14_3) { intel_unpin_work_fn(work); ldv_work_14_3 = 1; return; } else { } return; } } void ldv_initialize_drm_framebuffer_funcs_69(void) { void *tmp ; { tmp = ldv_init_zalloc(168UL); intel_fb_funcs_group0 = (struct drm_framebuffer *)tmp; return; } } void 
ldv_initialize_drm_mode_config_funcs_68(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(3320UL); intel_mode_funcs_group0 = (struct drm_device *)tmp; tmp___0 = ldv_init_zalloc(80UL); intel_mode_funcs_group1 = (struct drm_atomic_state *)tmp___0; return; } } void call_and_disable_work_13(struct work_struct *work ) { { if ((ldv_work_13_0 == 2 || ldv_work_13_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_13_0) { intel_mmio_flip_work_func(work); ldv_work_13_0 = 1; return; } else { } if ((ldv_work_13_1 == 2 || ldv_work_13_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_13_1) { intel_mmio_flip_work_func(work); ldv_work_13_1 = 1; return; } else { } if ((ldv_work_13_2 == 2 || ldv_work_13_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_13_2) { intel_mmio_flip_work_func(work); ldv_work_13_2 = 1; return; } else { } if ((ldv_work_13_3 == 2 || ldv_work_13_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_13_3) { intel_mmio_flip_work_func(work); ldv_work_13_3 = 1; return; } else { } return; } } void disable_work_14(struct work_struct *work ) { { if ((ldv_work_14_0 == 3 || ldv_work_14_0 == 2) && (unsigned long )ldv_work_struct_14_0 == (unsigned long )work) { ldv_work_14_0 = 1; } else { } if ((ldv_work_14_1 == 3 || ldv_work_14_1 == 2) && (unsigned long )ldv_work_struct_14_1 == (unsigned long )work) { ldv_work_14_1 = 1; } else { } if ((ldv_work_14_2 == 3 || ldv_work_14_2 == 2) && (unsigned long )ldv_work_struct_14_2 == (unsigned long )work) { ldv_work_14_2 = 1; } else { } if ((ldv_work_14_3 == 3 || ldv_work_14_3 == 2) && (unsigned long )ldv_work_struct_14_3 == (unsigned long )work) { ldv_work_14_3 = 1; } else { } return; } } void work_init_14(void) { { ldv_work_14_0 = 0; ldv_work_14_1 = 0; ldv_work_14_2 = 0; ldv_work_14_3 = 0; return; } } void ldv_initialize_drm_plane_funcs_70(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(704UL); intel_plane_funcs_group0 = (struct 
drm_plane *)tmp; tmp___0 = ldv_init_zalloc(104UL); intel_plane_funcs_group2 = (struct drm_property *)tmp___0; tmp___1 = ldv_init_zalloc(80UL); intel_plane_funcs_group1 = (struct drm_plane_state *)tmp___1; return; } } void invoke_work_14(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_14_0 == 2 || ldv_work_14_0 == 3) { ldv_work_14_0 = 4; intel_unpin_work_fn(ldv_work_struct_14_0); ldv_work_14_0 = 1; } else { } goto ldv_57858; case 1: ; if (ldv_work_14_1 == 2 || ldv_work_14_1 == 3) { ldv_work_14_1 = 4; intel_unpin_work_fn(ldv_work_struct_14_0); ldv_work_14_1 = 1; } else { } goto ldv_57858; case 2: ; if (ldv_work_14_2 == 2 || ldv_work_14_2 == 3) { ldv_work_14_2 = 4; intel_unpin_work_fn(ldv_work_struct_14_0); ldv_work_14_2 = 1; } else { } goto ldv_57858; case 3: ; if (ldv_work_14_3 == 2 || ldv_work_14_3 == 3) { ldv_work_14_3 = 4; intel_unpin_work_fn(ldv_work_struct_14_0); ldv_work_14_3 = 1; } else { } goto ldv_57858; default: ldv_stop(); } ldv_57858: ; return; } } void work_init_13(void) { { ldv_work_13_0 = 0; ldv_work_13_1 = 0; ldv_work_13_2 = 0; ldv_work_13_3 = 0; return; } } void disable_work_13(struct work_struct *work ) { { if ((ldv_work_13_0 == 3 || ldv_work_13_0 == 2) && (unsigned long )ldv_work_struct_13_0 == (unsigned long )work) { ldv_work_13_0 = 1; } else { } if ((ldv_work_13_1 == 3 || ldv_work_13_1 == 2) && (unsigned long )ldv_work_struct_13_1 == (unsigned long )work) { ldv_work_13_1 = 1; } else { } if ((ldv_work_13_2 == 3 || ldv_work_13_2 == 2) && (unsigned long )ldv_work_struct_13_2 == (unsigned long )work) { ldv_work_13_2 = 1; } else { } if ((ldv_work_13_3 == 3 || ldv_work_13_3 == 2) && (unsigned long )ldv_work_struct_13_3 == (unsigned long )work) { ldv_work_13_3 = 1; } else { } return; } } void ldv_initialize_drm_crtc_helper_funcs_72(void) { void *tmp ; { tmp = ldv_init_zalloc(1160UL); intel_helper_funcs_group0 = (struct drm_crtc *)tmp; return; } } void invoke_work_13(void) { int tmp ; { tmp = 
__VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_13_0 == 2 || ldv_work_13_0 == 3) { ldv_work_13_0 = 4; intel_mmio_flip_work_func(ldv_work_struct_13_0); ldv_work_13_0 = 1; } else { } goto ldv_57878; case 1: ; if (ldv_work_13_1 == 2 || ldv_work_13_1 == 3) { ldv_work_13_1 = 4; intel_mmio_flip_work_func(ldv_work_struct_13_0); ldv_work_13_1 = 1; } else { } goto ldv_57878; case 2: ; if (ldv_work_13_2 == 2 || ldv_work_13_2 == 3) { ldv_work_13_2 = 4; intel_mmio_flip_work_func(ldv_work_struct_13_0); ldv_work_13_2 = 1; } else { } goto ldv_57878; case 3: ; if (ldv_work_13_3 == 2 || ldv_work_13_3 == 3) { ldv_work_13_3 = 4; intel_mmio_flip_work_func(ldv_work_struct_13_0); ldv_work_13_3 = 1; } else { } goto ldv_57878; default: ldv_stop(); } ldv_57878: ; return; } } void activate_work_14(struct work_struct *work , int state ) { { if (ldv_work_14_0 == 0) { ldv_work_struct_14_0 = work; ldv_work_14_0 = state; return; } else { } if (ldv_work_14_1 == 0) { ldv_work_struct_14_1 = work; ldv_work_14_1 = state; return; } else { } if (ldv_work_14_2 == 0) { ldv_work_struct_14_2 = work; ldv_work_14_2 = state; return; } else { } if (ldv_work_14_3 == 0) { ldv_work_struct_14_3 = work; ldv_work_14_3 = state; return; } else { } return; } } void call_and_disable_all_13(int state ) { { if (ldv_work_13_0 == state) { call_and_disable_work_13(ldv_work_struct_13_0); } else { } if (ldv_work_13_1 == state) { call_and_disable_work_13(ldv_work_struct_13_1); } else { } if (ldv_work_13_2 == state) { call_and_disable_work_13(ldv_work_struct_13_2); } else { } if (ldv_work_13_3 == state) { call_and_disable_work_13(ldv_work_struct_13_3); } else { } return; } } void ldv_main_exported_69(void) { unsigned int *ldvarg161 ; void *tmp ; struct drm_file *ldvarg162 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(4UL); ldvarg161 = (unsigned int *)tmp; tmp___0 = ldv_init_zalloc(744UL); ldvarg162 = (struct drm_file *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if 
(ldv_state_variable_69 == 2) { intel_user_framebuffer_destroy(intel_fb_funcs_group0); ldv_state_variable_69 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_57896; case 1: ; if (ldv_state_variable_69 == 1) { intel_user_framebuffer_create_handle(intel_fb_funcs_group0, ldvarg162, ldvarg161); ldv_state_variable_69 = 1; } else { } if (ldv_state_variable_69 == 2) { intel_user_framebuffer_create_handle(intel_fb_funcs_group0, ldvarg162, ldvarg161); ldv_state_variable_69 = 2; } else { } goto ldv_57896; case 2: ; if (ldv_state_variable_69 == 1) { ldv_probe_69(); ldv_state_variable_69 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_57896; default: ldv_stop(); } ldv_57896: ; return; } } void ldv_main_exported_72(void) { struct drm_framebuffer *ldvarg289 ; void *tmp ; int ldvarg287 ; int ldvarg288 ; enum mode_set_atomic ldvarg290 ; int tmp___0 ; { tmp = ldv_init_zalloc(168UL); ldvarg289 = (struct drm_framebuffer *)tmp; ldv_memset((void *)(& ldvarg287), 0, 4UL); ldv_memset((void *)(& ldvarg288), 0, 4UL); ldv_memset((void *)(& ldvarg290), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_72 == 1) { intel_crtc_load_lut(intel_helper_funcs_group0); ldv_state_variable_72 = 1; } else { } goto ldv_57908; case 1: ; if (ldv_state_variable_72 == 1) { intel_finish_crtc_commit(intel_helper_funcs_group0); ldv_state_variable_72 = 1; } else { } goto ldv_57908; case 2: ; if (ldv_state_variable_72 == 1) { intel_begin_crtc_commit(intel_helper_funcs_group0); ldv_state_variable_72 = 1; } else { } goto ldv_57908; case 3: ; if (ldv_state_variable_72 == 1) { intel_pipe_set_base_atomic(intel_helper_funcs_group0, ldvarg289, ldvarg288, ldvarg287, ldvarg290); ldv_state_variable_72 = 1; } else { } goto ldv_57908; default: ldv_stop(); } ldv_57908: ; return; } } void ldv_main_exported_71(void) { struct drm_mode_set *ldvarg9 ; void *tmp ; uint32_t ldvarg10 ; u16 *ldvarg13 ; void *tmp___0 ; uint32_t ldvarg14 ; struct drm_framebuffer *ldvarg17 ; void *tmp___1 ; 
uint32_t ldvarg15 ; struct drm_pending_vblank_event *ldvarg16 ; void *tmp___2 ; u16 *ldvarg12 ; void *tmp___3 ; u16 *ldvarg11 ; void *tmp___4 ; struct drm_crtc_state *ldvarg18 ; void *tmp___5 ; int tmp___6 ; { tmp = ldv_init_zalloc(48UL); ldvarg9 = (struct drm_mode_set *)tmp; tmp___0 = ldv_init_zalloc(2UL); ldvarg13 = (u16 *)tmp___0; tmp___1 = ldv_init_zalloc(168UL); ldvarg17 = (struct drm_framebuffer *)tmp___1; tmp___2 = ldv_init_zalloc(88UL); ldvarg16 = (struct drm_pending_vblank_event *)tmp___2; tmp___3 = ldv_init_zalloc(2UL); ldvarg12 = (u16 *)tmp___3; tmp___4 = ldv_init_zalloc(2UL); ldvarg11 = (u16 *)tmp___4; tmp___5 = ldv_init_zalloc(464UL); ldvarg18 = (struct drm_crtc_state *)tmp___5; ldv_memset((void *)(& ldvarg10), 0, 4UL); ldv_memset((void *)(& ldvarg14), 0, 4UL); ldv_memset((void *)(& ldvarg15), 0, 4UL); tmp___6 = __VERIFIER_nondet_int(); switch (tmp___6) { case 0: ; if (ldv_state_variable_71 == 1) { intel_crtc_destroy_state(intel_crtc_funcs_group0, ldvarg18); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { intel_crtc_destroy_state(intel_crtc_funcs_group0, ldvarg18); ldv_state_variable_71 = 2; } else { } goto ldv_57927; case 1: ; if (ldv_state_variable_71 == 1) { intel_crtc_duplicate_state(intel_crtc_funcs_group0); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { intel_crtc_duplicate_state(intel_crtc_funcs_group0); ldv_state_variable_71 = 2; } else { } goto ldv_57927; case 2: ; if (ldv_state_variable_71 == 1) { intel_crtc_page_flip(intel_crtc_funcs_group0, ldvarg17, ldvarg16, ldvarg15); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { intel_crtc_page_flip(intel_crtc_funcs_group0, ldvarg17, ldvarg16, ldvarg15); ldv_state_variable_71 = 2; } else { } goto ldv_57927; case 3: ; if (ldv_state_variable_71 == 2) { intel_crtc_destroy(intel_crtc_funcs_group0); ldv_state_variable_71 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_57927; case 4: ; if (ldv_state_variable_71 == 1) { 
intel_crtc_gamma_set(intel_crtc_funcs_group0, ldvarg13, ldvarg12, ldvarg11, ldvarg14, ldvarg10); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { intel_crtc_gamma_set(intel_crtc_funcs_group0, ldvarg13, ldvarg12, ldvarg11, ldvarg14, ldvarg10); ldv_state_variable_71 = 2; } else { } goto ldv_57927; case 5: ; if (ldv_state_variable_71 == 1) { intel_crtc_set_config(ldvarg9); ldv_state_variable_71 = 1; } else { } if (ldv_state_variable_71 == 2) { intel_crtc_set_config(ldvarg9); ldv_state_variable_71 = 2; } else { } goto ldv_57927; case 6: ; if (ldv_state_variable_71 == 1) { ldv_probe_71(); ldv_state_variable_71 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_57927; default: ldv_stop(); } ldv_57927: ; return; } } void ldv_main_exported_70(void) { unsigned int ldvarg402 ; uint64_t ldvarg398 ; unsigned int ldvarg399 ; int ldvarg406 ; uint32_t ldvarg403 ; int ldvarg408 ; uint64_t ldvarg409 ; uint64_t *ldvarg396 ; void *tmp ; uint32_t ldvarg405 ; uint32_t ldvarg400 ; struct drm_plane_state *ldvarg397 ; void *tmp___0 ; struct drm_crtc *ldvarg404 ; void *tmp___1 ; uint32_t ldvarg407 ; struct drm_framebuffer *ldvarg401 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg396 = (uint64_t *)tmp; tmp___0 = ldv_init_zalloc(80UL); ldvarg397 = (struct drm_plane_state *)tmp___0; tmp___1 = ldv_init_zalloc(1160UL); ldvarg404 = (struct drm_crtc *)tmp___1; tmp___2 = ldv_init_zalloc(168UL); ldvarg401 = (struct drm_framebuffer *)tmp___2; ldv_memset((void *)(& ldvarg402), 0, 4UL); ldv_memset((void *)(& ldvarg398), 0, 8UL); ldv_memset((void *)(& ldvarg399), 0, 4UL); ldv_memset((void *)(& ldvarg406), 0, 4UL); ldv_memset((void *)(& ldvarg403), 0, 4UL); ldv_memset((void *)(& ldvarg408), 0, 4UL); ldv_memset((void *)(& ldvarg409), 0, 8UL); ldv_memset((void *)(& ldvarg405), 0, 4UL); ldv_memset((void *)(& ldvarg400), 0, 4UL); ldv_memset((void *)(& ldvarg407), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_70 == 2) 
{ intel_plane_destroy_state(intel_plane_funcs_group0, intel_plane_funcs_group1); ldv_state_variable_70 = 2; } else { } if (ldv_state_variable_70 == 1) { intel_plane_destroy_state(intel_plane_funcs_group0, intel_plane_funcs_group1); ldv_state_variable_70 = 1; } else { } goto ldv_57953; case 1: ; if (ldv_state_variable_70 == 2) { drm_atomic_helper_disable_plane(intel_plane_funcs_group0); ldv_state_variable_70 = 2; } else { } if (ldv_state_variable_70 == 1) { drm_atomic_helper_disable_plane(intel_plane_funcs_group0); ldv_state_variable_70 = 1; } else { } goto ldv_57953; case 2: ; if (ldv_state_variable_70 == 2) { intel_plane_duplicate_state(intel_plane_funcs_group0); ldv_state_variable_70 = 2; } else { } if (ldv_state_variable_70 == 1) { intel_plane_duplicate_state(intel_plane_funcs_group0); ldv_state_variable_70 = 1; } else { } goto ldv_57953; case 3: ; if (ldv_state_variable_70 == 2) { drm_atomic_helper_plane_set_property(intel_plane_funcs_group0, intel_plane_funcs_group2, ldvarg409); ldv_state_variable_70 = 2; } else { } if (ldv_state_variable_70 == 1) { drm_atomic_helper_plane_set_property(intel_plane_funcs_group0, intel_plane_funcs_group2, ldvarg409); ldv_state_variable_70 = 1; } else { } goto ldv_57953; case 4: ; if (ldv_state_variable_70 == 2) { drm_atomic_helper_update_plane(intel_plane_funcs_group0, ldvarg404, ldvarg401, ldvarg406, ldvarg408, ldvarg399, ldvarg402, ldvarg405, ldvarg403, ldvarg407, ldvarg400); ldv_state_variable_70 = 2; } else { } if (ldv_state_variable_70 == 1) { drm_atomic_helper_update_plane(intel_plane_funcs_group0, ldvarg404, ldvarg401, ldvarg406, ldvarg408, ldvarg399, ldvarg402, ldvarg405, ldvarg403, ldvarg407, ldvarg400); ldv_state_variable_70 = 1; } else { } goto ldv_57953; case 5: ; if (ldv_state_variable_70 == 2) { intel_plane_destroy(intel_plane_funcs_group0); ldv_state_variable_70 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_57953; case 6: ; if (ldv_state_variable_70 == 2) { 
intel_plane_atomic_set_property(intel_plane_funcs_group0, intel_plane_funcs_group1, intel_plane_funcs_group2, ldvarg398); ldv_state_variable_70 = 2; } else { } if (ldv_state_variable_70 == 1) { intel_plane_atomic_set_property(intel_plane_funcs_group0, intel_plane_funcs_group1, intel_plane_funcs_group2, ldvarg398); ldv_state_variable_70 = 1; } else { } goto ldv_57953; case 7: ; if (ldv_state_variable_70 == 2) { intel_plane_atomic_get_property(intel_plane_funcs_group0, (struct drm_plane_state const *)ldvarg397, intel_plane_funcs_group2, ldvarg396); ldv_state_variable_70 = 2; } else { } if (ldv_state_variable_70 == 1) { intel_plane_atomic_get_property(intel_plane_funcs_group0, (struct drm_plane_state const *)ldvarg397, intel_plane_funcs_group2, ldvarg396); ldv_state_variable_70 = 1; } else { } goto ldv_57953; case 8: ; if (ldv_state_variable_70 == 1) { ldv_probe_70(); ldv_state_variable_70 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_57953; default: ldv_stop(); } ldv_57953: ; return; } } void ldv_main_exported_68(void) { struct drm_mode_fb_cmd2 *ldvarg425 ; void *tmp ; bool ldvarg424 ; struct drm_file *ldvarg426 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(104UL); ldvarg425 = (struct drm_mode_fb_cmd2 *)tmp; tmp___0 = ldv_init_zalloc(744UL); ldvarg426 = (struct drm_file *)tmp___0; ldv_memset((void *)(& ldvarg424), 0, 1UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_68 == 1) { intel_atomic_check(intel_mode_funcs_group0, intel_mode_funcs_group1); ldv_state_variable_68 = 1; } else { } goto ldv_57970; case 1: ; if (ldv_state_variable_68 == 1) { intel_user_framebuffer_create(intel_mode_funcs_group0, ldvarg426, ldvarg425); ldv_state_variable_68 = 1; } else { } goto ldv_57970; case 2: ; if (ldv_state_variable_68 == 1) { intel_fbdev_output_poll_changed(intel_mode_funcs_group0); ldv_state_variable_68 = 1; } else { } goto ldv_57970; case 3: ; if (ldv_state_variable_68 == 1) { intel_atomic_commit(intel_mode_funcs_group0, 
intel_mode_funcs_group1, (int )ldvarg424); ldv_state_variable_68 = 1; } else { } goto ldv_57970; default: ldv_stop(); } ldv_57970: ; return; } } bool ldv_queue_work_on_545(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_546(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_547(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_548(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_549(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_cancel_work_sync_550(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(ldv_func_arg1); return (ldv_func_res); } } void ldv_flush_workqueue_551(struct 
workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___14(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static struct delayed_work *to_delayed_work(struct work_struct *work ) { struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; return ((struct delayed_work *)__mptr); } } bool ldv_queue_work_on_563(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_565(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_564(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_567(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_566(struct 
workqueue_struct *ldv_func_arg1 ) ; extern bool cancel_delayed_work(struct delayed_work * ) ; bool ldv_cancel_delayed_work_568(struct delayed_work *ldv_func_arg1 ) ; __inline static bool queue_delayed_work___2(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_564(8192, wq, dwork, delay); return (tmp); } } __inline static bool schedule_delayed_work___0(struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work___2(system_wq, dwork, delay); return (tmp); } } void call_and_disable_work_15(struct work_struct *work ) ; void call_and_disable_all_15(int state ) ; void activate_work_15(struct work_struct *work , int state ) ; void invoke_work_15(void) ; void disable_work_15(struct work_struct *work ) ; __inline static bool drm_can_sleep___6(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_40006; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40006; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40006; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40006; default: __bad_percpu_size(); } ldv_40006: pscr_ret__ = pfo_ret__; goto ldv_40012; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40016; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40016; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40016; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): 
"m" (cpu_number)); goto ldv_40016; default: __bad_percpu_size(); } ldv_40016: pscr_ret__ = pfo_ret_____0; goto ldv_40012; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40025; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40025; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40025; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40025; default: __bad_percpu_size(); } ldv_40025: pscr_ret__ = pfo_ret_____1; goto ldv_40012; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40034; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40034; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40034; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40034; default: __bad_percpu_size(); } ldv_40034: pscr_ret__ = pfo_ret_____2; goto ldv_40012; default: __bad_size_call_parameter(); goto ldv_40012; } ldv_40012: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___14(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } void intel_fbc_invalidate(struct drm_i915_private *dev_priv , unsigned int frontbuffer_bits , enum fb_op_origin origin ) ; void intel_fbc_flush(struct drm_i915_private *dev_priv , unsigned int frontbuffer_bits ) ; static void i8xx_fbc_disable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 fbc_ctl ; long tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; long tmp___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->fbc.enabled = 0; fbc_ctl = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12808L, 1); if ((int )fbc_ctl >= 0) { return; } else { } fbc_ctl = fbc_ctl & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12808L, fbc_ctl, 1); tmp___0 = msecs_to_jiffies(10U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_47996; ldv_47995: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12816L, 1); if ((int )tmp___1 < 0) { ret__ = -110; } else { } goto ldv_47994; } else { } tmp___2 = drm_can_sleep___6(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_47996: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12816L, 1); if ((int )tmp___3 < 0) { goto ldv_47995; } else { } ldv_47994: ; if (ret__ != 0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i8xx_fbc_disable", "FBC idle timed out\n"); } else { } return; } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("i8xx_fbc_disable", "disabled FBC\n"); } else { } return; } } static void i8xx_fbc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_framebuffer *fb ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; int cfb_pitch ; int i ; u32 fbc_ctl ; struct drm_i915_private *__p ; u32 fbc_ctl2 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; fb = (crtc->primary)->fb; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; __mptr___0 = (struct drm_crtc const 
*)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; dev_priv->fbc.enabled = 1; cfb_pitch = (int )(dev_priv->fbc.uncompressed_size / 1536UL); if (fb->pitches[0] < (unsigned int )cfb_pitch) { cfb_pitch = (int )fb->pitches[0]; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { cfb_pitch = cfb_pitch / 32 + -1; } else { cfb_pitch = cfb_pitch / 64 + -1; } i = 0; goto ldv_48021; ldv_48020: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i + 3264) * 4), 0U, 1); i = i + 1; ldv_48021: ; if (i <= 48) { goto ldv_48020; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 4U) { fbc_ctl2 = 2U; fbc_ctl2 = (u32 )intel_crtc->plane | fbc_ctl2; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12820L, fbc_ctl2, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12824L, (uint32_t )crtc->y, 1); } else { } fbc_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12808L, 1); fbc_ctl = fbc_ctl & 1073676288U; fbc_ctl = fbc_ctl | 3221225472U; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { fbc_ctl = fbc_ctl | 8192U; } else { } fbc_ctl = (u32 )((cfb_pitch & 255) << 5) | fbc_ctl; fbc_ctl = (u32 )obj->fence_reg | fbc_ctl; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12808L, fbc_ctl, 1); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i8xx_fbc_enable", "enabled FBC, pitch %d, yoff %d, plane %c\n", cfb_pitch, crtc->y, (unsigned int )intel_crtc->plane + 65U); } else { } return; } } static bool i8xx_fbc_enabled(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12808L, 1); return ((tmp & 2147483648U) != 0U); } } static void g4x_fbc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; 
struct drm_i915_private *dev_priv ; struct drm_framebuffer *fb ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; u32 dpfc_ctl ; int tmp___0 ; long tmp___1 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; fb = (crtc->primary)->fb; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; dev_priv->fbc.enabled = 1; dpfc_ctl = ((unsigned int )intel_crtc->plane << 30) | 1024U; tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); if (tmp___0 == 2) { dpfc_ctl = dpfc_ctl | 64U; } else { dpfc_ctl = dpfc_ctl; } dpfc_ctl = ((u32 )obj->fence_reg | dpfc_ctl) | 536870912U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12824L, (uint32_t )crtc->y, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12808L, dpfc_ctl | 2147483648U, 1); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("g4x_fbc_enable", "enabled fbc on plane %c\n", (unsigned int )intel_crtc->plane + 65U); } else { } return; } } static void g4x_fbc_disable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 dpfc_ctl ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->fbc.enabled = 0; dpfc_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12808L, 1); if ((int )dpfc_ctl < 0) { dpfc_ctl = dpfc_ctl & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12808L, dpfc_ctl, 1); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("g4x_fbc_disable", "disabled FBC\n"); } else { } } else { } return; } } static bool g4x_fbc_enabled(struct drm_device *dev ) { struct 
drm_i915_private *dev_priv ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 12808L, 1); return ((tmp & 2147483648U) != 0U); } } static void intel_fbc_nuke(struct drm_i915_private *dev_priv ) { { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 328576L, 4U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 328576L, 0); return; } } static void ilk_fbc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_framebuffer *fb ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; u32 dpfc_ctl ; int tmp___0 ; struct drm_i915_private *__p ; unsigned long tmp___1 ; struct drm_i915_private *__p___0 ; long tmp___2 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; fb = (crtc->primary)->fb; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; dev_priv->fbc.enabled = 1; dpfc_ctl = (unsigned int )intel_crtc->plane << 30; tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); if (tmp___0 == 2) { dev_priv->fbc.threshold = dev_priv->fbc.threshold + 1U; } else { } switch (dev_priv->fbc.threshold) { case 4U: ; case 3U: dpfc_ctl = dpfc_ctl | 128U; goto ldv_48083; case 2U: dpfc_ctl = dpfc_ctl | 64U; goto ldv_48083; case 1U: dpfc_ctl = dpfc_ctl; goto ldv_48083; } ldv_48083: dpfc_ctl = dpfc_ctl | 536870912U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { dpfc_ctl = (u32 )obj->fence_reg | dpfc_ctl; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 274968L, (uint32_t )crtc->y, 1); 
tmp___1 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 8488L, (uint32_t )tmp___1 | 1U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 274952L, dpfc_ctl | 2147483648U, 1); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 6U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1048832L, (uint32_t )((int )obj->fence_reg | 536870912), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1048836L, (uint32_t )crtc->y, 1); } else { } intel_fbc_nuke(dev_priv); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ilk_fbc_enable", "enabled fbc on plane %c\n", (unsigned int )intel_crtc->plane + 65U); } else { } return; } } static void ilk_fbc_disable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 dpfc_ctl ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->fbc.enabled = 0; dpfc_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 274952L, 1); if ((int )dpfc_ctl < 0) { dpfc_ctl = dpfc_ctl & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 274952L, dpfc_ctl, 1); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ilk_fbc_disable", "disabled FBC\n"); } else { } } else { } return; } } static bool ilk_fbc_enabled(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 274952L, 1); return ((tmp & 2147483648U) != 0U); } } static void gen7_fbc_enable(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_framebuffer *fb ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr ; struct drm_i915_gem_object *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr___0 ; u32 dpfc_ctl ; int tmp___0 ; uint32_t tmp___1 ; uint32_t 
tmp___2 ; struct drm_i915_private *__p ; long tmp___3 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; fb = (crtc->primary)->fb; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; dev_priv->fbc.enabled = 1; dpfc_ctl = (unsigned int )intel_crtc->plane << 29; tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); if (tmp___0 == 2) { dev_priv->fbc.threshold = dev_priv->fbc.threshold + 1U; } else { } switch (dev_priv->fbc.threshold) { case 4U: ; case 3U: dpfc_ctl = dpfc_ctl | 128U; goto ldv_48124; case 2U: dpfc_ctl = dpfc_ctl | 64U; goto ldv_48124; case 1U: dpfc_ctl = dpfc_ctl; goto ldv_48124; } ldv_48124: dpfc_ctl = dpfc_ctl | 268435456U; if ((int )dev_priv->fbc.false_color) { dpfc_ctl = dpfc_ctl | 1024U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 274952L, dpfc_ctl | 2147483648U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270336L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 270336L, tmp___1 | 4194304U, 1); } else { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )intel_crtc->pipe + 67628) * 4), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((int )intel_crtc->pipe + 67628) * 4), tmp___2 | 4194304U, 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1048832L, (uint32_t )((int )obj->fence_reg | 536870912), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1048836L, (uint32_t )crtc->y, 1); intel_fbc_nuke(dev_priv); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("gen7_fbc_enable", "enabled fbc on plane %c\n", (unsigned int 
)intel_crtc->plane + 65U); } else { } return; } } bool intel_fbc_enabled(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; return (dev_priv->fbc.enabled); } } static void intel_fbc_work_fn(struct work_struct *__work ) { struct intel_fbc_work *work ; struct delayed_work const *__mptr ; struct delayed_work *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc const *__mptr___0 ; { tmp = to_delayed_work(__work); __mptr = (struct delayed_work const *)tmp; work = (struct intel_fbc_work *)__mptr; dev = (work->crtc)->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev->struct_mutex, 0U); if ((unsigned long )dev_priv->fbc.fbc_work == (unsigned long )work) { if ((unsigned long )((work->crtc)->primary)->fb == (unsigned long )work->fb) { (*(dev_priv->display.enable_fbc))(work->crtc); __mptr___0 = (struct drm_crtc const *)work->crtc; dev_priv->fbc.crtc = (struct intel_crtc *)__mptr___0; dev_priv->fbc.fb_id = (((work->crtc)->primary)->fb)->base.id; dev_priv->fbc.y = (work->crtc)->y; } else { } dev_priv->fbc.fbc_work = (struct intel_fbc_work *)0; } else { } mutex_unlock(& dev->struct_mutex); kfree((void const *)work); return; } } static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv ) { long tmp ; bool tmp___0 ; { if ((unsigned long )dev_priv->fbc.fbc_work == (unsigned long )((struct intel_fbc_work *)0)) { return; } else { } tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_fbc_cancel_work", "cancelling pending FBC enable\n"); } else { } tmp___0 = ldv_cancel_delayed_work_568(& (dev_priv->fbc.fbc_work)->work); if ((int )tmp___0) { kfree((void const *)dev_priv->fbc.fbc_work); } else { } dev_priv->fbc.fbc_work = (struct intel_fbc_work *)0; return; } } static void intel_fbc_enable(struct drm_crtc *crtc ) { struct intel_fbc_work *work ; struct drm_device *dev ; struct drm_i915_private 
*dev_priv ; void *tmp ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; struct lock_class_key __key___0 ; unsigned long tmp___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->display.enable_fbc == (unsigned long )((void (*)(struct drm_crtc * ))0)) { return; } else { } intel_fbc_cancel_work(dev_priv); tmp = kzalloc(240UL, 208U); work = (struct intel_fbc_work *)tmp; if ((unsigned long )work == (unsigned long )((struct intel_fbc_work *)0)) { drm_err("Failed to allocate FBC work structure\n"); (*(dev_priv->display.enable_fbc))(crtc); return; } else { } work->crtc = crtc; work->fb = (crtc->primary)->fb; __init_work(& work->work.work, 0); __constr_expr_0___0.counter = 137438953408L; work->work.work.data = __constr_expr_0___0; lockdep_init_map(& work->work.work.lockdep_map, "(&(&work->work)->work)", & __key, 0); INIT_LIST_HEAD(& work->work.work.entry); work->work.work.func = & intel_fbc_work_fn; init_timer_key(& work->work.timer, 2097152U, "(&(&work->work)->timer)", & __key___0); work->work.timer.function = & delayed_work_timer_fn; work->work.timer.data = (unsigned long )(& work->work); dev_priv->fbc.fbc_work = work; tmp___0 = msecs_to_jiffies(50U); schedule_delayed_work___0(& work->work, tmp___0); return; } } void intel_fbc_disable(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_fbc_cancel_work(dev_priv); if ((unsigned long )dev_priv->display.disable_fbc == (unsigned long )((void (*)(struct drm_device * ))0)) { return; } else { } (*(dev_priv->display.disable_fbc))(dev); dev_priv->fbc.crtc = (struct intel_crtc *)0; return; } } static bool set_no_fbc_reason(struct drm_i915_private *dev_priv , enum no_fbc_reason reason ) { { if ((unsigned int )dev_priv->fbc.no_fbc_reason == (unsigned int )reason) { return (0); } else { } dev_priv->fbc.no_fbc_reason = reason; return (1); } } static struct drm_crtc 
*intel_fbc_find_crtc(struct drm_i915_private *dev_priv ) { struct drm_crtc *crtc ; struct drm_crtc *tmp_crtc ; enum pipe pipe ; bool pipe_a_only ; bool one_pipe_only ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp ; bool tmp___0 ; bool tmp___1 ; struct drm_plane_state const *__mptr ; struct drm_i915_private *__p___2 ; long tmp___2 ; bool tmp___3 ; { crtc = (struct drm_crtc *)0; pipe_a_only = 0; one_pipe_only = 0; __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { pipe_a_only = 1; } else { __p___1 = dev_priv; if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { pipe_a_only = 1; } else { __p = dev_priv; if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { one_pipe_only = 1; } else { } } } pipe = 0; goto ldv_48206; ldv_48205: tmp_crtc = dev_priv->pipe_to_crtc_mapping[(int )pipe]; tmp___1 = intel_crtc_active(tmp_crtc); if ((int )tmp___1) { __mptr = (struct drm_plane_state const *)(tmp_crtc->primary)->state; if ((int )((struct intel_plane_state *)__mptr)->visible) { if ((int )one_pipe_only && (unsigned long )crtc != (unsigned long )((struct drm_crtc *)0)) { tmp___0 = set_no_fbc_reason(dev_priv, 8); if ((int )tmp___0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_fbc_find_crtc", "more than one pipe active, disabling compression\n"); } else { } } else { } return ((struct drm_crtc *)0); } else { } crtc = tmp_crtc; } else { } } else { } if ((int )pipe_a_only) { goto ldv_48204; } else { } pipe = (enum pipe )((int )pipe + 1); ldv_48206: __p___2 = dev_priv; if ((int )__p___2->info.num_pipes > (int )pipe) { goto ldv_48205; } else { } ldv_48204: ; if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0) || (unsigned long )(crtc->primary)->fb == (unsigned long )((struct drm_framebuffer *)0)) { tmp___3 = set_no_fbc_reason(dev_priv, 2); if ((int )tmp___3) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 
0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_fbc_find_crtc", "no output, disabling\n"); } else { } } else { } return ((struct drm_crtc *)0); } else { } return (crtc); } } void intel_fbc_update(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_framebuffer *fb ; struct drm_i915_gem_object *obj ; struct drm_display_mode const *adjusted_mode ; unsigned int max_width ; unsigned int max_height ; struct drm_i915_private *__p ; bool tmp ; long tmp___0 ; bool tmp___1 ; long tmp___2 ; bool tmp___3 ; struct drm_crtc const *__mptr ; struct drm_framebuffer const *__mptr___0 ; long tmp___4 ; bool tmp___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; long tmp___6 ; bool tmp___7 ; long tmp___8 ; bool tmp___9 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; long tmp___10 ; bool tmp___11 ; long tmp___12 ; bool tmp___13 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___14 ; long tmp___15 ; bool tmp___16 ; int tmp___17 ; int tmp___18 ; long tmp___19 ; bool tmp___20 ; long tmp___21 ; bool tmp___22 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = (struct drm_crtc *)0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return; } else { } tmp = intel_vgpu_active(dev); if ((int )tmp) { i915.enable_fbc = 0; } else { } if (i915.enable_fbc < 0) { tmp___1 = set_no_fbc_reason(dev_priv, 10); if ((int )tmp___1) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_fbc_update", "disabled per chip default\n"); } else { } } else { } goto out_disable; } else { } if (i915.enable_fbc == 0) { tmp___3 = 
set_no_fbc_reason(dev_priv, 9); if ((int )tmp___3) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_fbc_update", "fbc disabled per module param\n"); } else { } } else { } goto out_disable; } else { } crtc = intel_fbc_find_crtc(dev_priv); if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { goto out_disable; } else { } __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; fb = (crtc->primary)->fb; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___0 = (struct drm_framebuffer const *)fb; obj = ((struct intel_framebuffer *)__mptr___0)->obj; } else { obj = (struct drm_i915_gem_object *)0; } adjusted_mode = (struct drm_display_mode const *)(& (intel_crtc->config)->base.adjusted_mode); if (((unsigned int )adjusted_mode->flags & 16U) != 0U || ((unsigned int )adjusted_mode->flags & 32U) != 0U) { tmp___5 = set_no_fbc_reason(dev_priv, 4); if ((int )tmp___5) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_fbc_update", "mode incompatible with compression, disabling\n"); } else { } } else { } goto out_disable; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 7U) { max_width = 4096U; max_height = 4096U; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { max_width = 4096U; max_height = 4096U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { max_width = 4096U; max_height = 2048U; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 4U) { max_width = 4096U; max_height = 2048U; } else { max_width = 2048U; max_height = 1536U; } } } } if ((unsigned int )(intel_crtc->config)->pipe_src_w > max_width || (unsigned int 
)(intel_crtc->config)->pipe_src_h > max_height) { tmp___7 = set_no_fbc_reason(dev_priv, 5); if ((int )tmp___7) { tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_fbc_update", "mode too large for compression, disabling\n"); } else { } } else { } goto out_disable; } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) <= 3U) { goto _L; } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 46UL) != 0U) { _L: /* CIL Label */ if ((unsigned int )intel_crtc->plane != 0U) { tmp___9 = set_no_fbc_reason(dev_priv, 6); if ((int )tmp___9) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_fbc_update", "plane not A, disabling compression\n"); } else { } } else { } goto out_disable; } else { } } else { } } if ((unsigned int )*((unsigned char *)obj + 409UL) != 64U || (unsigned int )*((unsigned short *)obj + 204UL) == 4032U) { tmp___11 = set_no_fbc_reason(dev_priv, 7); if ((int )tmp___11) { tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("intel_fbc_update", "framebuffer not tiled or fenced, disabling compression\n"); } else { } } else { } goto out_disable; } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) <= 4U) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 44UL) == 0U) { if (((crtc->primary)->state)->rotation != 1U) { tmp___13 = set_no_fbc_reason(dev_priv, 4); if ((int )tmp___13) { tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("intel_fbc_update", "Rotation unsupported, disabling\n"); } else { } } else { } goto out_disable; } else { } } else { } } else { } __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) 
{ case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_48283; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_48283; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_48283; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_48283; default: __bad_percpu_size(); } ldv_48283: pscr_ret__ = pfo_ret__; goto ldv_48289; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_48293; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_48293; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_48293; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_48293; default: __bad_percpu_size(); } ldv_48293: pscr_ret__ = pfo_ret_____0; goto ldv_48289; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_48302; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_48302; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_48302; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_48302; default: __bad_percpu_size(); } ldv_48302: pscr_ret__ = pfo_ret_____1; goto ldv_48289; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_48311; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_48311; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_48311; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_48311; default: __bad_percpu_size(); } ldv_48311: pscr_ret__ = pfo_ret_____2; goto ldv_48289; default: __bad_size_call_parameter(); goto ldv_48289; } ldv_48289: 
tmp___14 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___14) { goto out_disable; } else { } tmp___17 = drm_format_plane_cpp(fb->pixel_format, 0); tmp___18 = i915_gem_stolen_setup_compression(dev, (int )obj->base.size, tmp___17); if (tmp___18 != 0) { tmp___16 = set_no_fbc_reason(dev_priv, 3); if ((int )tmp___16) { tmp___15 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___15 != 0L) { drm_ut_debug_printk("intel_fbc_update", "framebuffer too large, disabling compression\n"); } else { } } else { } goto out_disable; } else { } if (((unsigned long )dev_priv->fbc.crtc == (unsigned long )intel_crtc && dev_priv->fbc.fb_id == fb->base.id) && dev_priv->fbc.y == crtc->y) { return; } else { } tmp___20 = intel_fbc_enabled(dev); if ((int )tmp___20) { tmp___19 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___19 != 0L) { drm_ut_debug_printk("intel_fbc_update", "disabling active FBC for update\n"); } else { } intel_fbc_disable(dev); } else { } intel_fbc_enable(crtc); dev_priv->fbc.no_fbc_reason = 0; return; out_disable: tmp___22 = intel_fbc_enabled(dev); if ((int )tmp___22) { tmp___21 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___21 != 0L) { drm_ut_debug_printk("intel_fbc_update", "unsupported config, disabling FBC\n"); } else { } intel_fbc_disable(dev); } else { } i915_gem_stolen_cleanup_compression(dev); return; } } void intel_fbc_invalidate(struct drm_i915_private *dev_priv , unsigned int frontbuffer_bits , enum fb_op_origin origin ) { struct drm_device *dev ; unsigned int fbc_bits ; struct drm_crtc const *__mptr ; { dev = dev_priv->dev; if ((unsigned int )origin == 0U) { return; } else { } if ((int )dev_priv->fbc.enabled) { fbc_bits = (unsigned int )(1 << (int )(dev_priv->fbc.crtc)->pipe * 4); } else if ((unsigned long )dev_priv->fbc.fbc_work != (unsigned long )((struct intel_fbc_work *)0)) { __mptr = (struct drm_crtc const *)(dev_priv->fbc.fbc_work)->crtc; fbc_bits = (unsigned int )(1 << (int )((struct intel_crtc 
*)__mptr)->pipe * 4); } else { fbc_bits = dev_priv->fbc.possible_framebuffer_bits; } dev_priv->fbc.busy_bits = dev_priv->fbc.busy_bits | (fbc_bits & frontbuffer_bits); if (dev_priv->fbc.busy_bits != 0U) { intel_fbc_disable(dev); } else { } return; } } void intel_fbc_flush(struct drm_i915_private *dev_priv , unsigned int frontbuffer_bits ) { struct drm_device *dev ; { dev = dev_priv->dev; if (dev_priv->fbc.busy_bits == 0U) { return; } else { } dev_priv->fbc.busy_bits = dev_priv->fbc.busy_bits & ~ frontbuffer_bits; if (dev_priv->fbc.busy_bits == 0U) { intel_fbc_update(dev); } else { } return; } } void intel_fbc_init(struct drm_i915_private *dev_priv ) { enum pipe pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { dev_priv->fbc.enabled = 0; dev_priv->fbc.no_fbc_reason = 1; return; } else { } pipe = 0; goto ldv_48363; ldv_48362: dev_priv->fbc.possible_framebuffer_bits = dev_priv->fbc.possible_framebuffer_bits | (unsigned int )(1 << (int )pipe * 4); __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto ldv_48361; } else { __p___1 = dev_priv; if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { goto ldv_48361; } else { } } pipe = (enum pipe )((int )pipe + 1); ldv_48363: __p___2 = dev_priv; if ((int )__p___2->info.num_pipes > (int )pipe) { goto ldv_48362; } else { } ldv_48361: __p___5 = dev_priv; if ((unsigned int )((unsigned char )__p___5->info.gen) > 6U) { dev_priv->display.fbc_enabled = & ilk_fbc_enabled; dev_priv->display.enable_fbc = & gen7_fbc_enable; dev_priv->display.disable_fbc = & ilk_fbc_disable; } else { __p___4 = dev_priv; if ((unsigned int )((unsigned char )__p___4->info.gen) > 4U) { dev_priv->display.fbc_enabled = & ilk_fbc_enabled; 
dev_priv->display.enable_fbc = & ilk_fbc_enable; dev_priv->display.disable_fbc = & ilk_fbc_disable; } else { __p___3 = dev_priv; if ((unsigned int )((unsigned short )__p___3->info.device_id) == 10818U) { dev_priv->display.fbc_enabled = & g4x_fbc_enabled; dev_priv->display.enable_fbc = & g4x_fbc_enable; dev_priv->display.disable_fbc = & g4x_fbc_disable; } else { dev_priv->display.fbc_enabled = & i8xx_fbc_enabled; dev_priv->display.enable_fbc = & i8xx_fbc_enable; dev_priv->display.disable_fbc = & i8xx_fbc_disable; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 12808L, 32768000U, 1); } } } dev_priv->fbc.enabled = (*(dev_priv->display.fbc_enabled))(dev_priv->dev); return; } } void call_and_disable_work_15(struct work_struct *work ) { { if ((ldv_work_15_0 == 2 || ldv_work_15_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_15_0) { intel_fbc_work_fn(work); ldv_work_15_0 = 1; return; } else { } if ((ldv_work_15_1 == 2 || ldv_work_15_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_15_1) { intel_fbc_work_fn(work); ldv_work_15_1 = 1; return; } else { } if ((ldv_work_15_2 == 2 || ldv_work_15_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_15_2) { intel_fbc_work_fn(work); ldv_work_15_2 = 1; return; } else { } if ((ldv_work_15_3 == 2 || ldv_work_15_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_15_3) { intel_fbc_work_fn(work); ldv_work_15_3 = 1; return; } else { } return; } } void call_and_disable_all_15(int state ) { { if (ldv_work_15_0 == state) { call_and_disable_work_15(ldv_work_struct_15_0); } else { } if (ldv_work_15_1 == state) { call_and_disable_work_15(ldv_work_struct_15_1); } else { } if (ldv_work_15_2 == state) { call_and_disable_work_15(ldv_work_struct_15_2); } else { } if (ldv_work_15_3 == state) { call_and_disable_work_15(ldv_work_struct_15_3); } else { } return; } } void activate_work_15(struct work_struct *work , int state ) { { if (ldv_work_15_0 == 0) { ldv_work_struct_15_0 = work; 
ldv_work_15_0 = state; return; } else { } if (ldv_work_15_1 == 0) { ldv_work_struct_15_1 = work; ldv_work_15_1 = state; return; } else { } if (ldv_work_15_2 == 0) { ldv_work_struct_15_2 = work; ldv_work_15_2 = state; return; } else { } if (ldv_work_15_3 == 0) { ldv_work_struct_15_3 = work; ldv_work_15_3 = state; return; } else { } return; } } void work_init_15(void) { { ldv_work_15_0 = 0; ldv_work_15_1 = 0; ldv_work_15_2 = 0; ldv_work_15_3 = 0; return; } } void invoke_work_15(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_15_0 == 2 || ldv_work_15_0 == 3) { ldv_work_15_0 = 4; intel_fbc_work_fn(ldv_work_struct_15_0); ldv_work_15_0 = 1; } else { } goto ldv_48403; case 1: ; if (ldv_work_15_1 == 2 || ldv_work_15_1 == 3) { ldv_work_15_1 = 4; intel_fbc_work_fn(ldv_work_struct_15_0); ldv_work_15_1 = 1; } else { } goto ldv_48403; case 2: ; if (ldv_work_15_2 == 2 || ldv_work_15_2 == 3) { ldv_work_15_2 = 4; intel_fbc_work_fn(ldv_work_struct_15_0); ldv_work_15_2 = 1; } else { } goto ldv_48403; case 3: ; if (ldv_work_15_3 == 2 || ldv_work_15_3 == 3) { ldv_work_15_3 = 4; intel_fbc_work_fn(ldv_work_struct_15_0); ldv_work_15_3 = 1; } else { } goto ldv_48403; default: ldv_stop(); } ldv_48403: ; return; } } void disable_work_15(struct work_struct *work ) { { if ((ldv_work_15_0 == 3 || ldv_work_15_0 == 2) && (unsigned long )ldv_work_struct_15_0 == (unsigned long )work) { ldv_work_15_0 = 1; } else { } if ((ldv_work_15_1 == 3 || ldv_work_15_1 == 2) && (unsigned long )ldv_work_struct_15_1 == (unsigned long )work) { ldv_work_15_1 = 1; } else { } if ((ldv_work_15_2 == 3 || ldv_work_15_2 == 2) && (unsigned long )ldv_work_struct_15_2 == (unsigned long )work) { ldv_work_15_2 = 1; } else { } if ((ldv_work_15_3 == 3 || ldv_work_15_3 == 2) && (unsigned long )ldv_work_struct_15_3 == (unsigned long )work) { ldv_work_15_3 = 1; } else { } return; } } bool ldv_queue_work_on_563(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct 
*ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_564(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_565(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_566(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_567(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_cancel_delayed_work_568(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_579(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_581(int ldv_func_arg1 , struct workqueue_struct 
*ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_580(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_583(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_582(struct workqueue_struct *ldv_func_arg1 ) ; static bool ivb_can_enable_err_int(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; enum pipe pipe ; int tmp ; long tmp___0 ; struct drm_crtc const *__mptr ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fifo_underrun.c"), "i" (57), "i" (12UL)); ldv_47983: ; goto ldv_47983; } else { } pipe = 0; goto ldv_47993; ldv_47992: __mptr = (struct drm_crtc const *)dev_priv->pipe_to_crtc_mapping[(int )pipe]; crtc = (struct intel_crtc *)__mptr; if ((int )crtc->cpu_fifo_underrun_disabled) { return (0); } else { } pipe = (enum pipe )((int )pipe + 1); ldv_47993: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_47992; } else { } return (1); } } static bool cpt_can_enable_serr_int(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; struct intel_crtc *crtc ; int tmp ; long tmp___0 ; struct drm_crtc const *__mptr ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = 
queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fifo_underrun.c"), "i" (75), "i" (12UL)); ldv_48001: ; goto ldv_48001; } else { } pipe = 0; goto ldv_48011; ldv_48010: __mptr = (struct drm_crtc const *)dev_priv->pipe_to_crtc_mapping[(int )pipe]; crtc = (struct intel_crtc *)__mptr; if ((int )crtc->pch_fifo_underrun_disabled) { return (0); } else { } pipe = (enum pipe )((int )pipe + 1); ldv_48011: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_48010; } else { } return (1); } } void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv ) { struct intel_crtc *crtc ; struct list_head const *__mptr ; u32 reg ; u32 pipestat ; uint32_t tmp ; struct list_head const *__mptr___0 ; { spin_lock_irq(& dev_priv->irq_lock); __mptr = (struct list_head const *)(dev_priv->dev)->mode_config.crtc_list.next; crtc = (struct intel_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_48025; ldv_48024: reg = ((unsigned int )(dev_priv->info.pipe_offsets[(int )crtc->pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U; if ((int )crtc->cpu_fifo_underrun_disabled) { goto ldv_48023; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); pipestat = tmp & 4294901760U; if ((int )pipestat >= 0) { goto ldv_48023; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipestat | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); drm_err("pipe %c underrun\n", (int )crtc->pipe + 65); ldv_48023: __mptr___0 = 
(struct list_head const *)crtc->base.head.next; crtc = (struct intel_crtc *)__mptr___0 + 0xfffffffffffffff0UL; ldv_48025: ; if ((unsigned long )(& crtc->base.head) != (unsigned long )(& (dev_priv->dev)->mode_config.crtc_list)) { goto ldv_48024; } else { } spin_unlock_irq(& dev_priv->irq_lock); return; } } static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev , enum pipe pipe , bool enable , bool old ) { struct drm_i915_private *dev_priv ; u32 reg ; u32 pipestat ; uint32_t tmp ; int tmp___0 ; long tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; reg = ((unsigned int )(dev_priv->info.pipe_offsets[(int )pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458788U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); pipestat = tmp & 4294901760U; tmp___0 = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fifo_underrun.c"), "i" (129), "i" (12UL)); ldv_48036: ; goto ldv_48036; } else { } if ((int )enable) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, pipestat | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); } else if ((int )old && (int )pipestat < 0) { drm_err("pipe %c underrun\n", (int )pipe + 65); } else { } return; } } static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev , enum pipe pipe , bool enable ) { struct drm_i915_private *dev_priv ; uint32_t bit ; { dev_priv = (struct drm_i915_private *)dev->dev_private; bit = (int )pipe == 0 ? 
1U : 256U; if ((int )enable) { ironlake_enable_display_irq(dev_priv, bit); } else { ironlake_disable_display_irq(dev_priv, bit); } return; } } static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev , enum pipe pipe , bool enable , bool old ) { struct drm_i915_private *dev_priv ; bool tmp ; int tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )enable) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 278592L, (uint32_t )(1 << (int )pipe * 3), 1); tmp = ivb_can_enable_err_int(dev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } ironlake_enable_display_irq(dev_priv, 1073741824U); } else { ironlake_disable_display_irq(dev_priv, 1073741824U); if ((int )old) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 278592L, 1); if ((tmp___1 & (uint32_t )(1 << (int )pipe * 3)) != 0U) { drm_err("uncleared fifo underrun on pipe %c\n", (int )pipe + 65); } else { } } else { } } return; } } static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev , enum pipe pipe , bool enable ) { struct drm_i915_private *dev_priv ; int tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fifo_underrun.c"), "i" (181), "i" (12UL)); ldv_48057: ; goto ldv_48057; } else { } if ((int )enable) { dev_priv->__annonCompField82.de_irq_mask[(int )pipe] = dev_priv->__annonCompField82.de_irq_mask[(int )pipe] & 2147483647U; } else { 
dev_priv->__annonCompField82.de_irq_mask[(int )pipe] = dev_priv->__annonCompField82.de_irq_mask[(int )pipe] | 2147483648U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 16 + 279556), dev_priv->__annonCompField82.de_irq_mask[(int )pipe], 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 16 + 279556), 0); return; } } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev , enum transcoder pch_transcoder , bool enable ) { struct drm_i915_private *dev_priv ; uint32_t bit ; { dev_priv = (struct drm_i915_private *)dev->dev_private; bit = (unsigned int )pch_transcoder == 0U ? 1U : 8U; if ((int )enable) { ibx_display_interrupt_update(dev_priv, bit, bit); } else { ibx_display_interrupt_update(dev_priv, bit, 0U); } return; } } static void cpt_set_fifo_underrun_reporting(struct drm_device *dev , enum transcoder pch_transcoder , bool enable , bool old ) { struct drm_i915_private *dev_priv ; bool tmp ; int tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )enable) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 802880L, (uint32_t )(1 << (int )((unsigned int )pch_transcoder * 3U)), 1); tmp = cpt_can_enable_serr_int(dev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } ibx_display_interrupt_update(dev_priv, 65536U, 65536U); } else { ibx_display_interrupt_update(dev_priv, 65536U, 0U); if ((int )old) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 802880L, 1); if ((tmp___1 & (uint32_t )(1 << (int )((unsigned int )pch_transcoder * 3U))) != 0U) { drm_err("uncleared pch fifo underrun on pch transcoder %c\n", (unsigned int )pch_transcoder + 65U); } else { } } else { } } return; } } static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev , enum pipe pipe , bool enable ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; bool old ; int 
tmp ; long tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dev_priv->pipe_to_crtc_mapping[(int )pipe]; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = queued_spin_is_locked(& dev_priv->irq_lock.__annonCompField18.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fifo_underrun.c"), "i" (238), "i" (12UL)); ldv_48083: ; goto ldv_48083; } else { } old = (bool )(! ((int )intel_crtc->cpu_fifo_underrun_disabled != 0)); intel_crtc->cpu_fifo_underrun_disabled = (bool )(! 
((int )enable != 0)); __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) <= 4U) { i9xx_set_fifo_underrun_reporting(dev, pipe, (int )enable, (int )old); } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { i9xx_set_fifo_underrun_reporting(dev, pipe, (int )enable, (int )old); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 5U) { ironlake_set_fifo_underrun_reporting(dev, pipe, (int )enable); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 6U) { ironlake_set_fifo_underrun_reporting(dev, pipe, (int )enable); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 7U) { ivybridge_set_fifo_underrun_reporting(dev, pipe, (int )enable, (int )old); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 8U) { broadwell_set_fifo_underrun_reporting(dev, pipe, (int )enable); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { broadwell_set_fifo_underrun_reporting(dev, pipe, (int )enable); } else { } } } } } } } return (old); } } bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv , enum pipe pipe , bool enable ) { unsigned long flags ; bool ret ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, (int )enable); spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (ret); } } bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv , enum transcoder pch_transcoder , bool enable ) { struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr 
; unsigned long flags ; bool old ; raw_spinlock_t *tmp ; struct drm_i915_private *__p ; { crtc = dev_priv->pipe_to_crtc_mapping[(unsigned int )pch_transcoder]; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = spinlock_check(& dev_priv->irq_lock); flags = _raw_spin_lock_irqsave(tmp); old = (bool )(! ((int )intel_crtc->pch_fifo_underrun_disabled != 0)); intel_crtc->pch_fifo_underrun_disabled = (bool )(! ((int )enable != 0)); __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p->pch_type == 1U) { ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, (int )enable); } else { cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, (int )enable, (int )old); } spin_unlock_irqrestore(& dev_priv->irq_lock, flags); return (old); } } void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv , enum pipe pipe ) { struct drm_crtc *crtc ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_crtc const *__mptr ; bool tmp ; { crtc = dev_priv->pipe_to_crtc_mapping[(int )pipe]; if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { return; } else { } __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { _L: /* CIL Label */ __mptr = (struct drm_crtc const *)crtc; if ((int )((struct intel_crtc *)__mptr)->cpu_fifo_underrun_disabled) { return; } else { } } else { } } tmp = intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, 0); if ((int )tmp) { drm_err("CPU pipe %c FIFO underrun\n", (int )pipe + 65); } else { } return; } } void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv , enum transcoder pch_transcoder ) { bool tmp ; { tmp = intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, 0); if ((int )tmp) { drm_err("PCH 
transcoder %c FIFO underrun\n", (unsigned int )pch_transcoder + 65U); } else { } return; } } bool ldv_queue_work_on_579(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_580(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_581(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_582(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_583(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_593(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_595(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool 
/* Frontbuffer tracking section (intel_frontbuffer.c, CIL-flattened) plus its
 * LDV workqueue stubs. Declarations below complete prototypes split across
 * the chunk boundary. */
ldv_queue_delayed_work_on_594(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_597(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_596(struct workqueue_struct *ldv_func_arg1 ) ; void intel_edp_drrs_invalidate(struct drm_device *dev , unsigned int frontbuffer_bits ) ; void intel_edp_drrs_flush(struct drm_device *dev , unsigned int frontbuffer_bits ) ; void intel_psr_invalidate(struct drm_device *dev , unsigned int frontbuffer_bits ) ; void intel_psr_flush(struct drm_device *dev , unsigned int frontbuffer_bits ) ; void intel_psr_single_frame_update(struct drm_device *dev ) ;
/* Upclock an LVDS panel when the framebuffer becomes busy: if LVDS downclock
 * is available and the downclock bit (dpll & 256, i.e. bit 8) is set, clear it
 * in the pipe's DPLL register (register offset selected from pipe 0/1/other
 * relative to display_mmio_offset), wait a vblank, and verify the bit cleared
 * ("upclocking LVDS" / "failed to upclock LVDS!" debug messages). */
static void intel_increase_pllclock(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; int dpll_reg ; int dpll ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp ; long tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dpll_reg = (int )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { return; } else { } } else { } if (! dev_priv->lvds_downclock_avail) { return; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dpll_reg, 1); dpll = (int )tmp; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) == 0U && (dpll & 256) != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_increase_pllclock", "upclocking LVDS\n"); } else { } assert_panel_unlocked(dev_priv, pipe); dpll = dpll & -257; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dpll_reg, (uint32_t )dpll, 1); intel_wait_for_vblank(dev, (int )pipe); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dpll_reg, 1); dpll = (int )tmp___1; if ((dpll & 256) != 0) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_increase_pllclock", "failed to upclock LVDS!\n"); } else { } } else { } } else { } return; } }
/* Walk all pipes (CIL-lowered for loop via ldv_48016..ldv_48018 labels) and
 * upclock every pipe whose 4-bit frontbuffer mask intersects
 * frontbuffer_bits. */
static void intel_mark_fb_busy(struct drm_device *dev , unsigned int frontbuffer_bits , struct intel_engine_cs *ring ) { struct drm_i915_private *dev_priv ; enum pipe pipe ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = 0; goto ldv_48018; ldv_48017: ; if (((unsigned int )(15 << (int )pipe * 4) & frontbuffer_bits) == 0U) { goto ldv_48016; } else { } intel_increase_pllclock(dev, pipe); ldv_48016: pipe = (enum pipe )((int )pipe + 1); ldv_48018: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_48017; } else { } return; } }
/* Invalidate frontbuffer state for a GEM object about to be written:
 * WARN (not fail) if struct_mutex is unheld, skip objects with no
 * frontbuffer bits (the +206UL bitfield read — presumably
 * obj->frontbuffer_bits != 0, TODO confirm), mark bits busy when a ring is
 * given, then notify PSR, DRRS and FBC. */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj , struct intel_engine_cs *ring , enum fb_op_origin origin ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp ; long tmp___0 ; { dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_frontbuffer.c", 140, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned int )*((unsigned short *)obj + 206UL) == 0U) { return; } else { } if ((unsigned long )ring != (unsigned long )((struct intel_engine_cs *)0)) { mutex_lock_nested(& dev_priv->fb_tracking.lock, 0U); dev_priv->fb_tracking.busy_bits = dev_priv->fb_tracking.busy_bits | (unsigned int )obj->frontbuffer_bits; dev_priv->fb_tracking.flip_bits = dev_priv->fb_tracking.flip_bits & (unsigned int )(~ ((int )obj->frontbuffer_bits)); mutex_unlock(& dev_priv->fb_tracking.lock); } else { } intel_mark_fb_busy(dev, (unsigned int )obj->frontbuffer_bits, ring); intel_psr_invalidate(dev, (unsigned int )obj->frontbuffer_bits); intel_edp_drrs_invalidate(dev, (unsigned int )obj->frontbuffer_bits); intel_fbc_invalidate(dev_priv, (unsigned int )obj->frontbuffer_bits, origin); return; } }
/* Flush frontbuffer bits that are no longer busy: masks out still-busy bits
 * under fb_tracking.lock, then notifies DRRS, PSR and FBC. */
void intel_frontbuffer_flush(struct drm_device *dev , unsigned int frontbuffer_bits ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev_priv->fb_tracking.lock, 0U); frontbuffer_bits = ~ dev_priv->fb_tracking.busy_bits & frontbuffer_bits; mutex_unlock(& dev_priv->fb_tracking.lock); intel_mark_fb_busy(dev, frontbuffer_bits, (struct intel_engine_cs *)0); intel_edp_drrs_flush(dev, frontbuffer_bits); intel_psr_flush(dev, frontbuffer_bits); intel_fbc_flush(dev_priv, frontbuffer_bits); return; } }
/* Per-object flush: on retire, clear the object's bits from busy_bits under
 * the tracking lock before flushing. WARNs if struct_mutex is unheld. */
void intel_fb_obj_flush(struct drm_i915_gem_object *obj , bool retire ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; unsigned int frontbuffer_bits ; int __ret_warn_on ; int tmp ; long tmp___0 ; { dev = obj->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_frontbuffer.c", 205, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned int )*((unsigned short *)obj + 206UL) == 0U) { return; } else { } frontbuffer_bits = (unsigned int )obj->frontbuffer_bits; if ((int )retire) { mutex_lock_nested(& dev_priv->fb_tracking.lock, 0U); frontbuffer_bits = dev_priv->fb_tracking.busy_bits & frontbuffer_bits; dev_priv->fb_tracking.busy_bits = dev_priv->fb_tracking.busy_bits & ~ frontbuffer_bits; mutex_unlock(& dev_priv->fb_tracking.lock); } else { } intel_frontbuffer_flush(dev, frontbuffer_bits); return; } }
/* Flip prepare: move the bits from busy to flip tracking and request a PSR
 * single-frame update. */
void intel_frontbuffer_flip_prepare(struct drm_device *dev , unsigned int frontbuffer_bits ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev_priv->fb_tracking.lock, 0U); dev_priv->fb_tracking.flip_bits = dev_priv->fb_tracking.flip_bits | frontbuffer_bits; dev_priv->fb_tracking.busy_bits = dev_priv->fb_tracking.busy_bits & ~ frontbuffer_bits; mutex_unlock(& dev_priv->fb_tracking.lock); intel_psr_single_frame_update(dev); return; } }
/* Flip complete: flush only the bits that were actually pending a flip. */
void intel_frontbuffer_flip_complete(struct drm_device *dev , unsigned int frontbuffer_bits ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev_priv->fb_tracking.lock, 0U); frontbuffer_bits = dev_priv->fb_tracking.flip_bits & frontbuffer_bits; dev_priv->fb_tracking.flip_bits = dev_priv->fb_tracking.flip_bits & ~ frontbuffer_bits; mutex_unlock(& dev_priv->fb_tracking.lock); intel_frontbuffer_flush(dev, frontbuffer_bits); return; } }
/* LDV workqueue wrappers for this translation unit (same shape as 579-583). */
bool ldv_queue_work_on_593(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
bool ldv_queue_delayed_work_on_594(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_595(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
void ldv_flush_workqueue_596(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
bool ldv_queue_delayed_work_on_597(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Prototypes for the next unit's LDV stubs (608's parameter list continues
 * past this chunk boundary). */
bool ldv_queue_work_on_607(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_609(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_608(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 ,
/* Display-modes section (intel_modes.c, CIL-flattened): EDID handling and
 * connector property attachment, plus this unit's LDV stubs and inline
 * atomic helpers. First tokens complete a prototype begun before this span. */
unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_611(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_610(struct workqueue_struct *ldv_func_arg1 ) ; extern void drm_edid_to_eld(struct drm_connector * , struct edid * ) ; extern struct edid *drm_get_edid(struct drm_connector * , struct i2c_adapter * ) ; extern int drm_add_edid_modes(struct drm_connector * , struct edid * ) ; extern int drm_mode_connector_update_edid_property(struct drm_connector * , struct edid const * ) ; extern struct drm_property *drm_property_create_enum(struct drm_device * , int , char const * , struct drm_prop_enum_list const * , int ) ; int intel_connector_update_modes(struct drm_connector *connector , struct edid *edid ) ; int intel_ddc_get_modes(struct drm_connector *connector , struct i2c_adapter *adapter ) ; void intel_attach_force_audio_property(struct drm_connector *connector ) ; void intel_attach_broadcast_rgb_property(struct drm_connector *connector ) ;
/* Push the EDID into the connector property, add its modes, and fill the ELD;
 * returns the mode count from drm_add_edid_modes(). */
int intel_connector_update_modes(struct drm_connector *connector , struct edid *edid ) { int ret ; { drm_mode_connector_update_edid_property(connector, (struct edid const *)edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); return (ret); } }
/* Fetch the EDID over DDC, update the connector from it, and free the EDID.
 * Returns 0 when no EDID could be read. */
int intel_ddc_get_modes(struct drm_connector *connector , struct i2c_adapter *adapter ) { struct edid *edid ; int ret ; { edid = drm_get_edid(connector, adapter); if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { return (0); } else { } ret = intel_connector_update_modes(connector, edid); kfree((void const *)edid); return (ret); } }
/* Enum values for the "audio" connector property. */
static struct drm_prop_enum_list const force_audio_names[4U] = { {-2, (char *)"force-dvi"}, {-1, (char *)"off"}, {0, (char *)"auto"}, {1, (char *)"on"}};
/* Lazily create the device-wide "audio" enum property and attach it to the
 * connector with initial value 0 ("auto"). */
void intel_attach_force_audio_property(struct drm_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_property *prop ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; prop = dev_priv->force_audio_property; if ((unsigned long )prop == (unsigned long )((struct drm_property *)0)) { prop = drm_property_create_enum(dev, 0, "audio", (struct drm_prop_enum_list const *)(& force_audio_names), 4); if ((unsigned long )prop == (unsigned long )((struct drm_property *)0)) { return; } else { } dev_priv->force_audio_property = prop; } else { } drm_object_attach_property(& connector->base, prop, 0ULL); return; } }
/* Enum values for the "Broadcast RGB" connector property. */
static struct drm_prop_enum_list const broadcast_rgb_names[3U] = { {0, (char *)"Automatic"}, {1, (char *)"Full"}, {2, (char *)"Limited 16:235"}};
/* Lazily create the device-wide "Broadcast RGB" enum property (flags == 8)
 * and attach it with initial value 0 ("Automatic"). */
void intel_attach_broadcast_rgb_property(struct drm_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_property *prop ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; prop = dev_priv->broadcast_rgb_property; if ((unsigned long )prop == (unsigned long )((struct drm_property *)0)) { prop = drm_property_create_enum(dev, 8, "Broadcast RGB", (struct drm_prop_enum_list const *)(& broadcast_rgb_names), 3); if ((unsigned long )prop == (unsigned long )((struct drm_property *)0)) { return; } else { } dev_priv->broadcast_rgb_property = prop; } else { } drm_object_attach_property(& connector->base, prop, 0ULL); return; } }
/* LDV workqueue wrappers for this translation unit. */
bool ldv_queue_work_on_607(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
bool ldv_queue_delayed_work_on_608(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_609(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
void ldv_flush_workqueue_610(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
bool ldv_queue_delayed_work_on_611(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
__inline static long ldv__builtin_expect(long exp , long c ) ;
/* CIL copy of __atomic_add_unless: CAS loop (labels ldv_5707/ldv_5708) that
 * adds a to *v unless it equals u; returns the value observed before the
 * successful CAS (or u). */
__inline static int __atomic_add_unless___4(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5708: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5707; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5707; } else { } c = old; goto ldv_5708; ldv_5707: ; return (c); } }
/* Returns nonzero iff the add was performed (previous value != u). */
__inline static int atomic_add_unless___4(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless___4(v, a, u); return (tmp != u); } }
/* Prototypes for the overlay unit's LDV stubs (622's list continues past this
 * chunk boundary). */
bool ldv_queue_work_on_621(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_623(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_622(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3
/* Inline helper clones used by the overlay unit (CIL duplicates inlines per
 * translation unit, hence the ___12/___14/___4 suffixes). First tokens finish
 * the ldv_queue_delayed_work_on_622 prototype begun before this span. */
, unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_625(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_624(struct workqueue_struct *ldv_func_arg1 ) ;
/* I/O-memory memset/memcpy modelled as plain memset/memcpy for verification. */
__inline static void memset_io(void volatile *addr , unsigned char val , size_t count ) { { memset((void *)addr, (int )val, count); return; } }
__inline static void memcpy_toio(void volatile *dst , void const *src , size_t count ) { { memcpy((void *)dst, src, count); return; } }
/* kref_get clone: increments refcount and WARNs once if it was <= 0 before. */
__inline static void kref_get___12(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! __warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } }
/* kref_sub clone: WARNs on NULL release; calls release and returns 1 when the
 * count hits zero, else 0. */
__inline static int kref_sub___14(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } }
__inline static int kref_put___14(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___14(kref, 1U, release); return (tmp); } }
/* kref_put_mutex clone: fast-path decrement via atomic_add_unless; slow path
 * takes the mutex, re-checks with atomic_dec_and_test, and calls release with
 * the mutex held. Returns 1 iff release ran. */
__inline static int kref_put_mutex___4(struct kref *kref , void (*release)(struct kref * ) , struct mutex *lock ) { int __ret_warn_on ; long tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 138); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = atomic_add_unless___4(& kref->refcount, -1, 1); tmp___3 = ldv__builtin_expect(tmp___2 == 0, 0L); if (tmp___3 != 0L) { mutex_lock_nested(lock, 0U); tmp___0 = atomic_dec_and_test(& kref->refcount); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { mutex_unlock(lock); return (0); } else { } (*release)(kref); return (1); } else { } return (0); } }
/* io_mapping modelled as simple pointer arithmetic / no-op for verification. */
__inline static void *io_mapping_map_wc(struct io_mapping *mapping , unsigned long offset ) { { return ((void *)mapping + offset); } }
__inline static void io_mapping_unmap(void *vaddr ) { { return; } }
/* GEM object unreference (caller holds struct_mutex variant). */
__inline static void drm_gem_object_unreference___13(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___14(& obj->refcount, & drm_gem_object_free); } else { } return; } }
/* Unlocked variant: takes struct_mutex via kref_put_mutex; the else branch's
 * acquire/release pair appears to model the lockdep annotation — TODO confirm. */
__inline static void drm_gem_object_unreference_unlocked___3(struct drm_gem_object *obj ) { struct drm_device *dev ; int tmp ; { if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) { return; } else { } dev = obj->dev; tmp = kref_put_mutex___4(& obj->refcount, & drm_gem_object_free, & dev->struct_mutex); if (tmp != 0) { mutex_unlock(& dev->struct_mutex); } else { lock_acquire(& dev->struct_mutex.dep_map, 0U, 0, 0, 1, (struct lockdep_map *)0, 0UL); lock_release(& dev->struct_mutex.dep_map, 0, 0UL); } return; } }
/* NULL-tolerant request reference; returns its argument. */
__inline static struct drm_i915_gem_request *i915_gem_request_reference___5(struct drm_i915_gem_request *req ) { { if ((unsigned long )req != (unsigned long )((struct drm_i915_gem_request *)0)) { kref_get___12(& req->ref); } else { } return (req); } }
/* Request unreference: WARNs if the ring's struct_mutex is unheld, then drops
 * the kref with i915_gem_request_free as destructor. */
__inline static void i915_gem_request_unreference___4(struct drm_i915_gem_request *req ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& ((req->ring)->dev)->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_drv.h", 2216, "WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); kref_put___14(& req->ref, & i915_gem_request_free); return; } }
/* Assign *pdst = src with reference bookkeeping (ref new before unref old). */
__inline static void i915_gem_request_assign___4(struct drm_i915_gem_request **pdst , struct drm_i915_gem_request *src ) { { if ((unsigned long )src != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_reference___5(src); } else { } if ((unsigned long )*pdst != (unsigned long )((struct drm_i915_gem_request *)0)) { i915_gem_request_unreference___4(*pdst); } else { } *pdst = src; return; } }
int intel_overlay_put_image(struct drm_device *dev , void *data , struct drm_file *file_priv ) ; int intel_overlay_attrs(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
/* Map the overlay register page: via the physical handle's vaddr when the
 * +46UL platform flag byte is set (presumably OVERLAY_NEEDS_PHYSICAL — TODO
 * confirm), otherwise through a WC GTT io_mapping. */
static struct overlay_registers *intel_overlay_map_regs(struct intel_overlay *overlay ) { struct drm_i915_private *dev_priv ; struct overlay_registers *regs ; unsigned long tmp ; void *tmp___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)(overlay->dev)->dev_private; __p = to_i915((struct drm_device const *)overlay->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { regs = (struct overlay_registers *)((overlay->reg_bo)->__annonCompField84.phys_handle)->vaddr; } else { tmp = i915_gem_obj_ggtt_offset(overlay->reg_bo); tmp___0 = io_mapping_map_wc(dev_priv->gtt.mappable, tmp); regs = (struct overlay_registers *)tmp___0; } return (regs); } }
/* Counterpart of intel_overlay_map_regs; body continues past this chunk
 * boundary. */
static void intel_overlay_unmap_regs(struct intel_overlay *overlay ,
struct overlay_registers *regs ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)overlay->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { io_mapping_unmap((void *)regs); } else { } return; } } static int intel_overlay_do_wait_request(struct intel_overlay *overlay , void (*tail)(struct intel_overlay * ) ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int __ret_warn_on ; long tmp ; { dev = overlay->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring); __ret_warn_on = (unsigned long )overlay->last_flip_req != (unsigned long )((struct drm_i915_gem_request *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 220, "WARN_ON(overlay->last_flip_req)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); i915_gem_request_assign___4(& overlay->last_flip_req, ring->outstanding_lazy_request); ret = __i915_add_request(ring, (struct drm_file *)0, (struct drm_i915_gem_object *)0); if (ret != 0) { return (ret); } else { } overlay->flip_tail = tail; ret = i915_wait_request(overlay->last_flip_req); if (ret != 0) { return (ret); } else { } i915_gem_request_assign___4(& overlay->last_flip_req, (struct drm_i915_gem_request *)0); return (0); } } static int intel_overlay_on(struct intel_overlay *overlay ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; struct drm_i915_private *__p ; long tmp___0 ; int tmp___1 ; { dev = overlay->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring); __ret_warn_on = (int 
)overlay->active; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 244, "WARN_ON(overlay->active)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p = to_i915((struct drm_device const *)dev); __ret_warn_on___0 = (unsigned int )((unsigned short )__p->info.device_id) == 13687U && (dev_priv->quirks & 1UL) == 0UL; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 245, "WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); ret = intel_ring_begin(ring, 4); if (ret != 0) { return (ret); } else { } overlay->active = 1; intel_ring_emit(ring, 144703488U); intel_ring_emit(ring, overlay->flip_addr | 1U); intel_ring_emit(ring, 25231360U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); tmp___1 = intel_overlay_do_wait_request(overlay, (void (*)(struct intel_overlay * ))0); return (tmp___1); } } static int intel_overlay_continue(struct intel_overlay *overlay , bool load_polyphase_filter ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u32 flip_addr ; u32 tmp ; int ret ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; int tmp___3 ; { dev = overlay->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring); flip_addr = overlay->flip_addr; __ret_warn_on = ! 
overlay->active; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 273, "WARN_ON(!overlay->active)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((int )load_polyphase_filter) { flip_addr = flip_addr | 1U; } else { } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196616L, 1); if ((tmp & 131072U) != 0U) { tmp___1 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_overlay_continue", "overlay underrun, DOVSTA: %x\n", tmp); } else { } } else { } ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 142606336U); intel_ring_emit(ring, flip_addr); intel_ring_advance(ring); __ret_warn_on___0 = (unsigned long )overlay->last_flip_req != (unsigned long )((struct drm_i915_gem_request *)0); tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 291, "WARN_ON(overlay->last_flip_req)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); i915_gem_request_assign___4(& overlay->last_flip_req, ring->outstanding_lazy_request); tmp___3 = __i915_add_request(ring, (struct drm_file *)0, (struct drm_i915_gem_object *)0); return (tmp___3); } } static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay ) { struct drm_i915_gem_object *obj ; { obj = overlay->old_vid_bo; i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference___13(& obj->base); overlay->old_vid_bo = (struct drm_i915_gem_object *)0; return; } } static void 
intel_overlay_off_tail(struct intel_overlay *overlay ) { struct drm_i915_gem_object *obj ; int __ret_warn_on ; long tmp ; long tmp___0 ; { obj = overlay->vid_bo; __ret_warn_on = (unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 312, "WARN_ON(!obj)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference___13(& obj->base); overlay->vid_bo = (struct drm_i915_gem_object *)0; (overlay->crtc)->overlay = (struct intel_overlay *)0; overlay->crtc = (struct intel_crtc *)0; overlay->active = 0; return; } } static int intel_overlay_off(struct intel_overlay *overlay ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; u32 flip_addr ; int ret ; int __ret_warn_on ; long tmp ; struct drm_i915_private *__p ; int tmp___0 ; { dev = overlay->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring); flip_addr = overlay->flip_addr; __ret_warn_on = ! 
/* Tail of intel_overlay_off(): emits a 6-dword (or device-13687U-specific) ring
 * sequence to turn the overlay off, then waits via intel_overlay_do_wait_request()
 * with intel_overlay_off_tail as the completion callback. Also contains
 * intel_overlay_recover_from_interrupt(), which waits for any outstanding flip
 * request, runs the deferred flip_tail callback, and clears last_flip_req; and the
 * head of intel_overlay_release_old_vid() (continues on the next line). */
overlay->active; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 333, "WARN_ON(!overlay->active)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); flip_addr = flip_addr | 1U; ret = intel_ring_begin(ring, 6); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 142606336U); intel_ring_emit(ring, flip_addr); intel_ring_emit(ring, 25231360U); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 13687U) { intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); intel_ring_emit(ring, 0U); } else { intel_ring_emit(ring, 146800640U); intel_ring_emit(ring, flip_addr); intel_ring_emit(ring, 25231360U); } intel_ring_advance(ring); tmp___0 = intel_overlay_do_wait_request(overlay, & intel_overlay_off_tail); return (tmp___0); } } /* Wait for a pending flip (if any) and run its deferred tail callback. */ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay ) { int ret ; { if ((unsigned long )overlay->last_flip_req == (unsigned long )((struct drm_i915_gem_request *)0)) { return (0); } else { } ret = i915_wait_request(overlay->last_flip_req); if (ret != 0) { return (ret); } else { } if ((unsigned long )overlay->flip_tail != (unsigned long )((void (*)(struct intel_overlay * ))0)) { (*(overlay->flip_tail))(overlay); } else { } i915_gem_request_assign___4(& overlay->last_flip_req, (struct drm_i915_gem_request *)0); return (0); } } /* Begin intel_overlay_release_old_vid() (continues on the next line). */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_engine_cs *ring ; int ret ; int __ret_warn_on ; int tmp ; long tmp___0 ; uint32_t tmp___1 ; { dev = overlay->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ring = (struct intel_engine_cs *)(& dev_priv->ring); tmp = 
/* Tail of intel_overlay_release_old_vid(): asserts struct_mutex is held (LDV
 * WARN_ON), and if an old buffer is pending, optionally flushes via a 2-dword
 * ring sequence before releasing it and updating frontbuffer tracking. Also:
 * intel_overlay_reset() - forgets all overlay state after a GPU reset;
 * packed_depth_bytes()/packed_width_bytes() - format helpers returning -22
 * (-EINVAL) for unsupported packed formats; head of uv_hsubsampling(). */
mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 397, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned long )overlay->old_vid_bo == (unsigned long )((struct drm_i915_gem_object *)0)) { return (0); } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8364L, 1); if ((tmp___1 & 512U) != 0U) { ret = intel_ring_begin(ring, 2); if (ret != 0) { return (ret); } else { } intel_ring_emit(ring, 25231360U); intel_ring_emit(ring, 0U); intel_ring_advance(ring); ret = intel_overlay_do_wait_request(overlay, & intel_overlay_release_old_vid_tail); if (ret != 0) { return (ret); } else { } } else { } intel_overlay_release_old_vid_tail(overlay); i915_gem_track_fb(overlay->old_vid_bo, (struct drm_i915_gem_object *)0, (unsigned int )(1 << ((int )(overlay->crtc)->pipe * 4 + 3))); return (0); } } /* Drop all cached overlay state (e.g. after GPU reset). */ void intel_overlay_reset(struct drm_i915_private *dev_priv ) { struct intel_overlay *overlay ; { overlay = dev_priv->overlay; if ((unsigned long )overlay == (unsigned long )((struct intel_overlay *)0)) { return; } else { } intel_overlay_release_old_vid(overlay); overlay->last_flip_req = (struct drm_i915_gem_request *)0; overlay->old_xscale = 0U; overlay->old_yscale = 0U; overlay->crtc = (struct intel_crtc *)0; overlay->active = 0; return; } } /* Bytes per pixel for packed formats; -22 (= -EINVAL) if unsupported. */ static int packed_depth_bytes(u32 format ) { { switch (format & 65280U) { case 256U: ; return (4); case 512U: ; default: ; return (-22); } } } /* Row width in bytes for packed formats; -22 (= -EINVAL) if unsupported. */ static int packed_width_bytes(u32 format , short width ) { { switch (format & 65280U) { case 256U: ; return ((int )width << 1); default: ; return (-22); } } } static int uv_hsubsampling(u32 format ) { { switch 
/* Tail of uv_hsubsampling() and uv_vsubsampling(): horizontal/vertical chroma
 * subsampling factors keyed on bits 8-15 of the format; -22 (= -EINVAL) for
 * unknown formats. calc_swidthsw(): computes the SWIDTHSW register value from
 * surface offset and width, with mask/shift chosen by hardware generation
 * (gen2 uses 32-byte units, later gens 64-byte). y_static_hcoeffs[]: the 85-entry
 * static luma polyphase filter coefficient table; head of uv_static_hcoeffs[]. */
(format & 65280U) { case 256U: ; case 768U: ; return (2); case 512U: ; case 1024U: ; return (4); default: ; return (-22); } } } /* Vertical chroma subsampling factor for the given format. */ static int uv_vsubsampling(u32 format ) { { switch (format & 65280U) { case 768U: ; case 1024U: ; return (2); case 256U: ; case 512U: ; return (1); default: ; return (-22); } } } /* Compute the SWIDTHSW value; unit size depends on hardware generation. */ static u32 calc_swidthsw(struct drm_device *dev , u32 offset , u32 width ) { u32 mask ; u32 shift ; u32 ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { mask = 31U; shift = 5U; } else { mask = 63U; shift = 6U; } ret = (((offset + width) + mask) >> (int )shift) - (offset >> (int )shift); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 2U) { ret = ret << 1; } else { } ret = ret - 1U; return (ret << 2); } } /* Static polyphase filter coefficients for the luma (Y) channel. */ static u16 const y_static_hcoeffs[85U] = { 12288U, 46240U, 6448U, 6432U, 46240U, 12288U, 46336U, 6608U, 6272U, 46144U, 12288U, 46400U, 6792U, 12160U, 46048U, 12288U, 46464U, 6960U, 11808U, 45952U, 12288U, 46528U, 7128U, 11456U, 45856U, 12320U, 46560U, 7264U, 11136U, 45760U, 12320U, 46560U, 7416U, 10784U, 45664U, 12320U, 46560U, 7552U, 10464U, 45568U, 12320U, 46528U, 7688U, 16192U, 45504U, 12320U, 46464U, 7800U, 15584U, 45408U, 12352U, 46368U, 7896U, 15008U, 45344U, 12352U, 46240U, 7984U, 14464U, 45280U, 12352U, 46080U, 8056U, 13952U, 45216U, 12320U, 45888U, 8120U, 13472U, 45152U, 12320U, 45632U, 8160U, 13024U, 45120U, 12320U, 45376U, 8184U, 12640U, 45088U, 45056U, 12288U, 2048U, 12288U, 45056U}; /* Static polyphase filter coefficients for the chroma (UV) channels. */ static u16 const uv_static_hcoeffs[51U] = { 12288U, 6144U, 6144U, 45056U, 6352U, 11872U, 45056U, 6544U, 11488U, 45088U, 6760U, 11072U, 45120U, 6944U, 10720U, 45152U, 7128U, 10368U, 45184U, 7304U, 15968U, 45216U, 7464U, 15360U, 45248U, 7608U, 14816U, 45280U, 7744U, 14304U, 45312U, 7864U, 13856U, 45312U, 7960U, 13472U, 45312U, 8040U, 13152U, 45280U, 8104U, 12864U, 45248U, 8160U, 
/* Tail of uv_static_hcoeffs[]. update_polyphase_filter(): copies both static
 * coefficient tables into the memory-mapped overlay register block via
 * memcpy_toio(). update_scaling_factors(): derives 12.12 fixed-point X/Y scale
 * factors (and UV variants divided by the chroma subsampling) from the put-image
 * parameters, writes YRGBSCALE/UVSCALE/UVSCALEV, and returns whether the scale
 * changed since the last frame (which triggers a filter reload). Head of
 * update_colorkey() (continues on the next line). */
12608U, 45152U, 8176U, 12448U, 12288U, 2048U, 12288U}; /* Copy the static filter tables into the overlay register page. */ static void update_polyphase_filter(struct overlay_registers *regs ) { { memcpy_toio((void volatile *)(& regs->Y_HCOEFS), (void const *)(& y_static_hcoeffs), 170UL); memcpy_toio((void volatile *)(& regs->UV_HCOEFS), (void const *)(& uv_static_hcoeffs), 102UL); return; } } /* Program scaling registers; returns true when the scale factors changed. */ static bool update_scaling_factors(struct intel_overlay *overlay , struct overlay_registers *regs , struct put_image_params *params ) { u32 xscale ; u32 yscale ; u32 xscale_UV ; u32 yscale_UV ; bool scale_changed ; int uv_hscale ; int tmp ; int uv_vscale ; int tmp___0 ; { scale_changed = 0; tmp = uv_hsubsampling((u32 )params->format); uv_hscale = tmp; tmp___0 = uv_vsubsampling((u32 )params->format); uv_vscale = tmp___0; if ((int )params->dst_w > 1) { xscale = (u32 )((((int )params->src_scan_w + -1) << 12) / (int )params->dst_w); } else { xscale = 4096U; } if ((int )params->dst_h > 1) { yscale = (u32 )((((int )params->src_scan_h + -1) << 12) / (int )params->dst_h); } else { yscale = 4096U; } xscale_UV = xscale / (u32 )uv_hscale; yscale_UV = yscale / (u32 )uv_vscale; xscale = xscale_UV * (u32 )uv_hscale; yscale = yscale_UV * (u32 )uv_vscale; if (overlay->old_xscale != xscale || overlay->old_yscale != yscale) { scale_changed = 1; } else { } overlay->old_xscale = xscale; overlay->old_yscale = yscale; iowrite32(((yscale << 20) | ((xscale >> 12) << 16)) | ((xscale & 4095U) << 3), (void *)(& regs->YRGBSCALE)); iowrite32(((yscale_UV << 20) | ((xscale_UV >> 12) << 16)) | ((xscale_UV & 4095U) << 3), (void *)(& regs->UVSCALE)); iowrite32(((yscale >> 12) << 16) | (yscale_UV >> 12), (void *)(& regs->UVSCALEV)); if ((int )scale_changed) { update_polyphase_filter(regs); } else { } return (scale_changed); } } /* Begin update_colorkey() (continues on the next line). */ static void update_colorkey(struct intel_overlay *overlay , struct overlay_registers *regs ) { u32 key ; u32 flags ; { key = overlay->color_key; flags = 0U; if ((unsigned int )*((unsigned char *)overlay + 43UL) != 0U) { flags = flags | 2147483648U; } 
/* Tail of update_colorkey(): converts the user color key into the destination
 * framebuffer's pixel layout (8/15/16/24/32 bpp) and writes DCLRKV/DCLRKM.
 * overlay_cmd_reg(): builds the OCMD register bits from the put-image format
 * (planar vs. packed, format sub-type, swap variant). Then the head of
 * intel_overlay_do_put_image() with its (very long) declaration list;
 * the body continues on the following lines. */
else { } switch ((((overlay->crtc)->base.primary)->fb)->bits_per_pixel) { case 8: key = 0U; flags = flags | 16777215U; goto ldv_48242; case 16: ; if ((((overlay->crtc)->base.primary)->fb)->depth == 15U) { key = (((key & 31744U) << 9) | ((key & 992U) << 6)) | ((key << 3) & 255U); flags = flags | 460551U; } else { key = (((key & 63488U) << 8) | ((key & 2016U) << 5)) | ((key << 3) & 255U); flags = flags | 459527U; } goto ldv_48242; case 24: ; case 32: flags = flags; goto ldv_48242; } ldv_48242: iowrite32(key, (void *)(& regs->DCLRKV)); iowrite32(flags, (void *)(& regs->DCLRKM)); return; } } /* Build the OCMD register value from the requested image format. */ static u32 overlay_cmd_reg(struct put_image_params *params ) { u32 cmd ; { cmd = 1U; if (params->format & 1) { switch (params->format & 65280) { case 256: cmd = cmd | 13312U; goto ldv_48251; case 768: cmd = cmd | 12288U; goto ldv_48251; case 512: ; case 1024: cmd = cmd | 14336U; goto ldv_48251; } ldv_48251: ; } else { switch (params->format & 65280) { case 256: cmd = cmd | 8192U; goto ldv_48256; case 512: cmd = cmd | 9216U; goto ldv_48256; } ldv_48256: ; switch (params->format & 16711680) { case 0: ; goto ldv_48259; case 65536: cmd = cmd | 16384U; goto ldv_48259; case 131072: cmd = cmd | 32768U; goto ldv_48259; case 196608: cmd = cmd | 49152U; goto ldv_48259; } ldv_48259: ; } return (cmd); } } /* Begin intel_overlay_do_put_image() (body continues on the next lines). */ static int intel_overlay_do_put_image(struct intel_overlay *overlay , struct drm_i915_gem_object *new_bo , struct put_image_params *params ) { int ret ; int tmp_width ; struct overlay_registers *regs ; bool scale_changed ; struct drm_device *dev ; u32 swidth ; u32 swidthsw ; u32 sheight ; u32 ostride ; enum pipe pipe ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; u32 oconfig ; struct drm_i915_private *__p ; unsigned long tmp___4 ; int uv_hscale ; int tmp___5 ; int uv_vscale ; int tmp___6 ; u32 tmp_U ; u32 tmp_V ; u32 __max1 ; u32 __max2 ; unsigned long tmp___7 ; unsigned long tmp___8 ; u32 tmp___9 ; { scale_changed = 0; dev = 
/* Body of intel_overlay_do_put_image(): asserts both struct_mutex and the modeset
 * connection_mutex are held (LDV WARN_ONs), pins the new buffer to the display
 * plane, programs OCONFIG and turns the overlay on if not yet active, then writes
 * window position/size, surface widths/strides/offsets (with UV-plane variants for
 * planar formats), scaling, color key and OCMD, and finally flips via
 * intel_overlay_continue() and updates frontbuffer tracking. Errors unwind through
 * the out_unpin label. Also contains the head of intel_overlay_switch_off()
 * (continues on the following line). */
overlay->dev; pipe = (overlay->crtc)->pipe; tmp = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 720, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = drm_modeset_is_locked(& dev->mode_config.connection_mutex); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } __ret_warn_on___0 = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 721, "WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) { return (ret); } else { } ret = i915_gem_object_pin_to_display_plane(new_bo, 0U, (struct intel_engine_cs *)0, & i915_ggtt_view_normal); if (ret != 0) { return (ret); } else { } ret = i915_gem_object_put_fence(new_bo); if (ret != 0) { goto out_unpin; } else { } if (! overlay->active) { regs = intel_overlay_map_regs(overlay); if ((unsigned long )regs == (unsigned long )((struct overlay_registers *)0)) { ret = -12; goto out_unpin; } else { } oconfig = 8U; __p = to_i915((struct drm_device const *)overlay->dev); if ((unsigned int )((unsigned char )__p->info.gen) == 4U) { oconfig = oconfig | 32U; } else { } oconfig = ((int )pipe == 0 ? 
0U : 262144U) | oconfig; iowrite32(oconfig, (void *)(& regs->OCONFIG)); intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_on(overlay); if (ret != 0) { goto out_unpin; } else { } } else { } regs = intel_overlay_map_regs(overlay); if ((unsigned long )regs == (unsigned long )((struct overlay_registers *)0)) { ret = -12; goto out_unpin; } else { } iowrite32((u32 )(((int )params->dst_y << 16) | (int )params->dst_x), (void *)(& regs->DWINPOS)); iowrite32((u32 )(((int )params->dst_h << 16) | (int )params->dst_w), (void *)(& regs->DWINSZ)); if ((params->format & 2) != 0) { tmp_width = packed_width_bytes((u32 )params->format, (int )params->src_w); } else { tmp_width = (int )params->src_w; } swidth = (u32 )params->src_w; swidthsw = calc_swidthsw(overlay->dev, (u32 )params->offset_Y, (u32 )tmp_width); sheight = (u32 )params->src_h; tmp___4 = i915_gem_obj_ggtt_offset(new_bo); iowrite32((u32 )tmp___4 + (u32 )params->offset_Y, (void *)(& regs->OBUF_0Y)); ostride = (u32 )params->stride_Y; if (params->format & 1) { tmp___5 = uv_hsubsampling((u32 )params->format); uv_hscale = tmp___5; tmp___6 = uv_vsubsampling((u32 )params->format); uv_vscale = tmp___6; swidth = (u32 )((int )params->src_w / uv_hscale << 16) | swidth; tmp_U = calc_swidthsw(overlay->dev, (u32 )params->offset_U, (u32 )((int )params->src_w / uv_hscale)); tmp_V = calc_swidthsw(overlay->dev, (u32 )params->offset_V, (u32 )((int )params->src_w / uv_hscale)); __max1 = tmp_U; __max2 = tmp_V; swidthsw = ((__max1 > __max2 ? 
__max1 : __max2) << 16) | swidthsw; sheight = (u32 )((int )params->src_h / uv_vscale << 16) | sheight; tmp___7 = i915_gem_obj_ggtt_offset(new_bo); iowrite32((u32 )tmp___7 + (u32 )params->offset_U, (void *)(& regs->OBUF_0U)); tmp___8 = i915_gem_obj_ggtt_offset(new_bo); iowrite32((u32 )tmp___8 + (u32 )params->offset_V, (void *)(& regs->OBUF_0V)); ostride = (u32 )((int )params->stride_UV << 16) | ostride; } else { } iowrite32(swidth, (void *)(& regs->SWIDTH)); iowrite32(swidthsw, (void *)(& regs->SWIDTHSW)); iowrite32(sheight, (void *)(& regs->SHEIGHT)); iowrite32(ostride, (void *)(& regs->OSTRIDE)); scale_changed = update_scaling_factors(overlay, regs, params); update_colorkey(overlay, regs); tmp___9 = overlay_cmd_reg(params); iowrite32(tmp___9, (void *)(& regs->OCMD)); intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_continue(overlay, (int )scale_changed); if (ret != 0) { goto out_unpin; } else { } i915_gem_track_fb(overlay->vid_bo, new_bo, (unsigned int )(1 << ((int )pipe * 4 + 3))); overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; intel_frontbuffer_flip(dev, (unsigned int )(1 << ((int )pipe * 4 + 3))); return (0); out_unpin: i915_gem_object_ggtt_unpin(new_bo); return (ret); } } /* Begin intel_overlay_switch_off() (continues on the next line). */ int intel_overlay_switch_off(struct intel_overlay *overlay ) { struct overlay_registers *regs ; struct drm_device *dev ; int ret ; int __ret_warn_on ; int tmp ; long tmp___0 ; int __ret_warn_on___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; { dev = overlay->dev; tmp = mutex_is_locked(& dev->struct_mutex); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 831, "WARN_ON(!mutex_is_locked(&dev->struct_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = 
/* Tail of intel_overlay_switch_off(): asserts the modeset connection_mutex is
 * held, recovers from any pending interrupt/flip, returns 0 if the overlay is
 * already inactive, otherwise releases the old buffer, zeroes OCMD, and turns
 * the overlay off (intel_overlay_off + intel_overlay_off_tail). Followed by the
 * head of check_overlay_possible_on_crtc() (continues on the next line). */
drm_modeset_is_locked(& dev->mode_config.connection_mutex); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } __ret_warn_on___0 = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 832, "WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) { return (ret); } else { } if (! overlay->active) { return (0); } else { } ret = intel_overlay_release_old_vid(overlay); if (ret != 0) { return (ret); } else { } regs = intel_overlay_map_regs(overlay); iowrite32(0U, (void *)(& regs->OCMD)); intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_off(overlay); if (ret != 0) { return (ret); } else { } intel_overlay_off_tail(overlay); return (0); } } /* Begin check_overlay_possible_on_crtc() (continues on the next line). */ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay , struct intel_crtc *crtc ) { { if (! 
/* Tail of check_overlay_possible_on_crtc(): rejects inactive or double-wide
 * CRTCs with -22 (-EINVAL). update_pfit_vscale_ratio(): reads the panel-fitter
 * control/ratio registers (path depends on hardware generation and the
 * VERT_INTERP bit) and caches the vertical scale ratio. check_overlay_dst():
 * validates the destination rectangle against the CRTC mode. check_overlay_scaling():
 * rejects down-scaling factors above 7x in either axis. Head of check_overlay_src(). */
crtc->active) { return (-22); } else { } if ((int )(crtc->config)->double_wide) { return (-22); } else { } return (0); } } /* Cache the panel fitter's vertical scaling ratio for dst coordinate fixup. */ static void update_pfit_vscale_ratio(struct intel_overlay *overlay ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 pfit_control ; uint32_t tmp ; u32 ratio ; uint32_t tmp___0 ; struct drm_i915_private *__p ; { dev = overlay->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), 1); pfit_control = tmp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397876U), 1); ratio = tmp___0 >> 16; } else { if ((pfit_control & 512U) != 0U) { ratio = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397880U), 1); } else { ratio = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397876U), 1); } ratio = ratio >> 20; } overlay->pfit_vscale_ratio = ratio; return; } } /* Validate that the destination rectangle fits inside the CRTC mode. */ static int check_overlay_dst(struct intel_overlay *overlay , struct drm_intel_overlay_put_image *rec ) { struct drm_display_mode *mode ; { mode = & (overlay->crtc)->base.mode; if ((((int )rec->dst_x < mode->hdisplay && (int )rec->dst_x + (int )rec->dst_width <= mode->hdisplay) && (int )rec->dst_y < mode->vdisplay) && (int )rec->dst_y + (int )rec->dst_height <= mode->vdisplay) { return (0); } else { return (-22); } } } /* Reject scaling ratios above the hardware's 7x down-scale limit. */ static int check_overlay_scaling(struct put_image_params *rec ) { u32 tmp ; { tmp = (u32 )(((int )rec->src_scan_h << 16) / (int )rec->dst_h >> 16); if (tmp > 7U) { return (-22); } else { } tmp = (u32 )(((int )rec->src_scan_w << 16) / (int )rec->dst_w >> 16); if (tmp > 7U) { return (-22); } else { } return (0); } } static int 
/* check_overlay_src(): validates the source image description - size limits that
 * depend on device id (9570U/13687U get 1024x1088, others 2048x2046), minimum
 * dimensions, per-format constraints (packed formats require uv_vscale == 1 and
 * zero UV plane fields; planar formats need valid subsampling), stride alignment
 * masks, per-generation stride minimums/maximums, and that every plane's
 * offset + size fits inside the backing GEM object. Returns -22 (-EINVAL) on any
 * violation. Followed by the head of intel_panel_fitter_pipe()
 * (continues on the next line). */
check_overlay_src(struct drm_device *dev , struct drm_intel_overlay_put_image *rec , struct drm_i915_gem_object *new_bo ) { int uv_hscale ; int tmp ; int uv_vscale ; int tmp___0 ; u32 stride_mask ; int depth ; u32 tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; int tmp___2 ; { tmp = uv_hsubsampling(rec->flags); uv_hscale = tmp; tmp___0 = uv_vsubsampling(rec->flags); uv_vscale = tmp___0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 9570U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 13687U) { _L: /* CIL Label */ if ((unsigned int )rec->src_height > 1088U || (unsigned int )rec->src_width > 1024U) { return (-22); } else { } } else if ((unsigned int )rec->src_height > 2046U || (unsigned int )rec->src_width > 2048U) { return (-22); } else { } } if ((unsigned int )rec->src_height <= 11U || (unsigned int )rec->src_width <= 19U) { return (-22); } else { } switch (rec->flags & 255U) { case 3U: ; return (-22); case 2U: ; if (uv_vscale != 1) { return (-22); } else { } depth = packed_depth_bytes(rec->flags); if (depth < 0) { return (depth); } else { } rec->stride_UV = 0U; rec->offset_U = 0U; rec->offset_V = 0U; if (rec->offset_Y % (__u32 )depth != 0U) { return (-22); } else { } goto ldv_48357; case 1U: ; if (uv_vscale < 0 || uv_hscale < 0) { return (-22); } else { } goto ldv_48357; default: ; return (-22); } ldv_48357: ; if ((int )rec->src_width % uv_hscale != 0) { return (-22); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 13687U) { stride_mask = 255U; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 9570U) { stride_mask = 255U; } else { 
stride_mask = 63U; } } if (((u32 )rec->stride_Y & stride_mask) != 0U || ((u32 )rec->stride_UV & stride_mask) != 0U) { return (-22); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 4U && (unsigned int )rec->stride_Y <= 511U) { return (-22); } else { } tmp___1 = (rec->flags & 255U) == 1U ? 4096U : 8192U; if ((u32 )rec->stride_Y > tmp___1 || (unsigned int )rec->stride_UV > 2048U) { return (-22); } else { } switch (rec->flags & 255U) { case 3U: ; case 2U: tmp___2 = packed_width_bytes(rec->flags, (int )((short )rec->src_width)); if (tmp___2 > (int )rec->stride_Y) { return (-22); } else { } tmp___1 = (u32 )((int )rec->stride_Y * (int )rec->src_height); if ((size_t )(rec->offset_Y + tmp___1) > new_bo->base.size) { return (-22); } else { } goto ldv_48380; case 1U: ; if ((int )rec->src_width > (int )rec->stride_Y) { return (-22); } else { } if ((int )rec->src_width / uv_hscale > (int )rec->stride_UV) { return (-22); } else { } tmp___1 = (u32 )((int )rec->stride_Y * (int )rec->src_height); if ((size_t )(rec->offset_Y + tmp___1) > new_bo->base.size) { return (-22); } else { } tmp___1 = (u32 )((int )rec->stride_UV * ((int )rec->src_height / uv_vscale)); if ((size_t )(rec->offset_U + tmp___1) > new_bo->base.size || (size_t )(rec->offset_V + tmp___1) > new_bo->base.size) { return (-22); } else { } goto ldv_48380; } ldv_48380: ; return (0); } } /* Begin intel_panel_fitter_pipe() (continues on the next line). */ static int intel_panel_fitter_pipe(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; u32 pfit_control ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 3U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 13687U) { return (-1); } else { __p___1 = 
/* Tail of intel_panel_fitter_pipe(): returns -1 when no panel fitter is enabled,
 * the fitter's pipe (bits 30:29) on gen4, else pipe 1 (B). Then the main ioctl
 * entry point intel_overlay_put_image(): validates the overlay exists, handles
 * the "disable" flag by switching the overlay off, looks up the target CRTC and
 * GEM buffer, rejects tiled buffers, recovers from pending flips, re-binds the
 * overlay to a new CRTC (configuring panel-fitter vertical scaling when the mode
 * is wider than 1024), validates destination/source/scaling parameters, copies
 * them into a put_image_params, and performs the flip; errors unwind through
 * out_unlock/out_free. Then update_reg_attrs(): writes brightness/contrast/
 * saturation to OCLRC0/OCLRC1; and check_gamma_bounds(): verifies each gamma
 * byte channel is strictly increasing and the top byte is clear. */
to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) == 0U) { return (-1); } else { } } } else { } pfit_control = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), 1); if ((int )pfit_control >= 0) { return (-1); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 4U) { return ((int )(pfit_control >> 29) & 3); } else { } return (1); } } /* ioctl handler: validate and perform an overlay put-image (flip) request. */ int intel_overlay_put_image(struct drm_device *dev , void *data , struct drm_file *file_priv ) { struct drm_intel_overlay_put_image *put_image_rec ; struct drm_i915_private *dev_priv ; struct intel_overlay *overlay ; struct drm_crtc *drmmode_crtc ; struct intel_crtc *crtc ; struct drm_i915_gem_object *new_bo ; struct put_image_params *params ; int ret ; long tmp ; void *tmp___0 ; struct drm_crtc const *__mptr ; struct drm_gem_object const *__mptr___0 ; struct drm_gem_object *tmp___1 ; long tmp___2 ; struct drm_display_mode *mode ; int tmp___3 ; { put_image_rec = (struct drm_intel_overlay_put_image *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; overlay = dev_priv->overlay; if ((unsigned long )overlay == (unsigned long )((struct intel_overlay *)0)) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_overlay_put_image", "userspace bug: no overlay\n"); } else { } return (-19); } else { } if ((put_image_rec->flags & 16777216U) == 0U) { drm_modeset_lock_all(dev); mutex_lock_nested(& dev->struct_mutex, 0U); ret = intel_overlay_switch_off(overlay); mutex_unlock(& dev->struct_mutex); drm_modeset_unlock_all(dev); return (ret); } else { } tmp___0 = kmalloc(36UL, 208U); params = (struct put_image_params *)tmp___0; if ((unsigned long )params == (unsigned long )((struct put_image_params *)0)) { return (-12); } else { } drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id); if ((unsigned long 
)drmmode_crtc == (unsigned long )((struct drm_crtc *)0)) { ret = -2; goto out_free; } else { } __mptr = (struct drm_crtc const *)drmmode_crtc; crtc = (struct intel_crtc *)__mptr; tmp___1 = drm_gem_object_lookup(dev, file_priv, put_image_rec->bo_handle); __mptr___0 = (struct drm_gem_object const *)tmp___1; new_bo = (struct drm_i915_gem_object *)__mptr___0; if ((unsigned long )(& new_bo->base) == (unsigned long )((struct drm_gem_object *)0)) { ret = -2; goto out_free; } else { } drm_modeset_lock_all(dev); mutex_lock_nested(& dev->struct_mutex, 0U); if ((unsigned int )*((unsigned char *)new_bo + 409UL) != 0U) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_overlay_put_image", "buffer used for overlay image can not be tiled\n"); } else { } ret = -22; goto out_unlock; } else { } ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) { goto out_unlock; } else { } if ((unsigned long )overlay->crtc != (unsigned long )crtc) { mode = & crtc->base.mode; ret = intel_overlay_switch_off(overlay); if (ret != 0) { goto out_unlock; } else { } ret = check_overlay_possible_on_crtc(overlay, crtc); if (ret != 0) { goto out_unlock; } else { } overlay->crtc = crtc; crtc->overlay = overlay; if (mode->hdisplay > 1024) { tmp___3 = intel_panel_fitter_pipe(dev); if (tmp___3 == (int )crtc->pipe) { overlay->pfit_active = 1; update_pfit_vscale_ratio(overlay); } else { overlay->pfit_active = 0; } } else { overlay->pfit_active = 0; } } else { } ret = check_overlay_dst(overlay, put_image_rec); if (ret != 0) { goto out_unlock; } else { } if ((int )overlay->pfit_active) { params->dst_y = (short )(((unsigned int )put_image_rec->dst_y << 12) / overlay->pfit_vscale_ratio); params->dst_h = (short )((unsigned int )((unsigned short )(((unsigned int )put_image_rec->dst_height << 12) / overlay->pfit_vscale_ratio)) + 1U); } else { params->dst_y = (short )put_image_rec->dst_y; params->dst_h = (short )put_image_rec->dst_height; } 
params->dst_x = (short )put_image_rec->dst_x; params->dst_w = (short )put_image_rec->dst_width; params->src_w = (short )put_image_rec->src_width; params->src_h = (short )put_image_rec->src_height; params->src_scan_w = (short )put_image_rec->src_scan_width; params->src_scan_h = (short )put_image_rec->src_scan_height; if ((int )params->src_scan_h > (int )params->src_h || (int )params->src_scan_w > (int )params->src_w) { ret = -22; goto out_unlock; } else { } ret = check_overlay_src(dev, put_image_rec, new_bo); if (ret != 0) { goto out_unlock; } else { } params->format = (int )put_image_rec->flags & 16777215; params->stride_Y = (short )put_image_rec->stride_Y; params->stride_UV = (short )put_image_rec->stride_UV; params->offset_Y = (int )put_image_rec->offset_Y; params->offset_U = (int )put_image_rec->offset_U; params->offset_V = (int )put_image_rec->offset_V; ret = check_overlay_scaling(params); if (ret != 0) { goto out_unlock; } else { } ret = intel_overlay_do_put_image(overlay, new_bo, params); if (ret != 0) { goto out_unlock; } else { } mutex_unlock(& dev->struct_mutex); drm_modeset_unlock_all(dev); kfree((void const *)params); return (0); out_unlock: mutex_unlock(& dev->struct_mutex); drm_modeset_unlock_all(dev); drm_gem_object_unreference_unlocked___3(& new_bo->base); out_free: kfree((void const *)params); return (ret); } } /* Write brightness/contrast and saturation into the OCLRC registers. */ static void update_reg_attrs(struct intel_overlay *overlay , struct overlay_registers *regs ) { { iowrite32((overlay->contrast << 18) | (overlay->brightness & 255U), (void *)(& regs->OCLRC0)); iowrite32(overlay->saturation, (void *)(& regs->OCLRC1)); return; } } /* Check gamma1 < gamma2 per byte channel, and that the top byte is zero. */ static bool check_gamma_bounds(u32 gamma1 , u32 gamma2 ) { int i ; { if ((gamma1 & 4278190080U) != 0U || (gamma2 & 4278190080U) != 0U) { return (0); } else { } i = 0; goto ldv_48442; ldv_48441: ; if (((gamma1 >> i * 8) & 255U) >= ((gamma2 >> i * 8) & 255U)) { return (0); } else { } i = i + 1; ldv_48442: ; if (i <= 2) { goto ldv_48441; } else { } return (1); } } static bool 
/* check_gamma5_errata(): rejects a gamma5 value with any byte channel equal to
 * 128 (0x80). check_gamma(): validates the full gamma ramp is strictly increasing
 * from 0 through gamma0..gamma5 to 0xFFFFFF and applies the gamma5 errata check;
 * returns -22 (-EINVAL) on failure. intel_overlay_attrs(): the attrs ioctl -
 * either reads back color key/brightness/contrast/saturation (plus the six gamma
 * registers on non-gen2) or validates and writes new values, updating the OCLRC
 * registers and, when requested and the overlay is inactive, the gamma registers.
 * Then the head of intel_setup_overlay(), whose body runs past the end of this
 * chunk: allocates the overlay, reserves/pins the 4KiB register buffer object
 * (phys attach vs. GGTT pin depending on device flags) and seeds default
 * color-key/brightness/contrast/saturation values. */
check_gamma5_errata(u32 gamma5 ) { int i ; { i = 0; goto ldv_48449; ldv_48448: ; if (((gamma5 >> i * 8) & 255U) == 128U) { return (0); } else { } i = i + 1; ldv_48449: ; if (i <= 2) { goto ldv_48448; } else { } return (1); } } /* Validate the whole user-supplied gamma ramp; -22 (= -EINVAL) if invalid. */ static int check_gamma(struct drm_intel_overlay_attrs *attrs ) { bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; bool tmp___5 ; int tmp___6 ; bool tmp___7 ; int tmp___8 ; bool tmp___9 ; int tmp___10 ; bool tmp___11 ; int tmp___12 ; bool tmp___13 ; int tmp___14 ; { tmp = check_gamma_bounds(0U, attrs->gamma0); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-22); } else { tmp___1 = check_gamma_bounds(attrs->gamma0, attrs->gamma1); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (-22); } else { tmp___3 = check_gamma_bounds(attrs->gamma1, attrs->gamma2); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (-22); } else { tmp___5 = check_gamma_bounds(attrs->gamma2, attrs->gamma3); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { return (-22); } else { tmp___7 = check_gamma_bounds(attrs->gamma3, attrs->gamma4); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { return (-22); } else { tmp___9 = check_gamma_bounds(attrs->gamma4, attrs->gamma5); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { return (-22); } else { tmp___11 = check_gamma_bounds(attrs->gamma5, 16777215U); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { return (-22); } else { } } } } } } } tmp___13 = check_gamma5_errata(attrs->gamma5); if (tmp___13) { tmp___14 = 0; } else { tmp___14 = 1; } if (tmp___14) { return (-22); } else { } return (0); } } /* ioctl handler: get or set overlay color attributes and gamma ramp. */ int intel_overlay_attrs(struct drm_device *dev , void *data , struct drm_file *file_priv ) { struct drm_intel_overlay_attrs *attrs ; struct drm_i915_private *dev_priv ; struct intel_overlay *overlay ; struct overlay_registers *regs ; int ret ; long tmp ; struct 
drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { attrs = (struct drm_intel_overlay_attrs *)data; dev_priv = (struct drm_i915_private *)dev->dev_private; overlay = dev_priv->overlay; if ((unsigned long )overlay == (unsigned long )((struct intel_overlay *)0)) { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_overlay_attrs", "userspace bug: no overlay\n"); } else { } return (-19); } else { } drm_modeset_lock_all(dev); mutex_lock_nested(& dev->struct_mutex, 0U); ret = -22; if ((attrs->flags & 1U) == 0U) { attrs->color_key = overlay->color_key; attrs->brightness = (__s32 )overlay->brightness; attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { attrs->gamma0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196644L, 1); attrs->gamma1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196640L, 1); attrs->gamma2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196636L, 1); attrs->gamma3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196632L, 1); attrs->gamma4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196628L, 1); attrs->gamma5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196624L, 1); } else { } } else { if (attrs->brightness < -128 || attrs->brightness > 127) { goto out_unlock; } else { } if (attrs->contrast > 255U) { goto out_unlock; } else { } if (attrs->saturation > 1023U) { goto out_unlock; } else { } overlay->color_key = attrs->color_key; overlay->brightness = (u32 )attrs->brightness; overlay->contrast = attrs->contrast; overlay->saturation = attrs->saturation; regs = intel_overlay_map_regs(overlay); if ((unsigned long )regs == (unsigned long )((struct overlay_registers *)0)) { ret = -12; goto out_unlock; } else { } update_reg_attrs(overlay, regs); intel_overlay_unmap_regs(overlay, regs); if ((attrs->flags & 2U) != 0U) { __p___0 = to_i915((struct 
drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 2U) { goto out_unlock; } else { } if ((int )overlay->active) { ret = -16; goto out_unlock; } else { } ret = check_gamma(attrs); if (ret != 0) { goto out_unlock; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 196644L, attrs->gamma0, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 196640L, attrs->gamma1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 196636L, attrs->gamma2, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 196632L, attrs->gamma3, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 196628L, attrs->gamma4, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 196624L, attrs->gamma5, 1); } else { } } overlay->color_key_enabled = (attrs->flags & 4U) == 0U; ret = 0; out_unlock: mutex_unlock(& dev->struct_mutex); drm_modeset_unlock_all(dev); return (ret); } } /* Begin intel_setup_overlay() (runs past the end of this chunk). */ void intel_setup_overlay(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_overlay *overlay ; struct drm_i915_gem_object *reg_bo ; struct overlay_registers *regs ; int ret ; struct drm_i915_private *__p ; void *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p___0 ; unsigned long tmp___2 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return; } else { } tmp = kzalloc(96UL, 208U); overlay = (struct intel_overlay *)tmp; if ((unsigned long )overlay == (unsigned long )((struct intel_overlay *)0)) { return; } else { } mutex_lock_nested(& dev->struct_mutex, 0U); __ret_warn_on = (unsigned long )dev_priv->overlay != (unsigned long )((struct intel_overlay *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
/* Continuation of intel_setup_overlay(): WARN_ON(dev_priv->overlay), then
 * register-bo setup. On "phys" platforms (byte at dev_priv offset 46 non-zero)
 * the bo is attached as a phys object and flip_addr comes from its busaddr;
 * otherwise the bo is pinned in the GGTT and moved to the GTT domain.
 * Defaults then cached: color_key=0x101fe, brightness=-19 (as u32),
 * contrast=75, saturation=146; the 1792-byte register file is zeroed with
 * memset_io before update_polyphase_filter()/update_reg_attrs().
 * Error unwind uses the classic goto chain: out_unpin_bo -> out_free_bo ->
 * out_free. */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 1362, "WARN_ON(dev_priv->overlay)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { goto out_free; } else { } overlay->dev = dev; reg_bo = (struct drm_i915_gem_object *)0; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) == 0U) { reg_bo = i915_gem_object_create_stolen(dev, 4096U); } else { } if ((unsigned long )reg_bo == (unsigned long )((struct drm_i915_gem_object *)0)) { reg_bo = i915_gem_alloc_object(dev, 4096UL); } else { } if ((unsigned long )reg_bo == (unsigned long )((struct drm_i915_gem_object *)0)) { goto out_free; } else { } overlay->reg_bo = reg_bo; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) != 0U) { ret = i915_gem_object_attach_phys(reg_bo, 4096); if (ret != 0) { drm_err("failed to attach phys overlay regs\n"); goto out_free_bo; } else { } overlay->flip_addr = (u32 )(reg_bo->__annonCompField84.phys_handle)->busaddr; } else { ret = i915_gem_obj_ggtt_pin(reg_bo, 4096U, 1U); if (ret != 0) { drm_err("failed to pin overlay register bo\n"); goto out_free_bo; } else { } tmp___2 = i915_gem_obj_ggtt_offset(reg_bo); overlay->flip_addr = (u32 )tmp___2; ret = i915_gem_object_set_to_gtt_domain(reg_bo, 1); if (ret != 0) { drm_err("failed to move overlay register bo into the GTT\n"); goto out_unpin_bo; } else { } } overlay->color_key = 66046U; overlay->color_key_enabled = 1U; overlay->brightness = 4294967277U; overlay->contrast = 75U; overlay->saturation = 146U; regs = intel_overlay_map_regs(overlay); if ((unsigned long )regs == (unsigned long )((struct overlay_registers *)0)) { goto out_unpin_bo; } else { } memset_io((void volatile *)regs, 0, 1792UL); 
update_polyphase_filter(regs); update_reg_attrs(overlay, regs); intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; mutex_unlock(& dev->struct_mutex); printk("\016[drm] initialized overlay support\n"); return; out_unpin_bo: __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 46UL) == 0U) { i915_gem_object_ggtt_unpin(reg_bo); } else { } out_free_bo: drm_gem_object_unreference___13(& reg_bo->base); out_free: mutex_unlock(& dev->struct_mutex); kfree((void const *)overlay); return; } }
/* intel_cleanup_overlay(): no-op if no overlay was set up; WARN_ON the overlay
 * still being active, drop the register-bo reference (unlocked variant) and
 * kfree the overlay state. Note dev_priv->overlay is not cleared here. */
void intel_cleanup_overlay(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->overlay == (unsigned long )((struct intel_overlay *)0)) { return; } else { } __ret_warn_on = (int )(dev_priv->overlay)->active; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_overlay.c", 1441, "WARN_ON(dev_priv->overlay->active)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); drm_gem_object_unreference_unlocked___3(& ((dev_priv->overlay)->reg_bo)->base); kfree((void const *)dev_priv->overlay); return; } }
/* intel_overlay_map_regs_atomic(): phys platforms return the phys handle's
 * kernel vaddr directly; otherwise the GGTT offset is mapped through the
 * mappable aperture with io_mapping_map_atomic_wc() (continues next chunk). */
static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay ) { struct drm_i915_private *dev_priv ; struct overlay_registers *regs ; unsigned long tmp ; void *tmp___0 ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)(overlay->dev)->dev_private; __p = to_i915((struct drm_device const *)overlay->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { regs = (struct overlay_registers *)((overlay->reg_bo)->__annonCompField84.phys_handle)->vaddr; } else { tmp = i915_gem_obj_ggtt_offset(overlay->reg_bo); 
/* Tail of intel_overlay_map_regs_atomic(): atomic WC mapping of the register
 * page through the GTT mappable aperture. */
tmp___0 = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, tmp); regs = (struct overlay_registers *)tmp___0; } return (regs); } }
/* intel_overlay_unmap_regs_atomic(): undo the atomic kmap, but only on the
 * non-phys path (phys mapping was a plain vaddr, nothing to unmap). */
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay , struct overlay_registers *regs ) { struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)overlay->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { __kunmap_atomic((void *)regs); } else { } return; } }
/* intel_overlay_capture_error_state(): snapshot overlay state for error dumps.
 * Returns NULL if no overlay or not active. Allocates 1808 bytes (GFP_ATOMIC
 * flavor, flags 32U), records DOVSTA (0x30008) and ISR (0x20ac), the register
 * file base (phys vaddr or GGTT offset), then copies the 1792-byte register
 * file with memcpy_fromio under the atomic mapping. Frees and returns NULL on
 * mapping failure. Caller owns the returned allocation. */
struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_overlay *overlay ; struct intel_overlay_error_state *error ; struct overlay_registers *regs ; void *tmp ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; overlay = dev_priv->overlay; if ((unsigned long )overlay == (unsigned long )((struct intel_overlay *)0) || ! overlay->active) { return ((struct intel_overlay_error_state *)0); } else { } tmp = kmalloc(1808UL, 32U); error = (struct intel_overlay_error_state *)tmp; if ((unsigned long )error == (unsigned long )((struct intel_overlay_error_state *)0)) { return ((struct intel_overlay_error_state *)0); } else { } error->dovsta = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 196616L, 1); error->isr = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 8364L, 1); __p = to_i915((struct drm_device const *)overlay->dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { error->base = (unsigned long )((overlay->reg_bo)->__annonCompField84.phys_handle)->vaddr; } else { error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); } regs = intel_overlay_map_regs_atomic(overlay); if ((unsigned long )regs == (unsigned long )((struct overlay_registers *)0)) { goto err; } else { } memcpy_fromio((void *)(& error->regs), (void const volatile *)regs, 1792UL); intel_overlay_unmap_regs_atomic(overlay, regs); return (error); err: kfree((void const *)error); return ((struct intel_overlay_error_state *)0); } } void 
/* intel_overlay_print_error_state(): pretty-print a captured overlay error
 * snapshot into the i915 error-state buffer — one line per hardware register
 * field (OBUF/OSTRIDE/scaler/color-key/OCONFIG/OCMD/OSTART/OTILEOFF/...). */
intel_overlay_print_error_state(struct drm_i915_error_state_buf *m , struct intel_overlay_error_state *error ) { { i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", error->dovsta, error->isr); i915_error_printf(m, " Register file at 0x%08lx:\n", error->base); i915_error_printf(m, " OBUF_0Y:\t0x%08x\n", error->regs.OBUF_0Y); i915_error_printf(m, " OBUF_1Y:\t0x%08x\n", error->regs.OBUF_1Y); i915_error_printf(m, " OBUF_0U:\t0x%08x\n", error->regs.OBUF_0U); i915_error_printf(m, " OBUF_0V:\t0x%08x\n", error->regs.OBUF_0V); i915_error_printf(m, " OBUF_1U:\t0x%08x\n", error->regs.OBUF_1U); i915_error_printf(m, " OBUF_1V:\t0x%08x\n", error->regs.OBUF_1V); i915_error_printf(m, " OSTRIDE:\t0x%08x\n", error->regs.OSTRIDE); i915_error_printf(m, " YRGB_VPH:\t0x%08x\n", error->regs.YRGB_VPH); i915_error_printf(m, " UV_VPH:\t0x%08x\n", error->regs.UV_VPH); i915_error_printf(m, " HORZ_PH:\t0x%08x\n", error->regs.HORZ_PH); i915_error_printf(m, " INIT_PHS:\t0x%08x\n", error->regs.INIT_PHS); i915_error_printf(m, " DWINPOS:\t0x%08x\n", error->regs.DWINPOS); i915_error_printf(m, " DWINSZ:\t0x%08x\n", error->regs.DWINSZ); i915_error_printf(m, " SWIDTH:\t0x%08x\n", error->regs.SWIDTH); i915_error_printf(m, " SWIDTHSW:\t0x%08x\n", error->regs.SWIDTHSW); i915_error_printf(m, " SHEIGHT:\t0x%08x\n", error->regs.SHEIGHT); i915_error_printf(m, " YRGBSCALE:\t0x%08x\n", error->regs.YRGBSCALE); i915_error_printf(m, " UVSCALE:\t0x%08x\n", error->regs.UVSCALE); i915_error_printf(m, " OCLRC0:\t0x%08x\n", error->regs.OCLRC0); i915_error_printf(m, " OCLRC1:\t0x%08x\n", error->regs.OCLRC1); i915_error_printf(m, " DCLRKV:\t0x%08x\n", error->regs.DCLRKV); i915_error_printf(m, " DCLRKM:\t0x%08x\n", error->regs.DCLRKM); i915_error_printf(m, " SCLRKVH:\t0x%08x\n", error->regs.SCLRKVH); i915_error_printf(m, " SCLRKVL:\t0x%08x\n", error->regs.SCLRKVL); i915_error_printf(m, " SCLRKEN:\t0x%08x\n", error->regs.SCLRKEN); i915_error_printf(m, " OCONFIG:\t0x%08x\n", error->regs.OCONFIG); 
i915_error_printf(m, " OCMD:\t0x%08x\n", error->regs.OCMD); i915_error_printf(m, " OSTART_0Y:\t0x%08x\n", error->regs.OSTART_0Y); i915_error_printf(m, " OSTART_1Y:\t0x%08x\n", error->regs.OSTART_1Y); i915_error_printf(m, " OSTART_0U:\t0x%08x\n", error->regs.OSTART_0U); i915_error_printf(m, " OSTART_0V:\t0x%08x\n", error->regs.OSTART_0V); i915_error_printf(m, " OSTART_1U:\t0x%08x\n", error->regs.OSTART_1U); i915_error_printf(m, " OSTART_1V:\t0x%08x\n", error->regs.OSTART_1V); i915_error_printf(m, " OTILEOFF_0Y:\t0x%08x\n", error->regs.OTILEOFF_0Y); i915_error_printf(m, " OTILEOFF_1Y:\t0x%08x\n", error->regs.OTILEOFF_1Y); i915_error_printf(m, " OTILEOFF_0U:\t0x%08x\n", error->regs.OTILEOFF_0U); i915_error_printf(m, " OTILEOFF_0V:\t0x%08x\n", error->regs.OTILEOFF_0V); i915_error_printf(m, " OTILEOFF_1U:\t0x%08x\n", error->regs.OTILEOFF_1U); i915_error_printf(m, " OTILEOFF_1V:\t0x%08x\n", error->regs.OTILEOFF_1V); i915_error_printf(m, " FASTHSCALE:\t0x%08x\n", error->regs.FASTHSCALE); i915_error_printf(m, " UVSCALEV:\t0x%08x\n", error->regs.UVSCALEV); return; } }
/* LDV environment-model wrappers: forward to the real workqueue API and also
 * drive the LDV work-state machine (activate_work_18 marks work #18 queued). */
bool ldv_queue_work_on_621(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_622(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_623(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = 
/* Continuation of ldv_queue_work_on_623(), then more LDV workqueue models:
 * flush (624) disables all modeled works via call_and_disable_all_18(2). */
queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_624(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_625(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/* arch_local_save_flags___15(): CIL copy of the x86 paravirt save_fl path —
 * BUG (ud2 + __bug_table entry) if the pv_irq_ops.save_fl op is NULL, else an
 * indirect paravirt call returning EFLAGS in eax. */
__inline static unsigned long arch_local_save_flags___15(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } bool ldv_queue_work_on_635(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_637(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; 
bool ldv_queue_delayed_work_on_636(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_639(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_638(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_640(struct delayed_work *ldv_func_arg1 ) ; __inline static bool queue_delayed_work___3(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_636(8192, wq, dwork, delay); return (tmp); } } __inline static bool schedule_delayed_work___1(struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work___3(system_wq, dwork, delay); return (tmp); } } void invoke_work_16(void) ; void activate_work_16(struct work_struct *work , int state ) ; void call_and_disable_work_16(struct work_struct *work ) ; void call_and_disable_all_16(int state ) ; void disable_work_16(struct work_struct *work ) ;
/* drm_can_sleep___7(): CIL copy of drm_can_sleep() — false when preempt_count
 * is non-zero, when running on the kgdb-active CPU, or when IRQs are disabled.
 * The switch ladder is the expanded this_cpu_read(cpu_number) percpu accessor;
 * the string below is split across the original chunk line boundary and must
 * stay byte-identical. */
__inline static bool drm_can_sleep___7(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39633; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39633; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39633; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39633; default: __bad_percpu_size(); } ldv_39633: pscr_ret__ = pfo_ret__; goto ldv_39639; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb 
%%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39643; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39643; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39643; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39643; default: __bad_percpu_size(); } ldv_39643: pscr_ret__ = pfo_ret_____0; goto ldv_39639; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39652; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39652; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39652; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39652; default: __bad_percpu_size(); } ldv_39652: pscr_ret__ = pfo_ret_____1; goto ldv_39639; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39661; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39661; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39661; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39661; default: __bad_percpu_size(); } ldv_39661: pscr_ret__ = pfo_ret_____2; goto ldv_39639; default: __bad_size_call_parameter(); goto ldv_39639; } ldv_39639: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___15(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } extern ssize_t drm_dp_dpcd_write(struct drm_dp_aux * , unsigned int , void * , size_t ) ;
/* drm_dp_dpcd_writeb(): write one byte to a DPCD register over the AUX channel
 * (value passed by address with length 1; call completes on the next line). */
__inline static ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux , unsigned int offset , u8 value ) { ssize_t tmp ; { tmp = drm_dp_dpcd_write(aux, offset, 
(void *)(& value), 1UL); return (tmp); } }
/* dp_to_dig_port(): container_of — back up from the embedded intel_dp to its
 * intel_digital_port (offset -0xe0, expressed as +0xffffffffffffff20UL). */
__inline static struct intel_digital_port *dp_to_dig_port(struct intel_dp *intel_dp ) { struct intel_dp const *__mptr ; { __mptr = (struct intel_dp const *)intel_dp; return ((struct intel_digital_port *)__mptr + 0xffffffffffffff20UL); } } uint32_t intel_dp_pack_aux(uint8_t const *src , int src_bytes ) ; void intel_psr_enable(struct intel_dp *intel_dp ) ; void intel_psr_disable(struct intel_dp *intel_dp ) ;
/* is_edp_psr(): bit 0 of the sink's cached PSR DPCD byte = PSR supported. */
static bool is_edp_psr(struct intel_dp *intel_dp ) { { return (((int )intel_dp->psr_dpcd[0] & 1) != 0); } }
/* vlv_is_psr_active_on_pipe(): read the per-pipe PSR status register
 * (0x1e0114 + pipe*0x1000), mask the 3-bit state; states 3 and 4 count as
 * active. */
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev , int pipe ) { struct drm_i915_private *dev_priv ; uint32_t val ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 1966228), 1); val = tmp & 7U; return ((bool )(val == 3U || val == 4U)); } }
/* intel_psr_write_vsc(): program the 36-byte VSC SDP into the video DIP data
 * registers of the attached transcoder (register addresses computed from the
 * per-transcoder offset table), zero-padding the remainder, then enable the
 * DIP via the ctl register write (0x100000). Posting reads after each ctl
 * write. */
static void intel_psr_write_vsc(struct intel_dp *intel_dp , struct edp_vsc_psr *vsc_psr ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; u32 ctl_reg ; u32 data_reg ; uint32_t *data ; unsigned int i ; uint32_t *tmp___0 ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; dev = dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)dig_port->base.base.crtc; crtc = (struct intel_crtc *)__mptr; ctl_reg = ((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )(crtc->config)->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393728U; data_reg = ((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )(crtc->config)->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394016U; data = (uint32_t *)vsc_psr; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 
(off_t )ctl_reg, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ctl_reg, 0); i = 0U; goto ldv_48005; ldv_48004: ; if (i <= 35U) { tmp___0 = data; data = data + 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(data_reg + i), *tmp___0, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(data_reg + i), 0U, 1); } i = i + 4U; ldv_48005: ; if (i <= 35U) { goto ldv_48004; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ctl_reg, 1048576U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ctl_reg, 0); return; } }
/* vlv_psr_setup_vsc(): set the VSC-enable bit pattern (clear top two bits,
 * set bit 30) in the per-pipe register at 0x1e0120 + pipe*0x1000. */
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; struct drm_crtc const *__mptr ; uint32_t val ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = intel_dig_port->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 1966240), 1); val = val & 1073741823U; val = val | 1073741824U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 1966240), val, 1); return; } }
/* skl/hsw VSC SDP headers differ only in HB2/HB3 (revision / length bytes). */
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp ) { struct edp_vsc_psr psr_vsc ; { memset((void *)(& psr_vsc), 0, 36UL); psr_vsc.sdp_header.HB0 = 0U; psr_vsc.sdp_header.HB1 = 7U; psr_vsc.sdp_header.HB2 = 3U; psr_vsc.sdp_header.HB3 = 11U; intel_psr_write_vsc(intel_dp, & psr_vsc); return; } } static void hsw_psr_setup_vsc(struct intel_dp *intel_dp ) { struct edp_vsc_psr psr_vsc ; { memset((void *)(& psr_vsc), 0, 36UL); psr_vsc.sdp_header.HB0 = 0U; psr_vsc.sdp_header.HB1 = 7U; psr_vsc.sdp_header.HB2 = 2U; psr_vsc.sdp_header.HB3 = 8U; intel_psr_write_vsc(intel_dp, & psr_vsc); 
return; } }
/* vlv_psr_enable_sink(): DPCD 0x170 <- 3 (PSR enable bits on the sink). */
static void vlv_psr_enable_sink(struct intel_dp *intel_dp ) { { drm_dp_dpcd_writeb(& intel_dp->aux, 368U, 3); return; } }
/* hsw_psr_enable_sink(): enable PSR on the sink (DPCD 0x170), optionally
 * aux frame sync (DPCD 0x117), then preload the 5-byte "enable PSR" AUX
 * message into the hardware AUX data registers. Register addresses are
 * gen/platform dependent (gen<=8 chooses between two banks, gen9+ uses
 * 0x64014/0x64010); continues on the next chunk line. */
static void hsw_psr_enable_sink(struct intel_dp *intel_dp ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t aux_clock_divider ; uint32_t aux_data_reg ; uint32_t aux_ctl_reg ; int precharge ; uint8_t aux_msg[5U] ; int i ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; uint32_t tmp___0 ; uint32_t val ; struct drm_i915_private *__p___5 ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; dev = dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; precharge = 3; aux_msg[0] = 128U; aux_msg[1] = 6U; aux_msg[2] = 0U; aux_msg[3] = 0U; aux_msg[4] = 1U; aux_clock_divider = (*(intel_dp->get_aux_clock_divider))(intel_dp, 0); drm_dp_dpcd_writeb(& intel_dp->aux, 368U, 1); if ((int )dev_priv->psr.aux_frame_sync) { drm_dp_dpcd_writeb(& intel_dp->aux, 279U, 1); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 8U) { __p___0 = to_i915((struct drm_device const *)dev); aux_data_reg = (unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U ? 411668U : 456724U; } else { aux_data_reg = 409620U; } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) <= 8U) { __p___3 = to_i915((struct drm_device const *)dev); aux_ctl_reg = (unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U ? 
/* Tail of hsw_psr_enable_sink(): pack the AUX message into 32-bit words with
 * intel_dp_pack_aux(), then program the AUX control register — gen9+ uses a
 * read-modify-write with several masked fields, older gens a direct write of
 * precharge | clock divider | 0x500000. Final DPCD 0x170 <- 1 re-asserts
 * sink-side PSR enable. */
411664U : 456720U; } else { aux_ctl_reg = 409616U; } i = 0; goto ldv_48066; ldv_48065: tmp___0 = intel_dp_pack_aux((uint8_t const *)(& aux_msg) + (unsigned long )i, (int )(5U - (unsigned int )i)); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(aux_data_reg + (uint32_t )i), tmp___0, 1); i = i + 4; ldv_48066: ; if ((unsigned int )i <= 4U) { goto ldv_48065; } else { } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 8U) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )aux_ctl_reg, 1); val = val & 4093640703U; val = val | 201326592U; val = val & 4262461439U; val = val | 5242880U; val = val & 4294950911U; val = val & 4294959103U; val = val & 4294963199U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aux_ctl_reg, val, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )aux_ctl_reg, ((uint32_t )(precharge << 16) | aux_clock_divider) | 5242880U, 1); } drm_dp_dpcd_writeb(& intel_dp->aux, 368U, 1); return; } }
/* vlv_psr_enable_source(): write 0x205 into the per-pipe PSR control register
 * (0x1e0110 + pipe*0x1000) — configures but does not yet activate PSR. */
static void vlv_psr_enable_source(struct intel_dp *intel_dp ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; struct drm_crtc const *__mptr ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; dev = dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dig_port->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 1966224), 517U, 1); return; } }
/* vlv_psr_activate(): set bit 8 (activate) in the same per-pipe PSR control
 * register via read-modify-write. */
static void vlv_psr_activate(struct intel_dp *intel_dp ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; struct drm_crtc const *__mptr ; uint32_t tmp___0 ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; 
dev = dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dig_port->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 1966224), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 1966224), tmp___0 | 256U, 1); return; } }
/* hsw_psr_enable_source(): compose the EDP_PSR_CTL value — idle-frame count
 * (from VBT, +1, min 2; +4 when the sink needs link-standby training per
 * psr_dpcd[1] bit 0), max sleep time 31, link-standby bits, then write with
 * the enable bit (bit 31) set. Register address is platform-dependent
 * (0x64800 vs 0x6f800). When psr2 is supported, also writes EDP_PSR2_CTL
 * (0x6f900). */
static void hsw_psr_enable_source(struct intel_dp *intel_dp ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t max_sleep_time ; uint32_t idle_frames ; uint32_t val ; uint32_t link_entry_time ; uint32_t tmp___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; dev = dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; max_sleep_time = 31U; idle_frames = dev_priv->vbt.psr.idle_frames != 0 ? (uint32_t )(dev_priv->vbt.psr.idle_frames + 1) : 2U; val = 0U; link_entry_time = 0U; if ((int )intel_dp->psr_dpcd[1] & 1) { val = val | 16U; val = val | 768U; val = val | 4096U; idle_frames = idle_frames + 4U; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { tmp___0 = link_entry_time; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) != 8U) { tmp___0 = link_entry_time; } else { tmp___0 = 0U; } } __p___3 = to_i915((struct drm_device const *)dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U ? 
411648L : 456704L, (((tmp___0 | val) | (max_sleep_time << 20)) | idle_frames) | 2147483648U, 1); if ((int )dev_priv->psr.psr2_support) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 456960L, 3221225728U, 1); } else { } return; } }
/* intel_psr_match_conditions(): validate that PSR may be enabled — lockdep
 * WARNs for psr.lock / connection_mutex / crtc->mutex, then platform checks:
 * HSW-class hardware ties PSR to DDI A (port 0), i915.enable_psr module
 * parameter, stereo-3D and interlaced modes reject PSR, and link-standby is
 * rejected on platforms that cannot do it. Sets and returns
 * dev_priv->psr.source_ok. Continues on the next chunk lines. */
static bool intel_psr_match_conditions(struct intel_dp *intel_dp ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int __ret_warn_on___0 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; int __ret_warn_on___1 ; bool tmp___6 ; int tmp___7 ; long tmp___8 ; long tmp___9 ; struct drm_i915_private *__p ; long tmp___10 ; long tmp___11 ; struct drm_i915_private *__p___0 ; uint32_t tmp___12 ; long tmp___13 ; struct drm_i915_private *__p___1 ; long tmp___14 ; struct drm_i915_private *__p___2 ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; dev = dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; crtc = dig_port->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->psr.lock.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 293, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->psr.lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = drm_modeset_is_locked(& dev->mode_config.connection_mutex); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } __ret_warn_on___0 = tmp___4; tmp___5 = 
/* Tail of intel_psr_match_conditions(): remaining lockdep WARNs, then the
 * PSR eligibility checks (debug prints under drm_debug bit 2):
 *  - HSW-class (byte at dev_priv+45 non-zero) requires port 0 (DDI A / eDP);
 *  - i915.enable_psr == 0 disables PSR by module flag;
 *  - stereo-3D enabled on the pipe (bit 31 of the per-pipe reg) rejects;
 *  - interlaced adjusted mode (flags bit 4) rejects on HSW-class;
 *  - link-standby (VBT full_link, or port != 0) rejects on non-HSW-class. */
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 294, "WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___6 = drm_modeset_is_locked(& crtc->mutex); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } __ret_warn_on___1 = tmp___7; tmp___8 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 295, "WARN_ON(!drm_modeset_is_locked(&crtc->mutex))"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); dev_priv->psr.source_ok = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (unsigned int )dig_port->port != 0U) { tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("intel_psr_match_conditions", "HSW ties PSR to DDI A (eDP)\n"); } else { } return (0); } else { } if (i915.enable_psr == 0) { tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("intel_psr_match_conditions", "PSR disable by flag\n"); } else { } return (0); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { tmp___12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )(intel_crtc->config)->cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458784U), 1); if ((int )tmp___12 < 0) { 
tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("intel_psr_match_conditions", "PSR condition failed: Stereo 3D is Enabled\n"); } else { } return (0); } else { } } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U && ((intel_crtc->config)->base.adjusted_mode.flags & 16U) != 0U) { tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("intel_psr_match_conditions", "PSR condition failed: Interlaced is Enabled\n"); } else { } return (0); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U && ((int )dev_priv->vbt.psr.full_link || (unsigned int )dig_port->port != 0U)) { tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("intel_psr_match_conditions", "PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); } else { } return (0); } else { } dev_priv->psr.source_ok = 1; return (1); } }
/* intel_psr_activate(): WARN if PSR is already enabled in EDP_PSR_CTL, if
 * psr.active is already set, or if psr.lock is not held; then activate via
 * the HSW source path or the VLV path (byte at dev_priv+46 selects) and mark
 * psr.active. */
static void intel_psr_activate(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; struct drm_i915_private *__p ; uint32_t tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; int __ret_warn_on___1 ; int tmp___3 ; int tmp___4 ; long tmp___5 ; struct drm_i915_private *__p___0 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 
411648L : 456704L, 1); __ret_warn_on = (int )tmp___0 < 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 338, "WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )dev_priv->psr.active; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 339, "WARN_ON(dev_priv->psr.active)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (debug_locks != 0) { tmp___3 = lock_is_held(& dev_priv->psr.lock.dep_map); if (tmp___3 == 0) { tmp___4 = 1; } else { tmp___4 = 0; } } else { tmp___4 = 0; } __ret_warn_on___1 = tmp___4; tmp___5 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 340, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->psr.lock))"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) != 0U) { hsw_psr_enable_source(intel_dp); } else { vlv_psr_activate(intel_dp); } dev_priv->psr.active = 1; return; } }
/* intel_psr_enable(): head only — body continues past this chunk. */
void intel_psr_enable(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct 
intel_crtc *crtc ; struct drm_crtc const *__mptr ; long tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; long tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; bool tmp___5 ; int tmp___6 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)intel_dig_port->base.base.crtc; crtc = (struct intel_crtc *)__mptr; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto _L___0; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) != 8U) { _L___0: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) != 8U) { _L: /* CIL Label */ __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_psr_enable", "PSR not supported on this platform\n"); } else { } return; } else { } } else { } } } else { } } else { } } } else { } tmp___2 = is_edp_psr(intel_dp); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { tmp___1 = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_psr_enable", "PSR not supported by this panel\n"); } else { } return; } else { } mutex_lock_nested(& dev_priv->psr.lock, 0U); if ((unsigned long )dev_priv->psr.enabled != (unsigned long )((struct intel_dp *)0)) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_psr_enable", "PSR already in use\n"); } else { } goto unlock; } else { } tmp___5 = intel_psr_match_conditions(intel_dp); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { goto unlock; } else { } dev_priv->psr.busy_frontbuffer_bits = 0U; __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 46UL) != 0U) { hsw_psr_setup_vsc(intel_dp); if ((int )dev_priv->psr.psr2_support) { if ((crtc->config)->pipe_src_w > 3200 || (crtc->config)->pipe_src_h > 2000) { dev_priv->psr.psr2_support = 0; } else { skl_psr_setup_su_vsc(intel_dp); } } else { } __p___6 = to_i915((struct drm_device const *)dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U ? 
411744L : 456800L, 234881024U, 1); hsw_psr_enable_sink(intel_dp); __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) > 8U) { intel_psr_activate(intel_dp); } else { } } else { vlv_psr_setup_vsc(intel_dp); vlv_psr_enable_sink(intel_dp); vlv_psr_enable_source(intel_dp); } dev_priv->psr.enabled = intel_dp; unlock: mutex_unlock(& dev_priv->psr.lock); return; } } static void vlv_psr_disable(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; uint32_t val ; int __ret_warn_on ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; int __ret_warn_on___0 ; bool tmp___5 ; long tmp___6 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)intel_dig_port->base.base.crtc; intel_crtc = (struct intel_crtc *)__mptr; if ((int )dev_priv->psr.active) { tmp___1 = msecs_to_jiffies(1U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48279; ldv_48278: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 1966228), 1); if ((tmp___2 & 128U) != 0U) { ret__ = -110; } else { } goto ldv_48277; } else { } tmp___3 = drm_can_sleep___7(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48279: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 1966228), 1); if ((tmp___4 & 128U) != 0U) { goto ldv_48278; } else { } ldv_48277: ; if (ret__ != 0) { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 443, "PSR transition took longer than expected\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 1966224), 1); val = val & 4294967039U; val = val & 4294967294U; val = val & 4294967267U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 1966224), val, 1); dev_priv->psr.active = 0; } else { tmp___5 = vlv_is_psr_active_on_pipe(dev, (int )intel_crtc->pipe); __ret_warn_on___0 = (int )tmp___5; tmp___6 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 453, "WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } return; } } static void hsw_psr_disable(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; uint32_t tmp___0 ; struct drm_i915_private *__p___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; struct drm_i915_private *__p___1 ; uint32_t tmp___2 ; bool tmp___3 ; struct drm_i915_private *__p___2 ; uint32_t tmp___4 ; int __ret_warn_on ; struct drm_i915_private *__p___3 ; uint32_t tmp___5 ; long tmp___6 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )dev_priv->psr.active) { __p = to_i915((struct 
drm_device const *)dev); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 411648L : 456704L, 1); __p___0 = to_i915((struct drm_device const *)dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U ? 411648L : 456704L, tmp___0 & 2147483647U, 1); tmp___1 = msecs_to_jiffies(2000U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48325; ldv_48324: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { __p___1 = to_i915((struct drm_device const *)dev); tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U ? 411712L : 456768L, 1); if ((tmp___2 & 3758096384U) != 0U) { ret__ = -110; } else { } goto ldv_48323; } else { } tmp___3 = drm_can_sleep___7(); if ((int )tmp___3) { usleep_range(10000UL, 20000UL); } else { cpu_relax(); } ldv_48325: __p___2 = to_i915((struct drm_device const *)dev); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U ? 411712L : 456768L, 1); if ((tmp___4 & 3758096384U) != 0U) { goto ldv_48324; } else { } ldv_48323: ; if (ret__ != 0) { drm_err("Timed out waiting for PSR Idle State\n"); } else { } dev_priv->psr.active = 0; } else { __p___3 = to_i915((struct drm_device const *)dev); tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U ? 
411648L : 456704L, 1); __ret_warn_on = (int )tmp___5 < 0; tmp___6 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 474, "WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } return; } } void intel_psr_disable(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev_priv->psr.lock, 0U); if ((unsigned long )dev_priv->psr.enabled == (unsigned long )((struct intel_dp *)0)) { mutex_unlock(& dev_priv->psr.lock); return; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { hsw_psr_disable(intel_dp); } else { vlv_psr_disable(intel_dp); } dev_priv->psr.enabled = (struct intel_dp *)0; mutex_unlock(& dev_priv->psr.lock); ldv_cancel_delayed_work_sync_640(& dev_priv->psr.work); return; } } static void intel_psr_work(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct intel_dp *intel_dp ; struct drm_crtc *crtc ; struct intel_digital_port *tmp ; enum pipe pipe ; struct drm_crtc const *__mptr___0 ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; struct drm_i915_private *__p ; uint32_t tmp___1 ; bool tmp___2 ; struct drm_i915_private *__p___0 ; uint32_t tmp___3 ; unsigned long timeout_____0 ; unsigned long tmp___4 ; int ret_____0 ; uint32_t tmp___5 ; bool tmp___6 ; uint32_t tmp___7 ; struct drm_i915_private *__p___1 ; { __mptr = 
(struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff4218UL; intel_dp = dev_priv->psr.enabled; tmp = dp_to_dig_port(intel_dp); crtc = tmp->base.base.crtc; __mptr___0 = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr___0)->pipe; __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) != 0U) { tmp___0 = msecs_to_jiffies(50U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48386; ldv_48385: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { __p = to_i915((struct drm_device const *)dev_priv->dev); tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 411712L : 456768L, 1); if ((tmp___1 & 3758096384U) != 0U) { ret__ = -110; } else { } goto ldv_48384; } else { } tmp___2 = drm_can_sleep___7(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48386: __p___0 = to_i915((struct drm_device const *)dev_priv->dev); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U ? 
411712L : 456768L, 1); if ((tmp___3 & 3758096384U) != 0U) { goto ldv_48385; } else { } ldv_48384: ; if (ret__ != 0) { drm_err("Timed out waiting for PSR Idle for re-enable\n"); return; } else { } } else { tmp___4 = msecs_to_jiffies(1U); timeout_____0 = (tmp___4 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_48398; ldv_48397: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 1966228), 1); if ((tmp___5 & 128U) != 0U) { ret_____0 = -110; } else { } goto ldv_48396; } else { } tmp___6 = drm_can_sleep___7(); if ((int )tmp___6) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48398: tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 1966228), 1); if ((tmp___7 & 128U) != 0U) { goto ldv_48397; } else { } ldv_48396: ; if (ret_____0 != 0) { drm_err("Timed out waiting for PSR Idle for re-enable\n"); return; } else { } } mutex_lock_nested(& dev_priv->psr.lock, 0U); intel_dp = dev_priv->psr.enabled; if ((unsigned long )intel_dp == (unsigned long )((struct intel_dp *)0)) { goto unlock; } else { } if (dev_priv->psr.busy_frontbuffer_bits != 0U) { goto unlock; } else { } intel_psr_activate(intel_dp); unlock: mutex_unlock(& dev_priv->psr.lock); return; } } static void intel_psr_exit(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_dp *intel_dp ; struct drm_crtc *crtc ; struct intel_digital_port *tmp ; enum pipe pipe ; struct drm_crtc const *__mptr ; u32 val ; struct drm_i915_private *__p ; int __ret_warn_on ; long tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; intel_dp = dev_priv->psr.enabled; tmp = dp_to_dig_port(intel_dp); crtc = tmp->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; if (! 
dev_priv->psr.active) { return; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) != 0U) { __p = to_i915((struct drm_device const *)dev); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )*((unsigned char *)__p + 45UL) != 0U ? 411648L : 456704L, 1); __ret_warn_on = (int )val >= 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_psr.c", 566, "WARN_ON(!(val & EDP_PSR_ENABLE))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p___0 = to_i915((struct drm_device const *)dev); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U ? 411648L : 456704L, val & 2147483647U, 1); } else { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 1966224), 1); val = val & 4294967039U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 1966224), val, 1); drm_dp_dpcd_writeb(& intel_dp->aux, 1536U, 1); } dev_priv->psr.active = 0; return; } } void intel_psr_single_frame_update(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; u32 val ; struct drm_i915_private *__p ; struct intel_digital_port *tmp ; struct drm_crtc const *__mptr ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return; } else { } mutex_lock_nested(& dev_priv->psr.lock, 0U); if ((unsigned long )dev_priv->psr.enabled == (unsigned long )((struct intel_dp *)0)) { mutex_unlock(& dev_priv->psr.lock); return; } else { } tmp = dp_to_dig_port(dev_priv->psr.enabled); crtc = tmp->base.base.crtc; __mptr = (struct 
drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 1966224), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 1966224), val | 128U, 1); mutex_unlock(& dev_priv->psr.lock); return; } } void intel_psr_invalidate(struct drm_device *dev , unsigned int frontbuffer_bits ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; struct intel_digital_port *tmp ; struct drm_crtc const *__mptr ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev_priv->psr.lock, 0U); if ((unsigned long )dev_priv->psr.enabled == (unsigned long )((struct intel_dp *)0)) { mutex_unlock(& dev_priv->psr.lock); return; } else { } tmp = dp_to_dig_port(dev_priv->psr.enabled); crtc = tmp->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; intel_psr_exit(dev); frontbuffer_bits = (unsigned int )(15 << (int )pipe * 4) & frontbuffer_bits; dev_priv->psr.busy_frontbuffer_bits = dev_priv->psr.busy_frontbuffer_bits | frontbuffer_bits; mutex_unlock(& dev_priv->psr.lock); return; } } void intel_psr_flush(struct drm_device *dev , unsigned int frontbuffer_bits ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; struct intel_digital_port *tmp ; struct drm_crtc const *__mptr ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; unsigned long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_lock_nested(& dev_priv->psr.lock, 0U); if ((unsigned long )dev_priv->psr.enabled == (unsigned long )((struct intel_dp *)0)) { mutex_unlock(& dev_priv->psr.lock); return; } else { } tmp = dp_to_dig_port(dev_priv->psr.enabled); crtc = tmp->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; dev_priv->psr.busy_frontbuffer_bits = dev_priv->psr.busy_frontbuffer_bits & ~ frontbuffer_bits; 
__p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && ((unsigned int )(1 << ((int )pipe * 4 + 2)) & frontbuffer_bits) != 0U) { intel_psr_exit(dev); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) == 0U) { intel_psr_exit(dev); } else { } if (! dev_priv->psr.active && dev_priv->psr.busy_frontbuffer_bits == 0U) { tmp___0 = msecs_to_jiffies(100U); schedule_delayed_work___1(& dev_priv->psr.work, tmp___0); } else { } mutex_unlock(& dev_priv->psr.lock); return; } } void intel_psr_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __init_work(& dev_priv->psr.work.work, 0); __constr_expr_0___0.counter = 137438953408L; dev_priv->psr.work.work.data = __constr_expr_0___0; lockdep_init_map(& dev_priv->psr.work.work.lockdep_map, "(&(&dev_priv->psr.work)->work)", & __key, 0); INIT_LIST_HEAD(& dev_priv->psr.work.work.entry); dev_priv->psr.work.work.func = & intel_psr_work; init_timer_key(& dev_priv->psr.work.timer, 2097152U, "(&(&dev_priv->psr.work)->timer)", & __key___0); dev_priv->psr.work.timer.function = & delayed_work_timer_fn; dev_priv->psr.work.timer.data = (unsigned long )(& dev_priv->psr.work); __mutex_init(& dev_priv->psr.lock, "&dev_priv->psr.lock", & __key___1); return; } } void invoke_work_16(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_16_0 == 2 || ldv_work_16_0 == 3) { ldv_work_16_0 = 4; intel_psr_work(ldv_work_struct_16_0); ldv_work_16_0 = 1; } else { } goto ldv_48490; case 1: ; if (ldv_work_16_1 == 2 || ldv_work_16_1 == 3) { ldv_work_16_1 = 4; intel_psr_work(ldv_work_struct_16_0); ldv_work_16_1 = 1; } else { } goto ldv_48490; case 2: ; if (ldv_work_16_2 == 2 || ldv_work_16_2 == 3) { ldv_work_16_2 = 
4; intel_psr_work(ldv_work_struct_16_0); ldv_work_16_2 = 1; } else { } goto ldv_48490; case 3: ; if (ldv_work_16_3 == 2 || ldv_work_16_3 == 3) { ldv_work_16_3 = 4; intel_psr_work(ldv_work_struct_16_0); ldv_work_16_3 = 1; } else { } goto ldv_48490; default: ldv_stop(); } ldv_48490: ; return; } } void activate_work_16(struct work_struct *work , int state ) { { if (ldv_work_16_0 == 0) { ldv_work_struct_16_0 = work; ldv_work_16_0 = state; return; } else { } if (ldv_work_16_1 == 0) { ldv_work_struct_16_1 = work; ldv_work_16_1 = state; return; } else { } if (ldv_work_16_2 == 0) { ldv_work_struct_16_2 = work; ldv_work_16_2 = state; return; } else { } if (ldv_work_16_3 == 0) { ldv_work_struct_16_3 = work; ldv_work_16_3 = state; return; } else { } return; } } void call_and_disable_work_16(struct work_struct *work ) { { if ((ldv_work_16_0 == 2 || ldv_work_16_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_16_0) { intel_psr_work(work); ldv_work_16_0 = 1; return; } else { } if ((ldv_work_16_1 == 2 || ldv_work_16_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_16_1) { intel_psr_work(work); ldv_work_16_1 = 1; return; } else { } if ((ldv_work_16_2 == 2 || ldv_work_16_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_16_2) { intel_psr_work(work); ldv_work_16_2 = 1; return; } else { } if ((ldv_work_16_3 == 2 || ldv_work_16_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_16_3) { intel_psr_work(work); ldv_work_16_3 = 1; return; } else { } return; } } void work_init_16(void) { { ldv_work_16_0 = 0; ldv_work_16_1 = 0; ldv_work_16_2 = 0; ldv_work_16_3 = 0; return; } } void call_and_disable_all_16(int state ) { { if (ldv_work_16_0 == state) { call_and_disable_work_16(ldv_work_struct_16_0); } else { } if (ldv_work_16_1 == state) { call_and_disable_work_16(ldv_work_struct_16_1); } else { } if (ldv_work_16_2 == state) { call_and_disable_work_16(ldv_work_struct_16_2); } else { } if (ldv_work_16_3 == state) { 
call_and_disable_work_16(ldv_work_struct_16_3); } else { } return; } } void disable_work_16(struct work_struct *work ) { { if ((ldv_work_16_0 == 3 || ldv_work_16_0 == 2) && (unsigned long )ldv_work_struct_16_0 == (unsigned long )work) { ldv_work_16_0 = 1; } else { } if ((ldv_work_16_1 == 3 || ldv_work_16_1 == 2) && (unsigned long )ldv_work_struct_16_1 == (unsigned long )work) { ldv_work_16_1 = 1; } else { } if ((ldv_work_16_2 == 3 || ldv_work_16_2 == 2) && (unsigned long )ldv_work_struct_16_2 == (unsigned long )work) { ldv_work_16_2 = 1; } else { } if ((ldv_work_16_3 == 3 || ldv_work_16_3 == 2) && (unsigned long )ldv_work_struct_16_3 == (unsigned long )work) { ldv_work_16_3 = 1; } else { } return; } } bool ldv_queue_work_on_635(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_636(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_637(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_638(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_639(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , 
struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_cancel_delayed_work_sync_640(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___16(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } bool ldv_queue_work_on_651(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_653(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_652(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work 
*ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_655(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_654(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool drm_can_sleep___8(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; default: __bad_percpu_size(); } ldv_39965: pscr_ret__ = pfo_ret__; goto ldv_39971; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; default: __bad_percpu_size(); } ldv_39975: pscr_ret__ = pfo_ret_____0; goto ldv_39971; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; 
default: __bad_percpu_size(); } ldv_39984: pscr_ret__ = pfo_ret_____1; goto ldv_39971; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; default: __bad_percpu_size(); } ldv_39993: pscr_ret__ = pfo_ret_____2; goto ldv_39971; default: __bad_size_call_parameter(); goto ldv_39971; } ldv_39971: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___16(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv , u32 reg ) ; void vlv_gpio_nc_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) ; u32 vlv_ccu_read(struct drm_i915_private *dev_priv , u32 reg ) ; void vlv_ccu_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) ; u32 vlv_gps_core_read(struct drm_i915_private *dev_priv , u32 reg ) ; void vlv_gps_core_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) ; u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv , u32 reg ) ; void vlv_flisdsi_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) ; static int vlv_sideband_rw(struct drm_i915_private *dev_priv , u32 devfn , u32 port , u32 opcode , u32 addr , u32 *val ) { u32 cmd ; u32 be ; u32 bar ; bool is_read ; int __ret_warn_on ; int tmp ; long tmp___0 ; long tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; long tmp___6 ; unsigned long timeout_____0 ; unsigned long tmp___7 ; int ret_____0 ; uint32_t tmp___8 ; bool tmp___9 ; uint32_t tmp___10 ; { be = 15U; bar = 0U; 
/* Body of vlv_sideband_rw() (the declarations and function header are on the
 * preceding line of this CIL-generated file).  Performs one IOSF sideband
 * transaction for ValleyView: wait for the interface to go idle, program the
 * address (and, for writes, the data) register, ring the doorbell with the
 * packed command word, wait for completion, then read back the result for
 * read opcodes.  Caller must hold dev_priv->sb_lock. */
is_read = (bool )(opcode == 0U || opcode == 6U); /* NOTE(review): opcodes 0 and 6 are treated as reads here -- presumably the MRD/CRRDA read opcodes; confirm against upstream intel_sideband.c */
/* Pack the IOSF command word: devfn<<24 | opcode<<16 | port<<8 | byte-enables<<4 | bar<<1. */
cmd = ((((devfn << 24) | (opcode << 16)) | (port << 8)) | (be << 4)) | (bar << 1);
/* Expanded WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)). */
tmp = mutex_is_locked(& dev_priv->sb_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sideband.c", 52, "WARN_ON(!mutex_is_locked(&dev_priv->sb_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L);
/* Idle wait: poll bit 0 (busy) of the MMIO register at offset 1581312
 * (0x182100, presumably the IOSF doorbell/request register) for up to 5 ms
 * of jiffies time; usleep_range() between polls when sleeping is allowed,
 * cpu_relax() otherwise.  The goto labels are the CIL expansion of the
 * driver's wait_for() macro. */
tmp___2 = msecs_to_jiffies(5U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48001; ldv_48000: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581312L, 1); if ((int )tmp___3 & 1) { ret__ = -110; /* -ETIMEDOUT */ } else { } goto ldv_47999; } else { } tmp___4 = drm_can_sleep___8(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48001: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581312L, 1); if ((int )tmp___5 & 1) { goto ldv_48000; } else { } ldv_47999: ;
/* Idle wait timed out: optional debug print, then bail with -11 (-EAGAIN). */
if (ret__ != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("vlv_sideband_rw", "IOSF sideband idle wait (%s) timed out\n", (int )is_read ? (char *)"read" : (char *)"write"); } else { } return (-11); } else { }
/* Program the target address (0x182108); for writes also the data register
 * (0x182104); then start the transaction by writing the command word to the
 * doorbell (0x182100). */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581320L, addr, 1); if (! is_read) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581316L, *val, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581312L, cmd, 1);
/* Completion wait: same 5 ms poll of doorbell bit 0 as the idle wait above. */
tmp___7 = msecs_to_jiffies(5U); timeout_____0 = (tmp___7 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_48014; ldv_48013: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581312L, 1); if ((int )tmp___8 & 1) { ret_____0 = -110; /* -ETIMEDOUT */ } else { } goto ldv_48012; } else { } tmp___9 = drm_can_sleep___8(); if ((int )tmp___9) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48014: tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581312L, 1); if ((int )tmp___10 & 1) { goto ldv_48013; } else { } ldv_48012: ;
/* Completion wait timed out: return -110 (-ETIMEDOUT). */
if (ret_____0 != 0) { tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("vlv_sideband_rw", "IOSF sideband finish wait (%s) timed out\n", (int )is_read ? (char *)"read" : (char *)"write"); } else { } return (-110); } else { }
/* For reads, fetch the result from the data register (0x182104); in all
 * cases clear the data register afterwards.  Returns 0 on success. */
if ((int )is_read) { *val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581316L, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1581316L, 0U, 1); return (0); } }
/*
 * vlv_punit_read - read a punit register over the IOSF sideband
 * (devfn 0, port 4, opcode 6 = read).  Expands WARN_ON if the caller does
 * not hold dev_priv->rps.hw_lock; takes/releases sb_lock internally.
 * Returns the value read (0 if the transaction failed, since val starts 0).
 * Note: the final "return (val);" of this function sits on the next line of
 * this generated file.
 */
u32 vlv_punit_read(struct drm_i915_private *dev_priv , u32 addr ) { u32 val ; int __ret_warn_on ; int tmp ; long tmp___0 ; { val = 0U; tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sideband.c", 82, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_sideband_rw(dev_priv, 0U, 4U, 6U, addr, & val); mutex_unlock(& dev_priv->sb_lock);
/* Tail of vlv_punit_read() from the preceding line: return the value read. */
return (val); } }
/*
 * vlv_punit_write - write a punit register over the IOSF sideband
 * (devfn 0, port 4, opcode 7 = write).  WARNs unless rps.hw_lock is held;
 * takes sb_lock internally.
 */
void vlv_punit_write(struct drm_i915_private *dev_priv , u32 addr , u32 val ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sideband.c", 94, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_sideband_rw(dev_priv, 0U, 4U, 7U, addr, & val); mutex_unlock(& dev_priv->sb_lock); return; } }
/* vlv_bunit_read - sideband read, port 3 (bunit); caller handles locking. */
u32 vlv_bunit_read(struct drm_i915_private *dev_priv , u32 reg ) { u32 val ; { val = 0U; vlv_sideband_rw(dev_priv, 0U, 3U, 6U, reg, & val); return (val); } }
/* vlv_bunit_write - sideband write, port 3 (bunit). */
void vlv_bunit_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) { { vlv_sideband_rw(dev_priv, 0U, 3U, 7U, reg, & val); return; } }
/*
 * vlv_nc_read - sideband read, port 17 (NC unit); WARNs unless rps.hw_lock
 * is held; takes sb_lock internally.  addr is widened from u8 to u32.
 */
u32 vlv_nc_read(struct drm_i915_private *dev_priv , u8 addr ) { u32 val ; int __ret_warn_on ; int tmp ; long tmp___0 ; { val = 0U; tmp = mutex_is_locked(& dev_priv->rps.hw_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sideband.c", 122, "WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_sideband_rw(dev_priv, 0U, 17U, 6U, (u32 )addr, & val); mutex_unlock(& dev_priv->sb_lock); return (val); } }
/* vlv_gpio_nc_read - sideband read, port 19 (GPIO NC). */
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv , u32 reg ) { u32 val ; { val = 0U; vlv_sideband_rw(dev_priv, 0U, 19U, 6U, reg, & val); return (val); } }
/* vlv_gpio_nc_write - sideband write, port 19 (GPIO NC). */
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) { { vlv_sideband_rw(dev_priv, 0U, 19U, 7U, reg, & val); return; } }
/* vlv_cck_read - sideband read, port 20 (CCK). */
u32 vlv_cck_read(struct drm_i915_private *dev_priv , u32 reg ) { u32 val ; { val = 0U; vlv_sideband_rw(dev_priv, 0U, 20U, 6U, reg, & val); return (val); } }
/* vlv_cck_write - sideband write, port 20 (CCK). */
void vlv_cck_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) { { vlv_sideband_rw(dev_priv, 0U, 20U, 7U, reg, & val); return; } }
/* vlv_ccu_read - sideband read, port 169 (CCU). */
u32 vlv_ccu_read(struct drm_i915_private *dev_priv , u32 reg ) { u32 val ; { val = 0U; vlv_sideband_rw(dev_priv, 0U, 169U, 6U, reg, & val); return (val); } }
/* vlv_ccu_write - sideband write, port 169 (CCU). */
void vlv_ccu_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) { { vlv_sideband_rw(dev_priv, 0U, 169U, 7U, reg, & val); return; } }
/* vlv_gps_core_read - sideband read, port 72 (GPS core). */
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv , u32 reg ) { u32 val ; { val = 0U; vlv_sideband_rw(dev_priv, 0U, 72U, 6U, reg, & val); return (val); } }
/* vlv_gps_core_write - sideband write, port 72 (GPS core). */
void vlv_gps_core_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) { { vlv_sideband_rw(dev_priv, 0U, 72U, 7U, reg, & val); return; } }
/*
 * vlv_dpio_read - read a DPIO PHY register.  The IOSF port is looked up in
 * dpio_phy_iosf_port[] indexed by pipe >> 1 (i.e. per PHY, not per pipe);
 * opcode 0 is the DPIO read opcode.  An all-ones result (0xffffffff) is
 * treated as a failed/powered-down read and triggers an expanded WARN with
 * the pipe name ('A' + pipe) and register.
 */
u32 vlv_dpio_read(struct drm_i915_private *dev_priv , enum pipe pipe , int reg ) { u32 val ; int __ret_warn_on ; long tmp ; { val = 0U; vlv_sideband_rw(dev_priv, 0U, (u32 )dev_priv->dpio_phy_iosf_port[(int )pipe >> 1], 0U, (u32 )reg, & val); __ret_warn_on = val == 4294967295U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sideband.c", 200, "DPIO read pipe %c reg 0x%x == 0x%x\n", (int )pipe + 65, reg, val); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (val); } }
/* vlv_dpio_write - DPIO PHY write (opcode 1); continues on the next line of
 * this generated file. */
void vlv_dpio_write(struct drm_i915_private *dev_priv , enum pipe pipe , int reg , u32 val ) { { vlv_sideband_rw(dev_priv, 0U, (u32
)dev_priv->dpio_phy_iosf_port[(int )pipe >> 1], 1U, (u32 )reg, & val); return; } } u32 intel_sbi_read(struct drm_i915_private *dev_priv , u16 reg , enum intel_sbi_destination destination ) { u32 value ; int __ret_warn_on ; int tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; unsigned long timeout_____0 ; unsigned long tmp___5 ; int ret_____0 ; uint32_t tmp___6 ; bool tmp___7 ; uint32_t tmp___8 ; uint32_t tmp___9 ; { value = 0U; tmp = mutex_is_locked(& dev_priv->sb_lock); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sideband.c", 216, "WARN_ON(!mutex_is_locked(&dev_priv->sb_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = msecs_to_jiffies(100U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48119; ldv_48118: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((int )tmp___2 & 1) { ret__ = -110; } else { } goto ldv_48117; } else { } tmp___3 = drm_can_sleep___8(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48119: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((int )tmp___4 & 1) { goto ldv_48118; } else { } ldv_48117: ; if (ret__ != 0) { drm_err("timeout waiting for SBI to become ready\n"); return (0U); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811008L, (uint32_t )((int )reg << 16), 1); if ((unsigned int )destination == 0U) { value = 1536U; } else { value = 66048U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811016L, value | 1U, 1); tmp___5 = msecs_to_jiffies(100U); timeout_____0 = 
(tmp___5 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_48131; ldv_48130: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((tmp___6 & 3U) != 0U) { ret_____0 = -110; } else { } goto ldv_48129; } else { } tmp___7 = drm_can_sleep___8(); if ((int )tmp___7) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48131: tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((tmp___8 & 3U) != 0U) { goto ldv_48130; } else { } ldv_48129: ; if (ret_____0 != 0) { drm_err("timeout waiting for SBI to complete read transaction\n"); return (0U); } else { } tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811012L, 1); return (tmp___9); } } void intel_sbi_write(struct drm_i915_private *dev_priv , u16 reg , u32 value , enum intel_sbi_destination destination ) { u32 tmp ; int __ret_warn_on ; int tmp___0 ; long tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; unsigned long timeout_____0 ; unsigned long tmp___6 ; int ret_____0 ; uint32_t tmp___7 ; bool tmp___8 ; uint32_t tmp___9 ; { tmp___0 = mutex_is_locked(& dev_priv->sb_lock); __ret_warn_on = tmp___0 == 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sideband.c", 246, "WARN_ON(!mutex_is_locked(&dev_priv->sb_lock))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___2 = msecs_to_jiffies(100U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48152; ldv_48151: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((int )tmp___3 & 1) { ret__ = -110; } else { } 
goto ldv_48150; } else { } tmp___4 = drm_can_sleep___8(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48152: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((int )tmp___5 & 1) { goto ldv_48151; } else { } ldv_48150: ; if (ret__ != 0) { drm_err("timeout waiting for SBI to become ready\n"); return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811008L, (uint32_t )((int )reg << 16), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811012L, value, 1); if ((unsigned int )destination == 0U) { tmp = 1792U; } else { tmp = 66304U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 811016L, tmp | 1U, 1); tmp___6 = msecs_to_jiffies(100U); timeout_____0 = (tmp___6 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_48164; ldv_48163: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((tmp___7 & 3U) != 0U) { ret_____0 = -110; } else { } goto ldv_48162; } else { } tmp___8 = drm_can_sleep___8(); if ((int )tmp___8) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48164: tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 811016L, 1); if ((tmp___9 & 3U) != 0U) { goto ldv_48163; } else { } ldv_48162: ; if (ret_____0 != 0) { drm_err("timeout waiting for SBI to complete write transaction\n"); return; } else { } return; } } u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv , u32 reg ) { u32 val ; { val = 0U; vlv_sideband_rw(dev_priv, 0U, 27U, 6U, reg, & val); return (val); } } void vlv_flisdsi_write(struct drm_i915_private *dev_priv , u32 reg , u32 val ) { { vlv_sideband_rw(dev_priv, 0U, 27U, 7U, reg, & val); return; } } bool ldv_queue_work_on_651(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; 
activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_652(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_653(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_654(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_655(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___17(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" 
(12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void arch_local_irq_disable___0(void) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.irq_disable.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (841), "i" (12UL)); ldv_4879: ; goto ldv_4879; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (45UL), [paravirt_opptr] "i" (& pv_irq_ops.irq_disable.func), [paravirt_clobber] "i" (1): "memory", "cc"); return; } } __inline static void arch_local_irq_enable(void) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.irq_enable.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (846), "i" (12UL)); ldv_4888: ; goto ldv_4888; } else { } __asm__ volatile ("771:\n\tcall 
*%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (46UL), [paravirt_opptr] "i" (& pv_irq_ops.irq_enable.func), [paravirt_clobber] "i" (1): "memory", "cc"); return; } } __inline static bool IS_ERR(void const *ptr ) ; __inline static bool static_key_false___11(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } __inline static int rcu_read_lock_sched_held___11(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags___17(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } bool ldv_queue_work_on_665(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_667(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_666(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_669(int ldv_func_arg1 , struct 
workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_668(struct workqueue_struct *ldv_func_arg1 ) ; __inline static struct drm_plane *drm_plane_find(struct drm_device *dev , uint32_t id ) { struct drm_mode_object *mo ; struct drm_mode_object const *__mptr ; struct drm_plane *tmp ; { mo = drm_mode_object_find(dev, id, 4008636142U); if ((unsigned long )mo != (unsigned long )((struct drm_mode_object *)0)) { __mptr = (struct drm_mode_object const *)mo; tmp = (struct drm_plane *)__mptr + 0xffffffffffffff28UL; } else { tmp = (struct drm_plane *)0; } return (tmp); } } __inline static wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc ) { unsigned int tmp ; { tmp = drm_crtc_index(crtc); return (& ((crtc->dev)->vblank + (unsigned long )tmp)->queue); } } __inline static void drm_rect_adjust_size(struct drm_rect *r , int dw , int dh ) { { r->x1 = r->x1 - (dw >> 1); r->y1 = r->y1 - (dh >> 1); r->x2 = r->x2 + ((dw + 1) >> 1); r->y2 = r->y2 + ((dh + 1) >> 1); return; } } extern bool drm_rect_clip_scaled(struct drm_rect * , struct drm_rect * , struct drm_rect const * , int , int ) ; extern int drm_rect_calc_hscale(struct drm_rect const * , struct drm_rect const * , int , int ) ; extern int drm_rect_calc_vscale(struct drm_rect const * , struct drm_rect const * , int , int ) ; extern int drm_rect_calc_hscale_relaxed(struct drm_rect * , struct drm_rect * , int , int ) ; extern int drm_rect_calc_vscale_relaxed(struct drm_rect * , struct drm_rect * , int , int ) ; extern void drm_rect_debug_print(struct drm_rect const * , bool ) ; extern void drm_rect_rotate(struct drm_rect * , int , int , unsigned int ) ; extern void drm_rect_rotate_inv(struct drm_rect * , int , int , unsigned int ) ; extern int drm_plane_helper_update(struct drm_plane * , struct drm_crtc * , struct drm_framebuffer * , int , int , unsigned int , unsigned int , uint32_t , uint32_t , uint32_t , uint32_t ) ; __inline static void 
trace_i915_pipe_update_start(struct intel_crtc *crtc , u32 min , u32 max ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_291 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_293 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___11(& __tracepoint_i915_pipe_update_start.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_pipe_update_start.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held___11(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 43, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_43207: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_crtc * , u32 , u32 ))it_func))(__data, crtc, min, max); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_43207; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_pipe_update_start.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___11(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 43, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } __inline static void trace_i915_pipe_update_vblank_evaded(struct intel_crtc *crtc , u32 min , u32 max , u32 frame ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_295 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_297 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___11(& __tracepoint_i915_pipe_update_vblank_evaded.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_pipe_update_vblank_evaded.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___11(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 68, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_43270: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_crtc * , u32 , u32 , u32 ))it_func))(__data, crtc, min, max, frame); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_43270; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_pipe_update_vblank_evaded.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held___11(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 68, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } __inline static void trace_i915_pipe_update_end(struct intel_crtc *crtc , u32 frame ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_299 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_301 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false___11(& __tracepoint_i915_pipe_update_end.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_pipe_update_end.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held___11(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 89, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_43332: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct intel_crtc * , u32 ))it_func))(__data, crtc, frame); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_43332; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_i915_pipe_update_end.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held___11(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/i915_trace.h", 89, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } __inline static unsigned long msecs_to_jiffies_timeout(unsigned int const m ) { unsigned long j ; unsigned long tmp ; unsigned long __min1 ; unsigned long __min2 ; { tmp = msecs_to_jiffies(m); j = tmp; __min1 = 4611686018427387902UL; __min2 = j + 1UL; return (__min1 < __min2 ? 
/* Tail of msecs_to_jiffies_timeout() from the preceding line: clamp to the
 * smaller of the two candidate jiffy counts. */
__min1 : __min2); } } int intel_sprite_set_colorkey(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
/*
 * intel_atomic_get_crtc_state___0 - fetch (or create) the atomic state for
 * a CRTC.  Propagates an IS_ERR() pointer from drm_atomic_get_crtc_state()
 * via ERR_CAST(); otherwise performs the container_of() adjustment from
 * drm_crtc_state to the wrapping intel_crtc_state (here the offset is 0,
 * so the cast suffices).
 */
__inline static struct intel_crtc_state *intel_atomic_get_crtc_state___0(struct drm_atomic_state *state , struct intel_crtc *crtc ) { struct drm_crtc_state *crtc_state ; void *tmp ; bool tmp___0 ; struct drm_crtc_state const *__mptr ; { crtc_state = drm_atomic_get_crtc_state(state, & crtc->base); tmp___0 = IS_ERR((void const *)crtc_state); if ((int )tmp___0) { tmp = ERR_CAST((void const *)crtc_state); return ((struct intel_crtc_state *)tmp); } else { } __mptr = (struct drm_crtc_state const *)crtc_state; return ((struct intel_crtc_state *)__mptr); } }
/*
 * format_is_yuv - true for the packed YUV 4:2:2 fourcc codes.  The magic
 * constants are little-endian fourccs: 0x56595559 'YUYV', 0x59565955 'UYVY',
 * 0x59555956 'VYUY', 0x55595659 'YVYU'.
 */
static bool format_is_yuv(uint32_t format ) { { switch (format) { case 1448695129U: ; case 1498831189U: ; case 1498765654U: ; case 1431918169U: ; return (1); default: ; return (0); } } }
/*
 * usecs_to_scanlines - convert a microsecond interval to scanlines, rounding
 * up: ceil(crtc_clock * usecs / (crtc_htotal * 1000)).  Returns 1 when
 * crtc_htotal is 0 to avoid dividing by zero.  NOTE(review): the formula
 * implies crtc_clock is in kHz -- confirm against the mode struct docs.
 */
static int usecs_to_scanlines(struct drm_display_mode const *mode , int usecs ) { { if ((int )mode->crtc_htotal == 0) { return (1); } else { } return ((((int )mode->crtc_clock * usecs + (int )mode->crtc_htotal * 1000) + -1) / ((int )mode->crtc_htotal * 1000)); } }
/*
 * intel_pipe_update_start - vblank-evasion entry: with interrupts disabled,
 * wait until the pipe's scanline is outside the critical window
 * [vblank_start - usecs_to_scanlines(mode, 100), vblank_start - 1] so a
 * register update cannot race the vblank.  On success returns 1 (true) and
 * stores the current hardware vblank counter in *start_vbl_count (paired
 * with intel_pipe_update_end, which checks the counter did not change).
 * Returns 0 when the window cannot be computed (min/max <= 0) or
 * drm_crtc_vblank_get() fails.  The open-coded wait-queue setup and the
 * ldv_48078/ldv_48077 goto loop are the CIL expansion of the driver's
 * prepare_to_wait()/schedule_timeout() evade loop, with a total sleep budget
 * of msecs_to_jiffies_timeout(1).  IMPORTANT: on the success path interrupts
 * are left disabled for the caller.
 */
bool intel_pipe_update_start(struct intel_crtc *crtc , uint32_t *start_vbl_count ) { struct drm_device *dev ; struct drm_display_mode const *mode ; enum pipe pipe ; long timeout ; unsigned long tmp ; int scanline ; int min ; int max ; int vblank_start ; wait_queue_head_t *wq ; wait_queue_head_t *tmp___0 ; wait_queue_t wait ; struct task_struct *tmp___1 ; int tmp___2 ; int __ret_warn_on ; int tmp___3 ; long tmp___4 ; long tmp___5 ; { dev = crtc->base.dev; mode = (struct drm_display_mode const *)(& (crtc->config)->base.adjusted_mode); pipe = crtc->pipe; tmp = msecs_to_jiffies_timeout(1U); timeout = (long )tmp;
/* Hand-rolled DEFINE_WAIT(): current task, autoremove wake function,
 * self-linked list entry, queued on the CRTC's vblank waitqueue. */
tmp___0 = drm_crtc_vblank_waitqueue(& crtc->base); wq = tmp___0; tmp___1 = get_current(); wait.flags = 0U; wait.private = (void *)tmp___1; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list;
/* Interlaced modes (flags bit 0x10, presumably DRM_MODE_FLAG_INTERLACE)
 * halve the field's vblank start line. */
vblank_start = mode->crtc_vblank_start; if (((unsigned int )mode->flags & 16U) != 0U) { vblank_start = (vblank_start + 1) / 2; } else { } tmp___2 = usecs_to_scanlines(mode, 100); min = vblank_start - tmp___2; max = vblank_start + -1; if (min <= 0 || max <= 0) { return (0); } else { }
/* Expanded WARN_ON(drm_crtc_vblank_get(&crtc->base)); bail if the vblank
 * reference could not be taken. */
tmp___3 = drm_crtc_vblank_get(& crtc->base); __ret_warn_on = tmp___3 != 0; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c", 102, "WARN_ON(drm_crtc_vblank_get(&crtc->base))"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 != 0L) { return (0); } else { }
/* Enter the evade loop with local interrupts off; re-enable only around
 * schedule_timeout().  State 2 in prepare_to_wait() is presumably
 * TASK_UNINTERRUPTIBLE -- confirm. */
arch_local_irq_disable___0(); trace_hardirqs_off(); trace_i915_pipe_update_start(crtc, (u32 )min, (u32 )max); ldv_48078: prepare_to_wait(wq, & wait, 2); scanline = intel_get_crtc_scanline(crtc); if (scanline < min || scanline > max) { goto ldv_48077; } else { } if (timeout <= 0L) { drm_err("Potential atomic update failure on pipe %c\n", (int )crtc->pipe + 65); goto ldv_48077; } else { } trace_hardirqs_on(); arch_local_irq_enable(); timeout = schedule_timeout(timeout); arch_local_irq_disable___0(); trace_hardirqs_off(); goto ldv_48078; ldv_48077: finish_wait(wq, & wait); drm_crtc_vblank_put(& crtc->base);
/* Record the vblank counter at the start of the critical section. */
*start_vbl_count = (*((dev->driver)->get_vblank_counter))(dev, (int )pipe); trace_i915_pipe_update_vblank_evaded(crtc, (u32 )min, (u32 )max, *start_vbl_count); return (1); } }
/*
 * intel_pipe_update_end - close the vblank-evaded critical section opened by
 * intel_pipe_update_start(): sample the vblank counter again, re-enable
 * interrupts (on the next line of this generated file) and complain if a
 * vblank slipped in between.
 */
void intel_pipe_update_end(struct intel_crtc *crtc , u32 start_vbl_count ) { struct drm_device *dev ; enum pipe pipe ; u32 end_vbl_count ; u32 tmp ; { dev = crtc->base.dev; pipe = crtc->pipe; tmp = (*((dev->driver)->get_vblank_counter))(dev, (int )pipe); end_vbl_count = tmp; trace_i915_pipe_update_end(crtc, end_vbl_count); trace_hardirqs_on();
arch_local_irq_enable(); if (start_vbl_count != end_vbl_count) { drm_err("Atomic update failure on pipe %c (start=%u end=%u)\n", (int )pipe + 65, start_vbl_count, end_vbl_count); } else { } return; } } static void skl_update_plane(struct drm_plane *drm_plane , struct drm_crtc *crtc , struct drm_framebuffer *fb , int crtc_x , int crtc_y , unsigned int crtc_w , unsigned int crtc_h , uint32_t x , uint32_t y , uint32_t src_w , uint32_t src_h ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr___0 ; struct drm_i915_gem_object *tmp ; int pipe ; int plane ; u32 plane_ctl ; u32 stride_div ; u32 stride ; int pixel_size ; int tmp___0 ; struct drm_intel_sprite_colorkey const *key ; unsigned long surf_addr ; u32 tile_height ; u32 plane_offset ; u32 plane_size ; unsigned int rotation ; int x_offset ; int y_offset ; struct intel_crtc_state *crtc_state ; struct drm_crtc const *__mptr___1 ; int scaler_id ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; struct drm_plane_state const *__mptr___2 ; bool tmp___4 ; uint32_t ps_ctrl ; long tmp___5 ; { dev = drm_plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)drm_plane; intel_plane = (struct intel_plane *)__mptr; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___0 = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr___0)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; pipe = intel_plane->pipe; plane = intel_plane->plane + 1; tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); pixel_size = tmp___0; key = (struct drm_intel_sprite_colorkey const *)(& intel_plane->ckey); __mptr___1 = (struct drm_crtc const *)crtc; crtc_state = ((struct intel_crtc *)__mptr___1)->config; plane_ctl = 2155872256U; tmp___1 = skl_plane_ctl_format(fb->pixel_format); plane_ctl = tmp___1 | 
/* skl_update_plane() continued: fold tiling and rotation bits into PLANE_CTL,
 * update sprite watermarks, optionally program the colour-key min/max/mask
 * registers and key-enable bits, then compute the surface layout.  The 90/270
 * rotation branch derives stride/offsets from the tile height; the normal
 * branch uses pitches[0]/stride_div.  Finally writes offset, stride and size
 * plane registers (offsets are CIL-folded register macros). */
plane_ctl; tmp___2 = skl_plane_ctl_tiling(fb->modifier[0]); plane_ctl = tmp___2 | plane_ctl; rotation = (drm_plane->state)->rotation; tmp___3 = skl_plane_ctl_rotation(rotation); plane_ctl = tmp___3 | plane_ctl; intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h, pixel_size, 1, (int )((bool )(src_w != crtc_w || src_h != crtc_h))); stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], fb->pixel_format); __mptr___2 = (struct drm_plane_state const *)drm_plane->state; scaler_id = ((struct intel_plane_state *)__mptr___2)->scaler_id; src_w = src_w - 1U; src_h = src_h - 1U; crtc_w = crtc_w - 1U; crtc_h = crtc_h - 1U; if ((unsigned int )key->flags != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459412) + (pipe * -4096 + -459156)) * plane) + 459156), key->min_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459424) + (pipe * -4096 + -459168)) * plane) + 459168), key->max_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459416) + (pipe * -4096 + -459160)) * plane) + 459160), key->channel_mask, 1); } else { } if (((unsigned int )key->flags & 2U) != 0U) { plane_ctl = plane_ctl | 4194304U; } else if (((unsigned int )key->flags & 4U) != 0U) { plane_ctl = plane_ctl | 2097152U; } else { } surf_addr = intel_plane_obj_offset(intel_plane, obj); tmp___4 = intel_rotation_90_or_270(rotation); if ((int )tmp___4) { tile_height = intel_tile_height(dev, fb->pixel_format, fb->modifier[0]); stride = ((fb->height + tile_height) - 1U) / tile_height; plane_size = (src_w << 16) | src_h; x_offset = (int )(((stride * tile_height - y) - src_h) - 1U); y_offset = (int )x; } else { stride = fb->pitches[0] / stride_div; plane_size = (src_h << 16) | src_w; x_offset = (int )x; y_offset = (int )y; } plane_offset = (u32 )((y_offset << 16) | x_offset); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459428) + (pipe * -4096 + -459172)) * plane) + 459172), plane_offset, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459400) + (pipe * -4096 + -459144)) * plane) + 459144), stride, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459408) + (pipe * -4096 + -459152)) * plane) + 459152), plane_size, 1);
/* If a scaler is bound to this plane (scaler_id >= 0), program the PS_*
 * scaler registers (ctrl with PS_PLANE_SEL and enable bit 31, window pos and
 * size) and zero the plane position; otherwise just program plane position.
 * Then write PLANE_CTL. */
if (scaler_id >= 0) { ps_ctrl = 0U; tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("skl_update_plane", "plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, (plane + 1) << 25); } else { } ps_ctrl = ((uint32_t )((plane + 1) << 25) | crtc_state->scaler_state.scalers[scaler_id].mode) | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428416) + (scaler_id * -256 + -426368)) * pipe) + 426368), ps_ctrl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428384) + (scaler_id * -256 + -426336)) * pipe) + 426336), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428400) + (scaler_id * -256 + -426352)) * pipe) + 426352), (uint32_t )((crtc_x << 16) | crtc_y), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((scaler_id * 256 + ((scaler_id * 256 + 428404) + (scaler_id * -256 + -426356)) * pipe) + 426356), ((crtc_w + 1U) << 16) | (crtc_h + 1U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459404) + (pipe * -4096 + -459148)) * plane) + 459148), 0U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459404) + (pipe * -4096 + -459148)) * plane) + 459148), (uint32_t )((crtc_y << 16) | crtc_x), 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459392) + (pipe * -4096 + -459136)) * plane) + 459136), plane_ctl, 1);
/* Surface register write arms the update; the readl is a posting read. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459420) + (pipe * -4096 + -459164)) * plane) + 459164), (uint32_t )surf_addr, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459420) + (pipe * -4096 + -459164)) * plane) + 459164), 0); return; } }
/*
 * skl_disable_plane - disable a SKL+ sprite plane: zero the control and
 * surface registers, issue a posting read, and clear its sprite watermarks.
 */
static void skl_disable_plane(struct drm_plane *dplane , struct drm_crtc *crtc , bool force ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; int pipe ; int plane ; { dev = dplane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)dplane; intel_plane = (struct intel_plane *)__mptr; pipe = intel_plane->pipe; plane = intel_plane->plane + 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459392) + (pipe * -4096 + -459136)) * plane) + 459136), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459420) + (pipe * -4096 + -459164)) * plane) + 459164), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((pipe * 4096 + ((pipe * 4096 + 459420) + (pipe * -4096 + -459164)) * plane) + 459164), 0); intel_update_sprite_watermarks(dplane, crtc, 0U, 0U, 0, 0, 0); return; } }
/*
 * chv_update_csc - write the sprite colour-space-conversion coefficient
 * registers (continues on the next chunk line).  Bails out unless the
 * framebuffer format is YUV (format_is_yuv).  The coefficient values are
 * pre-folded constants; their exact semantics are not derivable from this
 * flattened view (see the original intel_sprite.c for the named CSC macros).
 */
static void chv_update_csc(struct intel_plane *intel_plane , uint32_t format ) { struct drm_i915_private *dev_priv ; int plane ; bool tmp ; int tmp___0 ; { dev_priv = (struct drm_i915_private *)(intel_plane->base.dev)->dev_private; plane = intel_plane->plane; tmp = format_is_yuv(format); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021632), 1984U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021636), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021640), 0U, 1); 
/* chv_update_csc() continued: remaining CSC coefficient/clamp register
 * writes (constant values pre-folded by CIL). */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021644), 312547721U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021648), 1929248768U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021652), 2042303137U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021656), 312541184U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021660), 8263U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021664), 61603904U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021668), 29361728U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021672), 29361728U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021676), 67043328U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021680), 67043328U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(plane * 4096 + 2021684), 67043328U, 1); return; } }
/*
 * vlv_update_plane - program a VLV/CHV sprite plane.  Maps the fourcc pixel
 * format to SPRCTL format bits in the switch (an unsupported format hits the
 * inline ud2 BUG() asm), handles 180-degree rotation (rotation == 4U) by
 * adjusting x/y/linear_offset, programs the colour-key registers when a key
 * is set, and on CHV (gen 8 feature-byte check) pipe B also updates the CSC.
 * Register addresses are CIL-folded SP*(pipe, plane) macros.
 */
static void vlv_update_plane(struct drm_plane *dplane , struct drm_crtc *crtc , struct drm_framebuffer *fb , int crtc_x , int crtc_y , unsigned int crtc_w , unsigned int crtc_h , uint32_t x , uint32_t y , uint32_t src_w , uint32_t src_h ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr___0 ; struct drm_i915_gem_object *tmp ; int pipe ; int plane ; u32 sprctl ; unsigned long sprsurf_offset ; unsigned long linear_offset ; int pixel_size ; int tmp___0 ; struct drm_intel_sprite_colorkey const *key ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; unsigned long tmp___1 ; { dev = dplane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)dplane; intel_plane = (struct intel_plane *)__mptr; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___0 = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr___0)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; pipe = intel_plane->pipe; plane = intel_plane->plane; tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); pixel_size = tmp___0; key = (struct drm_intel_sprite_colorkey const *)(& intel_plane->ckey); sprctl = 2147483648U; switch (fb->pixel_format) { case 1448695129U: sprctl = sprctl; goto ldv_48176; case 1431918169U: sprctl = sprctl | 131072U; goto ldv_48176; case 1498831189U: sprctl = sprctl | 65536U; goto ldv_48176; case 1498765654U: sprctl = sprctl | 196608U; goto ldv_48176; case 909199186U: sprctl = sprctl | 335544320U; goto ldv_48176; case 875713112U: sprctl = sprctl | 402653184U; goto ldv_48176; case 875713089U: sprctl = sprctl | 469762048U; goto ldv_48176; case 808665688U: sprctl = sprctl | 536870912U; goto ldv_48176; case 808665665U: sprctl = sprctl | 603979776U; goto ldv_48176; case 875709016U: sprctl = sprctl | 939524096U; goto ldv_48176; case 875708993U: sprctl = sprctl | 1006632960U; goto ldv_48176; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c"), "i" (390), "i" (12UL)); ldv_48188: ; goto ldv_48188; } ldv_48176: sprctl = sprctl | 1073741824U; if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { sprctl = sprctl | 1024U; } else { } intel_update_sprite_watermarks(dplane, crtc, src_w, src_h, pixel_size, 1, (int )((bool )(src_w != crtc_w || src_h != crtc_h))); src_w = src_w - 1U; src_h = src_h - 1U; crtc_w = crtc_w - 1U; crtc_h = crtc_h - 1U; linear_offset = (unsigned long )(fb->pitches[0] * y + x * (uint32_t )pixel_size); sprsurf_offset = intel_gen4_compute_page_offset((int *)(& x), (int *)(& y), (unsigned int )obj->tiling_mode, (unsigned int )pixel_size, fb->pitches[0]); linear_offset = linear_offset - sprsurf_offset; if ((dplane->state)->rotation == 4U) { sprctl = sprctl | 32768U; x = x + src_w; y = y + src_h; linear_offset = (unsigned long )(fb->pitches[0] * src_h + src_w * (uint32_t )pixel_size) + linear_offset; } else { } if ((unsigned int )key->flags != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040212), key->min_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040224), key->max_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040216), key->channel_mask, 1); } else { } if (((unsigned int )key->flags & 4U) != 0U) { sprctl = sprctl | 4194304U; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { if (pipe == 1) { chv_update_csc(intel_plane, fb->pixel_format); } else { } } else { } } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040200), fb->pitches[0], 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040204), (uint32_t )((crtc_y << 16) | crtc_x), 1); if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040228), (y << 16) | x, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040196), (uint32_t )linear_offset, 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040232), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040208), (crtc_h << 16) | crtc_w, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040192), sprctl, 1); tmp___1 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040220), (uint32_t )tmp___1 + (uint32_t )sprsurf_offset, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040220), 0); return; } }
/*
 * vlv_disable_plane - disable a VLV/CHV sprite plane: zero control and
 * surface registers, posting read, clear sprite watermarks.
 */
static void vlv_disable_plane(struct drm_plane *dplane , struct drm_crtc *crtc , bool force ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; int pipe ; int plane ; { dev = dplane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)dplane; intel_plane = (struct intel_plane *)__mptr; pipe = intel_plane->pipe; plane = intel_plane->plane; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040192), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040220), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((pipe * 2 + plane) * 256 + 2040220), 0); intel_update_sprite_watermarks(dplane, crtc, 0U, 0U, 0, 0, 0); return; } }
/*
 * ivb_update_plane - program an IVB-era sprite plane (continues on the next
 * chunk lines).  Same overall shape as vlv_update_plane but with
 * platform-dependent SPRCTL bits chosen via repeated to_i915() feature-byte /
 * gen checks (the __p___N temporaries below are CIL-split copies of those
 * checks), and an optional sprite scaler (sprscale) when src and crtc sizes
 * differ.
 */
static void ivb_update_plane(struct drm_plane *plane , struct drm_crtc *crtc , struct drm_framebuffer *fb , int crtc_x , int crtc_y , unsigned int crtc_w , unsigned int crtc_h , uint32_t x , uint32_t y , uint32_t src_w , uint32_t src_h ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr___0 ; struct drm_i915_gem_object *tmp ; enum pipe pipe ; u32 sprctl ; u32 sprscale ; unsigned long sprsurf_offset ; unsigned long linear_offset ; int pixel_size ; int tmp___0 ; struct drm_intel_sprite_colorkey const *key ; struct 
/* ivb_update_plane() continued: local declarations, then fourcc -> SPRCTL
 * format bits (unsupported format hits the ud2 BUG() asm), tiling bit, and
 * the platform-conditional SPRCTL adjustments. */
drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; unsigned long tmp___1 ; { dev = plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___0 = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr___0)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; pipe = intel_plane->pipe; sprscale = 0U; tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); pixel_size = tmp___0; key = (struct drm_intel_sprite_colorkey const *)(& intel_plane->ckey); sprctl = 2147483648U; switch (fb->pixel_format) { case 875709016U: sprctl = sprctl | 68157440U; goto ldv_48242; case 875713112U: sprctl = sprctl | 67108864U; goto ldv_48242; case 1448695129U: sprctl = sprctl; goto ldv_48242; case 1431918169U: sprctl = sprctl | 131072U; goto ldv_48242; case 1498831189U: sprctl = sprctl | 65536U; goto ldv_48242; case 1498765654U: sprctl = sprctl | 196608U; goto ldv_48242; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c"), "i" (514), "i" (12UL)); ldv_48249: ; goto ldv_48249; } ldv_48242: sprctl = sprctl | 1073741824U; if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { sprctl = sprctl | 1024U; } else { } 
__p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { sprctl = sprctl & 4294950911U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { sprctl = sprctl & 4294950911U; } else { sprctl = sprctl | 16384U; } } else { sprctl = sprctl | 16384U; } } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { sprctl = sprctl | 16777216U; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { sprctl = sprctl | 16777216U; } else { } } else { } } intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size, 1, (int )((bool )(src_w != crtc_w || src_h != crtc_h))); src_w = src_w - 1U; src_h = src_h - 1U; crtc_w = crtc_w - 1U; crtc_h = crtc_h - 1U; if (crtc_w != src_w || crtc_h != src_h) { sprscale = ((src_w << 16) | src_h) | 2147483648U; } else { } linear_offset = (unsigned long )(fb->pitches[0] * y + x * (uint32_t )pixel_size); sprsurf_offset = intel_gen4_compute_page_offset((int *)(& x), (int *)(& y), (unsigned int )obj->tiling_mode, (unsigned int )pixel_size, fb->pitches[0]); linear_offset = linear_offset - sprsurf_offset; if ((plane->state)->rotation == 4U) { sprctl = sprctl | 32768U; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { x = x + src_w; y = y + src_h; linear_offset = (unsigned long )(fb->pitches[0] * src_h + src_w * (uint32_t )pixel_size) + linear_offset; } else { __p___7 = to_i915((struct drm_device const 
*)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) != 8U) { x = x + src_w; y = y + src_h; linear_offset = (unsigned long )(fb->pitches[0] * src_h + src_w * (uint32_t )pixel_size) + linear_offset; } else { } } } else { } } else { } if ((unsigned int )key->flags != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459412), key->min_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459424), key->max_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459416), key->channel_mask, 1); } else { } if (((unsigned int )key->flags & 2U) != 0U) { sprctl = sprctl | 4U; } else if (((unsigned int )key->flags & 4U) != 0U) { sprctl = sprctl | 4194304U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459400), fb->pitches[0], 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459404), (uint32_t )((crtc_y << 16) | crtc_x), 1); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459428), (y << 16) | x, 1); } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) == 0U) { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___10->info.gen) == 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459428), (y << 16) | x, 1); } else { goto _L; } } else _L: /* CIL Label */ if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459428), (y << 16) | x, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459396), (uint32_t )linear_offset, 1); } } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459408), (crtc_h << 16) | crtc_w, 1); if ((int )intel_plane->can_scale) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459524), sprscale, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459392), sprctl, 1); tmp___1 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe * 4096 + 459420), (uint32_t )tmp___1 + (uint32_t )sprsurf_offset, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 4096 + 459420), 0); return; } }
/*
 * ivb_disable_plane - disable an IVB-era sprite: read-modify-write the
 * control register clearing the enable bit (bit 31), zero the scaler when
 * the plane can scale, zero the surface register, and do a posting read.
 */
static void ivb_disable_plane(struct drm_plane *plane , struct drm_crtc *crtc , bool force ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; int pipe ; uint32_t tmp ; { dev = plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; pipe = intel_plane->pipe; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459392), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459392), tmp & 2147483647U, 1); if ((int )intel_plane->can_scale) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459524), 0U, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 459420), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 459420), 0); return; } }
/*
 * ilk_update_plane - program an ILK/SNB DVS sprite plane (continues on the
 * next chunk lines).  Same structure as the IVB path but with DVS*
 * registers (CIL-folded 0x72... offsets) and only a single gen == 6 check.
 */
static void ilk_update_plane(struct drm_plane *plane , struct drm_crtc *crtc , struct drm_framebuffer *fb , int crtc_x , int crtc_y , unsigned int crtc_w , unsigned int crtc_h , uint32_t x , uint32_t y , uint32_t src_w , uint32_t src_h ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct drm_i915_gem_object *obj ; struct drm_framebuffer const *__mptr___0 ; struct 
/* ilk_update_plane() continued: declarations, fourcc -> DVSCNTR format bits
 * (unsupported format hits the ud2 BUG() asm), tiling bit, and the gen == 6
 * trickle-feed-style bit. */
drm_i915_gem_object *tmp ; int pipe ; unsigned long dvssurf_offset ; unsigned long linear_offset ; u32 dvscntr ; u32 dvsscale ; int pixel_size ; int tmp___0 ; struct drm_intel_sprite_colorkey const *key ; struct drm_i915_private *__p ; unsigned long tmp___1 ; { dev = plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; if ((unsigned long )fb != (unsigned long )((struct drm_framebuffer *)0)) { __mptr___0 = (struct drm_framebuffer const *)fb; tmp = ((struct intel_framebuffer *)__mptr___0)->obj; } else { tmp = (struct drm_i915_gem_object *)0; } obj = tmp; pipe = intel_plane->pipe; tmp___0 = drm_format_plane_cpp(fb->pixel_format, 0); pixel_size = tmp___0; key = (struct drm_intel_sprite_colorkey const *)(& intel_plane->ckey); dvscntr = 2147483648U; switch (fb->pixel_format) { case 875709016U: dvscntr = dvscntr | 68157440U; goto ldv_48362; case 875713112U: dvscntr = dvscntr | 67108864U; goto ldv_48362; case 1448695129U: dvscntr = dvscntr; goto ldv_48362; case 1431918169U: dvscntr = dvscntr | 131072U; goto ldv_48362; case 1498831189U: dvscntr = dvscntr | 65536U; goto ldv_48362; case 1498765654U: dvscntr = dvscntr | 196608U; goto ldv_48362; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c"), "i" (654), "i" (12UL)); ldv_48369: ; goto ldv_48369; } ldv_48362: dvscntr = dvscntr | 1073741824U; if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { dvscntr = dvscntr | 1024U; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { dvscntr = dvscntr | 16384U; } else { } 
intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size, 1, (int )((bool )(src_w != crtc_w || src_h != crtc_h))); src_w = src_w - 1U; src_h = src_h - 1U; crtc_w = crtc_w - 1U; crtc_h = crtc_h - 1U; dvsscale = 0U; if (crtc_w != src_w || crtc_h != src_h) { dvsscale = ((src_w << 16) | src_h) | 2147483648U; } else { } linear_offset = (unsigned long )(fb->pitches[0] * y + x * (uint32_t )pixel_size); dvssurf_offset = intel_gen4_compute_page_offset((int *)(& x), (int *)(& y), (unsigned int )obj->tiling_mode, (unsigned int )pixel_size, fb->pitches[0]); linear_offset = linear_offset - dvssurf_offset; if ((plane->state)->rotation == 4U) { dvscntr = dvscntr | 32768U; x = x + src_w; y = y + src_h; linear_offset = (unsigned long )(fb->pitches[0] * src_h + src_w * (uint32_t )pixel_size) + linear_offset; } else { } if ((unsigned int )key->flags != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467348), key->min_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467360), key->max_value, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467352), key->channel_mask, 1); } else { } if (((unsigned int )key->flags & 2U) != 0U) { dvscntr = dvscntr | 4U; } else if (((unsigned int )key->flags & 4U) != 0U) { dvscntr = dvscntr | 4194304U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467336), fb->pitches[0], 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467340), (uint32_t )((crtc_y << 16) | crtc_x), 1); if ((unsigned int )*((unsigned char *)obj + 409UL) != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467364), (y << 16) | x, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467332), (uint32_t )linear_offset, 1); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467344), (crtc_h << 16) | crtc_w, 1); 
/* Final DVS register writes: scale, control, then surface (arms the flip)
 * followed by a posting read. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467460), dvsscale, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467328), dvscntr, 1); tmp___1 = i915_gem_obj_ggtt_offset(obj); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467356), (uint32_t )tmp___1 + (uint32_t )dvssurf_offset, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 467356), 0); return; } }
/*
 * ilk_disable_plane - disable an ILK/SNB DVS sprite: zero control, scale and
 * surface registers, then a posting read.
 */
static void ilk_disable_plane(struct drm_plane *plane , struct drm_crtc *crtc , bool force ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; int pipe ; { dev = plane->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; pipe = intel_plane->pipe; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467328), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467460), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(pipe * 4096 + 467356), 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(pipe * 4096 + 467356), 0); return; } }
/*
 * intel_check_sprite_plane - atomic check for a sprite plane (continues on
 * the next chunk lines).  Validates the plane<->crtc mapping and the
 * framebuffer, clips the scaled source/destination rectangles against the
 * crtc, computes h/v scale factors within platform min/max limits, and sets
 * state->visible plus the crtc's atomic update flags.  Returns 0 on success
 * or -22 (-EINVAL) on invalid configuration.
 */
static int intel_check_sprite_plane(struct drm_plane *plane , struct intel_plane_state *state ) { struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_crtc_state *crtc_state ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr___0 ; struct drm_framebuffer *fb ; int crtc_x ; int crtc_y ; unsigned int crtc_w ; unsigned int crtc_h ; uint32_t src_x ; uint32_t src_y ; uint32_t src_w ; uint32_t src_h ; struct drm_rect *src ; struct drm_rect *dst ; struct drm_rect const *clip ; int hscale ; int vscale ; int max_scale ; int min_scale ; bool can_scale ; int pixel_size ; int ret ; struct drm_crtc const *__mptr___1 ; struct intel_crtc_state *tmp ; long tmp___0 ; long tmp___1 ; struct 
/* intel_check_sprite_plane() continued: remaining declarations, then the
 * basic sanity checks — plane must belong to the crtc's pipe (-22/-EINVAL
 * otherwise), fb must be at least 3x3 with pitch <= 16384. */
drm_i915_private *__p ; long tmp___2 ; long tmp___3 ; int tmp___4 ; int tmp___5 ; long tmp___6 ; long tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int __ret_warn_on ; long tmp___12 ; int tmp___13 ; int tmp___14 ; bool tmp___15 ; unsigned int width_bytes ; int __ret_warn_on___0 ; long tmp___16 ; long tmp___17 ; struct drm_i915_private *__p___0 ; bool tmp___18 ; unsigned int tmp___19 ; struct drm_i915_private *__p___1 ; { dev = plane->dev; __mptr = (struct drm_crtc const *)state->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; __mptr___0 = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr___0; fb = state->base.fb; src = & state->src; dst = & state->dst; clip = (struct drm_rect const *)(& state->clip); if ((unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0)) { __mptr___1 = (struct drm_crtc const *)plane->crtc; intel_crtc = (struct intel_crtc *)__mptr___1; } else { intel_crtc = intel_crtc; } if ((unsigned long )state->base.state != (unsigned long )((struct drm_atomic_state *)0)) { tmp = intel_atomic_get_crtc_state___0(state->base.state, intel_crtc); crtc_state = tmp; } else { crtc_state = (struct intel_crtc_state *)0; } if ((unsigned long )fb == (unsigned long )((struct drm_framebuffer *)0)) { state->visible = 0; goto finish; } else { } if ((int )intel_plane->pipe != (int )intel_crtc->pipe) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_check_sprite_plane", "Wrong plane <-> crtc mapping\n"); } else { } return (-22); } else { } if ((fb->width <= 2U || fb->height <= 2U) || fb->pitches[0] > 16384U) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_check_sprite_plane", "Unsuitable framebuffer for plane\n"); } else { } return (-22); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { if (intel_plane->ckey.flags == 1U) { 
/* Scaling limits: on gen > 8 scaling is allowed only without a colour key
 * (ckey.flags == 1U), bounded by skl_max_scale(); older gens use the plane's
 * can_scale / max_downscale.  min/max scale are 16.16 fixed point
 * (65536 == 1:1). */
can_scale = 1; min_scale = 1; max_scale = skl_max_scale(intel_crtc, crtc_state); } else { can_scale = 0; min_scale = 65536; max_scale = 65536; } } else { can_scale = intel_plane->can_scale; max_scale = intel_plane->max_downscale << 16; min_scale = (int )intel_plane->can_scale ? 1 : 65536; } drm_rect_rotate(src, (int )(fb->width << 16), (int )(fb->height << 16), state->base.rotation); hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale); tmp___2 = ldv__builtin_expect(hscale < 0, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c"), "i" (810), "i" (12UL)); ldv_48428: ; goto ldv_48428; } else { } vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale); tmp___3 = ldv__builtin_expect(vscale < 0, 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c"), "i" (813), "i" (12UL)); ldv_48429: ; goto ldv_48429; } else { } state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale); crtc_x = dst->x1; crtc_y = dst->y1; tmp___4 = drm_rect_width((struct drm_rect const *)dst); crtc_w = (unsigned int )tmp___4; tmp___5 = drm_rect_height((struct drm_rect const *)dst); crtc_h = (unsigned int )tmp___5; if ((int )state->visible) { hscale = drm_rect_calc_hscale((struct drm_rect const *)src, (struct drm_rect const *)dst, min_scale, max_scale); if (hscale < 0) { tmp___6 = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_check_sprite_plane", "Horizontal scaling factor out of limits\n"); } else { } drm_rect_debug_print((struct drm_rect const *)src, 1); drm_rect_debug_print((struct drm_rect const *)dst, 0); return (hscale); } else { } vscale = drm_rect_calc_vscale((struct drm_rect const *)src, (struct drm_rect const *)dst, min_scale, max_scale); if (vscale < 0) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_check_sprite_plane", "Vertical scaling factor out of limits\n"); } else { } drm_rect_debug_print((struct drm_rect const *)src, 1); drm_rect_debug_print((struct drm_rect const *)dst, 0); return (vscale); } else { } tmp___8 = drm_rect_height((struct drm_rect const *)dst); tmp___9 = drm_rect_height((struct drm_rect const *)src); tmp___10 = drm_rect_width((struct drm_rect const *)dst); tmp___11 = drm_rect_width((struct drm_rect const *)src); drm_rect_adjust_size(src, tmp___10 * hscale - tmp___11, tmp___8 * vscale - tmp___9); drm_rect_rotate_inv(src, (int )(fb->width << 16), (int )(fb->height << 16), state->base.rotation); __ret_warn_on = ((src->x1 < (int )state->base.src_x || src->y1 < (int )state->base.src_y) || (uint32_t )src->x2 > state->base.src_x + state->base.src_w) || (uint32_t )src->y2 > state->base.src_y + state->base.src_h; tmp___12 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___12 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c", 854, "WARN_ON(src->x1 < (int) state->base.src_x || src->y1 < (int) state->base.src_y || src->x2 > (int) state->base.src_x + state->base.src_w || src->y2 > (int) state->base.src_y + state->base.src_h)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); src_x = (uint32_t )(src->x1 >> 
16); tmp___13 = drm_rect_width((struct drm_rect const *)src); src_w = (uint32_t )(tmp___13 >> 16); src_y = (uint32_t )(src->y1 >> 16); tmp___14 = drm_rect_height((struct drm_rect const *)src); src_h = (uint32_t )(tmp___14 >> 16); tmp___15 = format_is_yuv(fb->pixel_format); if ((int )tmp___15) { src_x = src_x & 4294967294U; src_w = src_w & 4294967294U; if (! can_scale) { crtc_w = crtc_w & 4294967294U; } else { } if (crtc_w == 0U) { state->visible = 0; } else { } } else { } } else { } if ((int )state->visible && (src_w != crtc_w || src_h != crtc_h)) { __ret_warn_on___0 = ! can_scale; tmp___16 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___16 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sprite.c", 887, "WARN_ON(!can_scale)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (crtc_w <= 2U || crtc_h <= 2U) { state->visible = 0; } else { } if (src_w <= 2U || src_h <= 2U) { state->visible = 0; } else { } pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); width_bytes = (src_x * (uint32_t )pixel_size & 63U) + src_w * (uint32_t )pixel_size; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 8U && (((src_w > 2048U || src_h > 2048U) || width_bytes > 4096U) || fb->pitches[0] > 4096U)) { tmp___17 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___17 != 0L) { drm_ut_debug_printk("intel_check_sprite_plane", "Source dimensions exceed hardware limits\n"); } else { } return (-22); } else { } } else { } if ((int )state->visible) { src->x1 = (int )(src_x << 16); src->x2 = (int )((src_x + src_w) << 16); src->y1 = (int )(src_y << 16); src->y2 = (int )((src_y + src_h) << 16); } else { } dst->x1 = crtc_x; dst->x2 = (int )((unsigned int )crtc_x + crtc_w); dst->y1 = crtc_y; dst->y2 = (int )((unsigned int )crtc_y + crtc_h);
/* finish: on an active crtc set the atomic fb/watermark/vblank flags; on
 * gen > 8 also reserve or release a scaler via skl_update_scaler_users(). */
finish: ; if ((int )intel_crtc->active) { intel_crtc->atomic.fb_bits = intel_crtc->atomic.fb_bits | (unsigned int )(1 << ((int )intel_crtc->pipe * 4 + 2)); tmp___18 = intel_wm_need_update(plane, & state->base); if ((int )tmp___18) { intel_crtc->atomic.update_wm = 1; } else { } if (! state->visible) { intel_crtc->atomic.wait_vblank = 1; tmp___19 = drm_plane_index(plane); intel_crtc->atomic.update_sprite_watermarks = intel_crtc->atomic.update_sprite_watermarks | (unsigned int )(1 << (int )tmp___19); } else { } } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 8U) { ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane, state, 0); if (ret != 0) { return (ret); } else { } } else { } return (0); } }
/*
 * intel_commit_sprite_plane - commit hook for the sprite plane; its
 * definition continues past this chunk and is left untouched here.
 */
static void intel_commit_sprite_plane(struct drm_plane *plane , struct intel_plane_state *state ) { struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct drm_framebuffer *fb ; int crtc_x ; int crtc_y ; unsigned int crtc_w ; unsigned int crtc_h ; uint32_t src_x ; uint32_t src_y ; uint32_t src_w ; uint32_t src_h ; struct drm_crtc const *__mptr___0 ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { crtc = state->base.crtc; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; fb = state->base.fb; crtc = (unsigned long )crtc == (unsigned long )((struct drm_crtc *)0) ? 
plane->crtc : crtc; __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; plane->fb = fb; if ((int )intel_crtc->active) { if ((int )state->visible) { crtc_x = state->dst.x1; crtc_y = state->dst.y1; tmp = drm_rect_width((struct drm_rect const *)(& state->dst)); crtc_w = (unsigned int )tmp; tmp___0 = drm_rect_height((struct drm_rect const *)(& state->dst)); crtc_h = (unsigned int )tmp___0; src_x = (uint32_t )(state->src.x1 >> 16); src_y = (uint32_t )(state->src.y1 >> 16); tmp___1 = drm_rect_width((struct drm_rect const *)(& state->src)); src_w = (uint32_t )(tmp___1 >> 16); tmp___2 = drm_rect_height((struct drm_rect const *)(& state->src)); src_h = (uint32_t )(tmp___2 >> 16); (*(intel_plane->update_plane))(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); } else { (*(intel_plane->disable_plane))(plane, crtc, 0); } } else { } return; } } int intel_sprite_set_colorkey(struct drm_device *dev , void *data , struct drm_file *file_priv ) { struct drm_intel_sprite_colorkey *set ; struct drm_plane *plane ; struct intel_plane *intel_plane ; int ret ; struct drm_i915_private *__p ; struct drm_plane const *__mptr ; struct drm_plane_state const *__mptr___0 ; struct drm_i915_private *__p___0 ; { set = (struct drm_intel_sprite_colorkey *)data; ret = 0; if ((set->flags & 6U) == 6U) { return (-22); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (set->flags & 2U) != 0U) { return (-22); } else { } drm_modeset_lock_all(dev); plane = drm_plane_find(dev, set->plane_id); if ((unsigned long )plane == (unsigned long )((struct drm_plane *)0) || (unsigned int )plane->type != 0U) { ret = -2; goto out_unlock; } else { } __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 8U) { __mptr___0 = (struct drm_plane_state const 
*)plane->state; if (((struct intel_plane_state *)__mptr___0)->scaler_id >= 0) { drm_err("colorkey not allowed with scaler\n"); ret = -22; goto out_unlock; } else { } } else { } intel_plane->ckey = *set; intel_plane_restore(plane); out_unlock: drm_modeset_unlock_all(dev); return (ret); } } int intel_plane_restore(struct drm_plane *plane ) { int tmp ; { if ((unsigned long )plane->crtc == (unsigned long )((struct drm_crtc *)0) || (unsigned long )(plane->state)->fb == (unsigned long )((struct drm_framebuffer *)0)) { return (0); } else { } tmp = drm_plane_helper_update(plane, plane->crtc, (plane->state)->fb, (plane->state)->crtc_x, (plane->state)->crtc_y, (plane->state)->crtc_w, (plane->state)->crtc_h, (plane->state)->src_x, (plane->state)->src_y, (plane->state)->src_w, (plane->state)->src_h); return (tmp); } } static uint32_t const ilk_plane_formats[5U] = { 875713112U, 1448695129U, 1431918169U, 1498831189U, 1498765654U}; static uint32_t const snb_plane_formats[6U] = { 875709016U, 875713112U, 1448695129U, 1431918169U, 1498831189U, 1498765654U}; static uint32_t const vlv_plane_formats[11U] = { 909199186U, 875708993U, 875713089U, 875709016U, 875713112U, 808665688U, 808665665U, 1448695129U, 1431918169U, 1498831189U, 1498765654U}; static uint32_t skl_plane_formats[9U] = { 909199186U, 875708993U, 875713089U, 875709016U, 875713112U, 1448695129U, 1431918169U, 1498831189U, 1498765654U}; int intel_plane_init(struct drm_device *dev , enum pipe pipe , int plane ) { struct intel_plane *intel_plane ; struct intel_plane_state *state ; unsigned long possible_crtcs ; uint32_t const *plane_formats ; int num_plane_formats ; int ret ; struct drm_i915_private *__p ; void *tmp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (-19); } else { } tmp = kzalloc(808UL, 208U); intel_plane = 
(struct intel_plane *)tmp; if ((unsigned long )intel_plane == (unsigned long )((struct intel_plane *)0)) { return (-12); } else { } state = intel_create_plane_state(& intel_plane->base); if ((unsigned long )state == (unsigned long )((struct intel_plane_state *)0)) { kfree((void const *)intel_plane); return (-12); } else { } intel_plane->base.state = & state->base; __p___0 = to_i915((struct drm_device const *)dev); switch ((int )__p___0->info.gen) { case 5: ; case 6: intel_plane->can_scale = 1; intel_plane->max_downscale = 16; intel_plane->update_plane = & ilk_update_plane; intel_plane->disable_plane = & ilk_disable_plane; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 6U) { plane_formats = (uint32_t const *)(& snb_plane_formats); num_plane_formats = 6; } else { plane_formats = (uint32_t const *)(& ilk_plane_formats); num_plane_formats = 5; } goto ldv_48535; case 7: ; case 8: __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { intel_plane->can_scale = 1; intel_plane->max_downscale = 2; } else { intel_plane->can_scale = 0; intel_plane->max_downscale = 1; } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { intel_plane->update_plane = & vlv_update_plane; intel_plane->disable_plane = & vlv_disable_plane; plane_formats = (uint32_t const *)(& vlv_plane_formats); num_plane_formats = 11; } else { intel_plane->update_plane = & ivb_update_plane; intel_plane->disable_plane = & ivb_disable_plane; plane_formats = (uint32_t const *)(& snb_plane_formats); num_plane_formats = 6; } goto ldv_48535; case 9: intel_plane->can_scale = 1; intel_plane->update_plane = & skl_update_plane; intel_plane->disable_plane = & skl_disable_plane; state->scaler_id = -1; plane_formats = (uint32_t const *)(& skl_plane_formats); num_plane_formats = 9; goto ldv_48535; default: kfree((void const *)intel_plane); return 
(-19); } ldv_48535: intel_plane->pipe = pipe; intel_plane->plane = plane; intel_plane->check_plane = & intel_check_sprite_plane; intel_plane->commit_plane = & intel_commit_sprite_plane; intel_plane->ckey.flags = 1U; possible_crtcs = (unsigned long )(1 << (int )pipe); ret = drm_universal_plane_init(dev, & intel_plane->base, possible_crtcs, & intel_plane_funcs, plane_formats, (uint32_t )num_plane_formats, 0); if (ret != 0) { kfree((void const *)intel_plane); goto out; } else { } intel_create_rotation_property(dev, intel_plane); drm_plane_helper_add(& intel_plane->base, & intel_plane_helper_funcs); out: ; return (ret); } } bool ldv_queue_work_on_665(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_666(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_667(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_668(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_669(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { 
ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_679(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_681(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_680(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_683(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_682(struct workqueue_struct *ldv_func_arg1 ) ; extern acpi_status acpi_get_name(acpi_handle , u32 , struct acpi_buffer * ) ; __inline static void acpi_os_free(void *memory ) { { kfree((void const *)memory); return; } } extern bool acpi_check_dsm(acpi_handle , u8 const * , int , u64 ) ; extern union acpi_object *acpi_evaluate_dsm(acpi_handle , u8 const * , int , int , union acpi_object * ) ; __inline static union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle , u8 const *uuid , int rev , int func , union acpi_object *argv4 , acpi_object_type type ) { union acpi_object *obj ; { obj = acpi_evaluate_dsm(handle, uuid, rev, func, argv4); if ((unsigned long )obj != (unsigned long )((union acpi_object *)0) && obj->type != type) { acpi_os_free((void *)obj); obj = (union acpi_object *)0; } else { } return (obj); } } __inline static bool is_acpi_node(struct fwnode_handle *fwnode ) { { return ((bool )((unsigned long )fwnode != (unsigned long )((struct fwnode_handle *)0) && (unsigned int )fwnode->type == 2U)); } } __inline static struct acpi_device *to_acpi_node(struct fwnode_handle 
*fwnode ) { struct fwnode_handle const *__mptr ; struct acpi_device *tmp___0 ; bool tmp___1 ; { tmp___1 = is_acpi_node(fwnode); if ((int )tmp___1) { __mptr = (struct fwnode_handle const *)fwnode; tmp___0 = (struct acpi_device *)__mptr + 0xfffffffffffffff0UL; } else { tmp___0 = (struct acpi_device *)0; } return (tmp___0); } } __inline static acpi_handle acpi_device_handle(struct acpi_device *adev ) { { return ((unsigned long )adev != (unsigned long )((struct acpi_device *)0) ? adev->handle : (acpi_handle )0); } } void intel_register_dsm_handler(void) ; static struct intel_dsm_priv intel_dsm_priv ; static u8 const intel_dsm_guid[16U] = { 211U, 115U, 216U, 126U, 208U, 194U, 79U, 78U, 168U, 84U, 15U, 19U, 23U, 176U, 28U, 44U}; static char *intel_dsm_port_name(u8 id ) { { switch ((int )id) { case 0: ; return ((char *)"Reserved"); case 1: ; return ((char *)"Analog VGA"); case 2: ; return ((char *)"LVDS"); case 3: ; return ((char *)"Reserved"); case 4: ; return ((char *)"HDMI/DVI_B"); case 5: ; return ((char *)"HDMI/DVI_C"); case 6: ; return ((char *)"HDMI/DVI_D"); case 7: ; return ((char *)"DisplayPort_A"); case 8: ; return ((char *)"DisplayPort_B"); case 9: ; return ((char *)"DisplayPort_C"); case 10: ; return ((char *)"DisplayPort_D"); case 11: ; case 12: ; case 13: ; return ((char *)"Reserved"); case 14: ; return ((char *)"WiDi"); default: ; return ((char *)"bad type"); } } } static char *intel_dsm_mux_type(u8 type ) { { switch ((int )type) { case 0: ; return ((char *)"unknown"); case 1: ; return ((char *)"No MUX, iGPU only"); case 2: ; return ((char *)"No MUX, dGPU only"); case 3: ; return ((char *)"MUXed between iGPU and dGPU"); default: ; return ((char *)"bad type"); } } } static void intel_dsm_platform_mux_info(void) { int i ; union acpi_object *pkg ; union acpi_object *connector_count ; long tmp ; long tmp___0 ; union acpi_object *obj ; union acpi_object *connector_id ; union acpi_object *info ; long tmp___1 ; char *tmp___2 ; long tmp___3 ; char *tmp___4 ; long 
tmp___5 ; char *tmp___6 ; long tmp___7 ; char *tmp___8 ; long tmp___9 ; { pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, (u8 const *)(& intel_dsm_guid), 1, 1, (union acpi_object *)0, 4U); if ((unsigned long )pkg == (unsigned long )((union acpi_object *)0)) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsm_platform_mux_info", "failed to evaluate _DSM\n"); } else { } return; } else { } connector_count = pkg->package.elements; tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsm_platform_mux_info", "MUX info connectors: %lld\n", connector_count->integer.value); } else { } i = 1; goto ldv_51788; ldv_51787: obj = pkg->package.elements + (unsigned long )i; connector_id = obj->package.elements; info = obj->package.elements + 1UL; tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dsm_platform_mux_info", "Connector id: 0x%016llx\n", connector_id->integer.value); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { tmp___2 = intel_dsm_port_name((int )*(info->buffer.pointer)); drm_ut_debug_printk("intel_dsm_platform_mux_info", " port id: %s\n", tmp___2); } else { } tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { tmp___4 = intel_dsm_mux_type((int )*(info->buffer.pointer + 1UL)); drm_ut_debug_printk("intel_dsm_platform_mux_info", " display mux info: %s\n", tmp___4); } else { } tmp___7 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___7 != 0L) { tmp___6 = intel_dsm_mux_type((int )*(info->buffer.pointer + 2UL)); drm_ut_debug_printk("intel_dsm_platform_mux_info", " aux/dc mux info: %s\n", tmp___6); } else { } tmp___9 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___9 != 0L) { tmp___8 = intel_dsm_mux_type((int )*(info->buffer.pointer + 3UL)); drm_ut_debug_printk("intel_dsm_platform_mux_info", " hpd mux info: %s\n", 
tmp___8); } else { } i = i + 1; ldv_51788: ; if ((u32 )i < pkg->package.count) { goto ldv_51787; } else { } acpi_os_free((void *)pkg); return; } } static bool intel_dsm_pci_probe(struct pci_dev *pdev ) { acpi_handle dhandle ; struct acpi_device *tmp ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp = to_acpi_node(pdev->dev.fwnode); dhandle = acpi_device_handle(tmp); if ((unsigned long )dhandle == (unsigned long )((acpi_handle )0)) { return (0); } else { } tmp___1 = acpi_check_dsm(dhandle, (u8 const *)(& intel_dsm_guid), 1, 2ULL); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsm_pci_probe", "no _DSM method for intel device\n"); } else { } return (0); } else { } intel_dsm_priv.dhandle = dhandle; intel_dsm_platform_mux_info(); return (1); } } static bool intel_dsm_detect(void) { char acpi_method_name[255U] ; unsigned int tmp ; struct acpi_buffer buffer ; struct pci_dev *pdev ; bool has_dsm ; int vga_count ; bool tmp___0 ; long tmp___1 ; { acpi_method_name[0] = 0; tmp = 1U; while (1) { if (tmp >= 255U) { break; } else { } acpi_method_name[tmp] = (char)0; tmp = tmp + 1U; } buffer.length = 255ULL; buffer.pointer = (void *)(& acpi_method_name); pdev = (struct pci_dev *)0; has_dsm = 0; vga_count = 0; goto ldv_51804; ldv_51803: vga_count = vga_count + 1; tmp___0 = intel_dsm_pci_probe(pdev); has_dsm = ((int )has_dsm | (int )tmp___0) != 0; ldv_51804: pdev = pci_get_class(196608U, pdev); if ((unsigned long )pdev != (unsigned long )((struct pci_dev *)0)) { goto ldv_51803; } else { } if (vga_count == 2 && (int )has_dsm) { acpi_get_name(intel_dsm_priv.dhandle, 0U, & buffer); tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dsm_detect", "VGA switcheroo: detected DSM switching method %s handle\n", (char *)(& acpi_method_name)); } else { } return (1); } else { } return (0); } } void 
intel_register_dsm_handler(void) { bool tmp ; int tmp___0 ; { tmp = intel_dsm_detect(); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } return; } } void intel_unregister_dsm_handler(void) { { return; } } bool ldv_queue_work_on_679(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_680(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_681(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_682(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_683(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern void __dynamic_dev_dbg(struct _ddebug * , struct device const * , char const * , ...) 
; __inline static unsigned long arch_local_save_flags___18(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } bool ldv_queue_work_on_693(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_695(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_694(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_697(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_696(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_698(struct work_struct *ldv_func_arg1 ) ; __inline static bool queue_work___3(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_693(8192, wq, work); return (tmp); } } __inline static bool schedule_work___2(struct work_struct *work ) { bool tmp ; { tmp = 
queue_work___3(system_wq, work); return (tmp); } } extern void *ioremap_cache(resource_size_t , unsigned long ) ; void activate_work_17(struct work_struct *work , int state ) ; void call_and_disable_work_17(struct work_struct *work ) ; void disable_work_17(struct work_struct *work ) ; void call_and_disable_all_17(int state ) ; void invoke_work_17(void) ; extern acpi_status acpi_evaluate_integer(acpi_handle , acpi_string , struct acpi_object_list * , unsigned long long * ) ; extern int register_acpi_notifier(struct notifier_block * ) ; extern int unregister_acpi_notifier(struct notifier_block * ) ; extern int acpi_bus_get_device(acpi_handle , struct acpi_device ** ) ; __inline static void *acpi_os_ioremap(acpi_physical_address phys , acpi_size size ) { void *tmp ; { tmp = ioremap_cache(phys, (unsigned long )size); return (tmp); } } extern long acpi_is_video_device(acpi_handle ) ; extern enum acpi_backlight_type acpi_video_get_backlight_type(void) ; __inline static bool drm_can_sleep___9(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_43361; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_43361; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_43361; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_43361; default: __bad_percpu_size(); } ldv_43361: pscr_ret__ = pfo_ret__; goto ldv_43367; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43371; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43371; case 4UL: 
__asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43371; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_43371; default: __bad_percpu_size(); } ldv_43371: pscr_ret__ = pfo_ret_____0; goto ldv_43367; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43380; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43380; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43380; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_43380; default: __bad_percpu_size(); } ldv_43380: pscr_ret__ = pfo_ret_____1; goto ldv_43367; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43389; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43389; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43389; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_43389; default: __bad_percpu_size(); } ldv_43389: pscr_ret__ = pfo_ret_____2; goto ldv_43367; default: __bad_size_call_parameter(); goto ldv_43367; } ldv_43367: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___18(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder ) ; void intel_panel_set_backlight_acpi(struct intel_connector *connector , u32 user_level , u32 user_max ) ; static int swsci(struct drm_device *dev , u32 function , u32 parm , u32 *parm_out ) { struct drm_i915_private *dev_priv ; struct opregion_swsci *swsci___0 ; u32 main_function ; u32 sub_function ; u32 scic ; u16 pci_swsci ; u32 dslp ; bool 
__print_once ; long tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; bool tmp___2 ; long tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; swsci___0 = dev_priv->opregion.swsci; if ((unsigned long )swsci___0 == (unsigned long )((struct opregion_swsci *)0)) { return (-19); } else { } main_function = (function & 30U) >> 1; sub_function = (function & 65280U) >> 8; if (main_function == 6U) { if ((dev_priv->opregion.swsci_sbcb_sub_functions & (u32 )(1 << (int )sub_function)) == 0U) { return (-22); } else { } } else if (main_function == 4U) { if ((dev_priv->opregion.swsci_gbda_sub_functions & (u32 )(1 << (int )sub_function)) == 0U) { return (-22); } else { } } else { } dslp = ioread32((void *)(& swsci___0->dslp)); if (dslp == 0U) { dslp = 50U; } else if (dslp > 1500U) { if (! __print_once) { __print_once = 1; printk("\016[drm] ACPI BIOS requests an excessive sleep of %u ms, using %u ms instead\n", dslp, 1500); } else { } dslp = 1500U; } else { } scic = ioread32((void *)(& swsci___0->scic)); if ((int )scic & 1) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("swsci", "SWSCI request already in progress\n"); } else { } return (-16); } else { } scic = function | 1U; iowrite32(parm, (void *)(& swsci___0->parm)); iowrite32(scic, (void *)(& swsci___0->scic)); pci_read_config_word((struct pci_dev const *)dev->pdev, 232, & pci_swsci); if ((int )((short )pci_swsci) >= 0 || (int )pci_swsci & 1) { pci_swsci = (u16 )((unsigned int )pci_swsci | 32768U); pci_swsci = (unsigned int )pci_swsci & 65534U; pci_write_config_word((struct pci_dev const *)dev->pdev, 232, (int )pci_swsci); } else { } pci_swsci = (u16 )((unsigned int )pci_swsci | 1U); pci_write_config_word((struct pci_dev const *)dev->pdev, 232, (int )pci_swsci); tmp___1 = msecs_to_jiffies(dslp); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_51781; ldv_51780: ; if ((long )(timeout__ - (unsigned long 
)jiffies) < 0L) { scic = ioread32((void *)(& swsci___0->scic)); if ((int )scic & 1) { ret__ = -110; } else { } goto ldv_51779; } else { } tmp___2 = drm_can_sleep___9(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_51781: scic = ioread32((void *)(& swsci___0->scic)); if ((int )scic & 1) { goto ldv_51780; } else { } ldv_51779: ; if (ret__ != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("swsci", "SWSCI request timed out\n"); } else { } return (-110); } else { } scic = (scic & 224U) >> 5; if (scic != 1U) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("swsci", "SWSCI request error %u\n", scic); } else { } return (-5); } else { } if ((unsigned long )parm_out != (unsigned long )((u32 *)0U)) { *parm_out = ioread32((void *)(& swsci___0->parm)); } else { } return (0); } } int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder , bool enable ) { struct drm_device *dev ; u32 parm ; u32 type ; u32 port ; struct drm_i915_private *__p ; enum port tmp ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; { dev = intel_encoder->base.dev; parm = 0U; type = 0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return (0); } else { } tmp = intel_ddi_get_encoder_port(intel_encoder); port = (u32 )tmp; if (port == 4U) { port = 0U; } else { parm = (u32 )(1 << (int )port) | parm; port = port + 1U; } if (! enable) { parm = parm | 1024U; } else { } switch ((unsigned int )intel_encoder->type) { case 1U: type = 0U; goto ldv_51798; case 10U: ; case 7U: ; case 6U: ; case 11U: type = 2U; goto ldv_51798; case 8U: type = 3U; goto ldv_51798; default: __ret_warn_once = 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_opregion.c", 363, "unsupported intel_encoder type %d\n", (unsigned int )intel_encoder->type); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return (-22); } ldv_51798: parm = (type << (int )(port * 3U + 16U)) | parm; tmp___3 = swsci(dev, 2060U, parm, (u32 *)0U); return (tmp___3); } } static struct __anonstruct_power_state_map_448 const power_state_map[5U] = { {0, 0U}, {1, 1U}, {2, 2U}, {3, 4U}, {4, 4U}}; int intel_opregion_notify_adapter(struct drm_device *dev , pci_power_t state ) { int i ; struct drm_i915_private *__p ; int tmp ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return (0); } else { } i = 0; goto ldv_51828; ldv_51827: ; if ((int )power_state_map[i].pci_power_state == state) { tmp = swsci(dev, 1804U, power_state_map[i].parm, (u32 *)0U); return (tmp); } else { } i = i + 1; ldv_51828: ; if ((unsigned int )i <= 4U) { goto ldv_51827; } else { } return (-22); } } static u32 asle_set_backlight(struct drm_device *dev , u32 bclp ) { struct drm_i915_private *dev_priv ; struct intel_connector *intel_connector ; struct opregion_asle *asle ; long tmp ; long tmp___0 ; enum acpi_backlight_type tmp___1 ; long tmp___2 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; asle = dev_priv->opregion.asle; tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_backlight", "bclp = 0x%08x\n", bclp); } else { } tmp___1 = acpi_video_get_backlight_type(); if ((int 
)tmp___1 == 3) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("asle_set_backlight", "opregion backlight request ignored\n"); } else { } return (0U); } else { } if ((int )bclp >= 0) { return (4096U); } else { } bclp = bclp & 2147483647U; if (bclp > 255U) { return (4096U); } else { } drm_modeset_lock(& dev->mode_config.connection_mutex, (struct drm_modeset_acquire_ctx *)0); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("asle_set_backlight", "updating opregion backlight %d/255\n", bclp); } else { } __mptr = (struct list_head const *)dev->mode_config.connector_list.next; intel_connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_51843; ldv_51842: intel_panel_set_backlight_acpi(intel_connector, bclp, 255U); __mptr___0 = (struct list_head const *)intel_connector->base.head.next; intel_connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_51843: ; if ((unsigned long )(& intel_connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_51842; } else { } iowrite32((bclp * 100U + 254U) / 255U | 2147483648U, (void *)(& asle->cblv)); drm_modeset_unlock(& dev->mode_config.connection_mutex); return (0U); } } static u32 asle_set_als_illum(struct drm_device *dev , u32 alsi ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_als_illum", "Illum is not supported\n"); } else { } return (1024U); } } static u32 asle_set_pwm_freq(struct drm_device *dev , u32 pfmb ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_pwm_freq", "PWM freq is not supported\n"); } else { } return (65536U); } } static u32 asle_set_pfit(struct drm_device *dev , u32 pfit ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_pfit", 
"Pfit is not supported\n"); } else { } return (16384U); } } static u32 asle_set_supported_rotation_angles(struct drm_device *dev , u32 srot ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_supported_rotation_angles", "SROT is not supported\n"); } else { } return (262144U); } } static u32 asle_set_button_array(struct drm_device *dev , u32 iuer ) { long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { if (iuer == 0U) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_button_array", "Button array event is not supported (nothing)\n"); } else { } } else { } if ((iuer & 16U) != 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("asle_set_button_array", "Button array event is not supported (rotation lock)\n"); } else { } } else { } if ((iuer & 8U) != 0U) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("asle_set_button_array", "Button array event is not supported (volume down)\n"); } else { } } else { } if ((iuer & 4U) != 0U) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("asle_set_button_array", "Button array event is not supported (volume up)\n"); } else { } } else { } if ((iuer & 2U) != 0U) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("asle_set_button_array", "Button array event is not supported (windows)\n"); } else { } } else { } if ((int )iuer & 1) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("asle_set_button_array", "Button array event is not supported (power)\n"); } else { } } else { } return (1048576U); } } static u32 asle_set_convertible(struct drm_device *dev , u32 iuer ) { long tmp ; long tmp___0 ; { if ((iuer & 64U) != 0U) { tmp = 
ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_convertible", "Convertible is not supported (clamshell)\n"); } else { } } else { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("asle_set_convertible", "Convertible is not supported (slate)\n"); } else { } } return (4194304U); } } static u32 asle_set_docking(struct drm_device *dev , u32 iuer ) { long tmp ; long tmp___0 ; { if ((iuer & 128U) != 0U) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_set_docking", "Docking is not supported (docked)\n"); } else { } } else { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("asle_set_docking", "Docking is not supported (undocked)\n"); } else { } } return (16777216U); } } static u32 asle_isct_state(struct drm_device *dev ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_isct_state", "ISCT is not supported\n"); } else { } return (67108864U); } } static void asle_work(struct work_struct *work ) { struct intel_opregion *opregion ; struct work_struct const *__mptr ; struct drm_i915_private *dev_priv ; struct intel_opregion const *__mptr___0 ; struct drm_device *dev ; struct opregion_asle *asle ; u32 aslc_stat ; u32 aslc_req ; long tmp ; unsigned int tmp___0 ; u32 tmp___1 ; unsigned int tmp___2 ; u32 tmp___3 ; unsigned int tmp___4 ; u32 tmp___5 ; unsigned int tmp___6 ; u32 tmp___7 ; unsigned int tmp___8 ; u32 tmp___9 ; unsigned int tmp___10 ; u32 tmp___11 ; unsigned int tmp___12 ; u32 tmp___13 ; unsigned int tmp___14 ; u32 tmp___15 ; u32 tmp___16 ; { __mptr = (struct work_struct const *)work; opregion = (struct intel_opregion *)__mptr + 0xffffffffffffffc8UL; __mptr___0 = (struct intel_opregion const *)opregion; dev_priv = (struct drm_i915_private *)__mptr___0 + 0xffffffffffff5f58UL; dev = dev_priv->dev; asle = 
dev_priv->opregion.asle; aslc_stat = 0U; if ((unsigned long )asle == (unsigned long )((struct opregion_asle *)0)) { return; } else { } aslc_req = ioread32((void *)(& asle->aslc)); if ((aslc_req & 511U) == 0U) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("asle_work", "No request on ASLC interrupt 0x%08x\n", aslc_req); } else { } return; } else { } if ((int )aslc_req & 1) { tmp___0 = ioread32((void *)(& asle->alsi)); tmp___1 = asle_set_als_illum(dev, tmp___0); aslc_stat = tmp___1 | aslc_stat; } else { } if ((aslc_req & 2U) != 0U) { tmp___2 = ioread32((void *)(& asle->bclp)); tmp___3 = asle_set_backlight(dev, tmp___2); aslc_stat = tmp___3 | aslc_stat; } else { } if ((aslc_req & 4U) != 0U) { tmp___4 = ioread32((void *)(& asle->pfit)); tmp___5 = asle_set_pfit(dev, tmp___4); aslc_stat = tmp___5 | aslc_stat; } else { } if ((aslc_req & 8U) != 0U) { tmp___6 = ioread32((void *)(& asle->pfmb)); tmp___7 = asle_set_pwm_freq(dev, tmp___6); aslc_stat = tmp___7 | aslc_stat; } else { } if ((aslc_req & 16U) != 0U) { tmp___8 = ioread32((void *)(& asle->srot)); tmp___9 = asle_set_supported_rotation_angles(dev, tmp___8); aslc_stat = tmp___9 | aslc_stat; } else { } if ((aslc_req & 32U) != 0U) { tmp___10 = ioread32((void *)(& asle->iuer)); tmp___11 = asle_set_button_array(dev, tmp___10); aslc_stat = tmp___11 | aslc_stat; } else { } if ((aslc_req & 64U) != 0U) { tmp___12 = ioread32((void *)(& asle->iuer)); tmp___13 = asle_set_convertible(dev, tmp___12); aslc_stat = tmp___13 | aslc_stat; } else { } if ((aslc_req & 128U) != 0U) { tmp___14 = ioread32((void *)(& asle->iuer)); tmp___15 = asle_set_docking(dev, tmp___14); aslc_stat = tmp___15 | aslc_stat; } else { } if ((aslc_req & 256U) != 0U) { tmp___16 = asle_isct_state(dev); aslc_stat = tmp___16 | aslc_stat; } else { } iowrite32(aslc_stat, (void *)(& asle->aslc)); return; } } void intel_opregion_asle_intr(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct 
drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->opregion.asle != (unsigned long )((struct opregion_asle *)0)) { schedule_work___2(& dev_priv->opregion.asle_work); } else { } return; } } static struct intel_opregion *system_opregion ; static int intel_opregion_video_event(struct notifier_block *nb , unsigned long val , void *data ) { struct opregion_acpi *acpi ; struct acpi_bus_event *event ; int ret ; int tmp ; unsigned int tmp___0 ; { event = (struct acpi_bus_event *)data; ret = 1; tmp = strcmp((char const *)(& event->device_class), "video"); if (tmp != 0) { return (0); } else { } if ((unsigned long )system_opregion == (unsigned long )((struct intel_opregion *)0)) { return (0); } else { } acpi = system_opregion->acpi; if (event->type == 128U) { tmp___0 = ioread32((void *)(& acpi->cevt)); if ((tmp___0 & 1U) == 0U) { ret = 32770; } else { } } else { } iowrite32(0U, (void *)(& acpi->csts)); return (ret); } } static struct notifier_block intel_opregion_notifier = {& intel_opregion_video_event, 0, 0}; static void intel_didl_outputs(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_opregion *opregion ; struct drm_connector *connector ; acpi_handle handle ; struct acpi_device *acpi_dev ; struct acpi_device *acpi_cdev ; struct acpi_device *acpi_video_bus ; unsigned long long device_id ; acpi_status status ; u32 temp ; int i ; struct acpi_device *tmp ; int tmp___0 ; struct list_head const *__mptr ; long tmp___1 ; struct list_head const *__mptr___0 ; long tmp___2 ; struct list_head const *__mptr___1 ; struct _ddebug descriptor ; long tmp___3 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; int output_type ; struct _ddebug descriptor___0 ; long tmp___4 ; struct list_head const *__mptr___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; opregion = & dev_priv->opregion; acpi_video_bus = (struct acpi_device *)0; i = 0; tmp = to_acpi_node((dev->pdev)->dev.fwnode); handle = 
acpi_device_handle(tmp); if ((unsigned long )handle == (unsigned long )((acpi_handle )0)) { return; } else { tmp___0 = acpi_bus_get_device(handle, & acpi_dev); if (tmp___0 != 0) { return; } else { } } tmp___2 = acpi_is_video_device(handle); if (tmp___2 != 0L) { acpi_video_bus = acpi_dev; } else { __mptr = (struct list_head const *)acpi_dev->children.next; acpi_cdev = (struct acpi_device *)__mptr + 0xffffffffffffffc8UL; goto ldv_51932; ldv_51931: tmp___1 = acpi_is_video_device(acpi_cdev->handle); if (tmp___1 != 0L) { acpi_video_bus = acpi_cdev; goto ldv_51930; } else { } __mptr___0 = (struct list_head const *)acpi_cdev->node.next; acpi_cdev = (struct acpi_device *)__mptr___0 + 0xffffffffffffffc8UL; ldv_51932: ; if ((unsigned long )(& acpi_cdev->node) != (unsigned long )(& acpi_dev->children)) { goto ldv_51931; } else { } ldv_51930: ; } if ((unsigned long )acpi_video_bus == (unsigned long )((struct acpi_device *)0)) { printk("\fi915: No ACPI video bus found\n"); return; } else { } __mptr___1 = (struct list_head const *)acpi_video_bus->children.next; acpi_cdev = (struct acpi_device *)__mptr___1 + 0xffffffffffffffc8UL; goto ldv_51941; ldv_51940: ; if (i > 7) { descriptor.modname = "i915"; descriptor.function = "intel_didl_outputs"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_opregion.c"; descriptor.format = "More than 8 outputs detected via ACPI\n"; descriptor.lineno = 649U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_dev_dbg(& descriptor, (struct device const *)(& (dev->pdev)->dev), "More than 8 outputs detected via ACPI\n"); } else { } return; } else { } status = acpi_evaluate_integer(acpi_cdev->handle, (char *)"_ADR", (struct acpi_object_list *)0, & device_id); if (status == 0U) { if (device_id == 0ULL) { goto 
blind_set; } else { } iowrite32((unsigned int )device_id & 3855U, (void *)(& (opregion->acpi)->didl) + (unsigned long )i); i = i + 1; } else { } __mptr___2 = (struct list_head const *)acpi_cdev->node.next; acpi_cdev = (struct acpi_device *)__mptr___2 + 0xffffffffffffffc8UL; ldv_51941: ; if ((unsigned long )(& acpi_cdev->node) != (unsigned long )(& acpi_video_bus->children)) { goto ldv_51940; } else { } end: ; if (i <= 7) { iowrite32(0U, (void *)(& (opregion->acpi)->didl) + (unsigned long )i); } else { } return; blind_set: i = 0; __mptr___3 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct drm_connector *)__mptr___3 + 0xffffffffffffffe8UL; goto ldv_51964; ldv_51963: output_type = 0; if (i > 7) { descriptor___0.modname = "i915"; descriptor___0.function = "intel_didl_outputs"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_opregion.c"; descriptor___0.format = "More than 8 outputs in connector list\n"; descriptor___0.lineno = 676U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_dev_dbg(& descriptor___0, (struct device const *)(& (dev->pdev)->dev), "More than 8 outputs in connector list\n"); } else { } return; } else { } switch (connector->connector_type) { case 1: ; case 4: output_type = 256; goto ldv_51952; case 5: ; case 6: ; case 8: ; case 9: output_type = 512; goto ldv_51952; case 2: ; case 3: ; case 10: ; case 11: ; case 12: output_type = 768; goto ldv_51952; case 7: output_type = 1024; goto ldv_51952; } ldv_51952: temp = ioread32((void *)(& (opregion->acpi)->didl) + (unsigned long )i); iowrite32(((temp | (u32 )output_type) | (u32 )i) | 2147483648U, (void *)(& (opregion->acpi)->didl) + (unsigned long )i); i = i + 1; __mptr___4 = (struct list_head const 
*)connector->head.next; connector = (struct drm_connector *)__mptr___4 + 0xffffffffffffffe8UL; ldv_51964: ; if ((unsigned long )(& connector->head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_51963; } else { } goto end; } } static void intel_setup_cadls(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_opregion *opregion ; int i ; u32 disp_id ; { dev_priv = (struct drm_i915_private *)dev->dev_private; opregion = & dev_priv->opregion; i = 0; ldv_51973: disp_id = ioread32((void *)(& (opregion->acpi)->didl) + (unsigned long )i); iowrite32(disp_id, (void *)(& (opregion->acpi)->cadl) + (unsigned long )i); i = i + 1; if (i <= 7 && disp_id != 0U) { goto ldv_51973; } else { } return; } } void intel_opregion_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_opregion *opregion ; { dev_priv = (struct drm_i915_private *)dev->dev_private; opregion = & dev_priv->opregion; if ((unsigned long )opregion->header == (unsigned long )((struct opregion_header *)0)) { return; } else { } if ((unsigned long )opregion->acpi != (unsigned long )((struct opregion_acpi *)0)) { intel_didl_outputs(dev); intel_setup_cadls(dev); iowrite32(0U, (void *)(& (opregion->acpi)->csts)); iowrite32(1U, (void *)(& (opregion->acpi)->drdy)); system_opregion = opregion; register_acpi_notifier(& intel_opregion_notifier); } else { } if ((unsigned long )opregion->asle != (unsigned long )((struct opregion_asle *)0)) { iowrite32(2U, (void *)(& (opregion->asle)->tche)); iowrite32(1U, (void *)(& (opregion->asle)->ardy)); } else { } return; } } void intel_opregion_fini(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_opregion *opregion ; { dev_priv = (struct drm_i915_private *)dev->dev_private; opregion = & dev_priv->opregion; if ((unsigned long )opregion->header == (unsigned long )((struct opregion_header *)0)) { return; } else { } if ((unsigned long )opregion->asle != (unsigned long )((struct opregion_asle *)0)) 
{ iowrite32(0U, (void *)(& (opregion->asle)->ardy)); } else { } ldv_cancel_work_sync_698(& dev_priv->opregion.asle_work); if ((unsigned long )opregion->acpi != (unsigned long )((struct opregion_acpi *)0)) { iowrite32(0U, (void *)(& (opregion->acpi)->drdy)); system_opregion = (struct intel_opregion *)0; unregister_acpi_notifier(& intel_opregion_notifier); } else { } iounmap((void volatile *)opregion->header); opregion->header = (struct opregion_header *)0; opregion->acpi = (struct opregion_acpi *)0; opregion->swsci = (struct opregion_swsci *)0; opregion->asle = (struct opregion_asle *)0; opregion->vbt = (void *)0; opregion->lid_state = (u32 *)0U; return; } } static void swsci_setup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_opregion *opregion ; bool requested_callbacks ; u32 tmp ; int tmp___0 ; int tmp___1 ; u32 low ; u32 high ; u32 req ; long tmp___2 ; int tmp___3 ; long tmp___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; opregion = & dev_priv->opregion; requested_callbacks = 0; opregion->swsci_gbda_sub_functions = 1U; opregion->swsci_sbcb_sub_functions = 1U; tmp___0 = swsci(dev, 8U, 0U, & tmp); if (tmp___0 == 0) { tmp = tmp << 1; opregion->swsci_gbda_sub_functions = opregion->swsci_gbda_sub_functions | tmp; } else { } tmp___1 = swsci(dev, 264U, 0U, & tmp); if (tmp___1 == 0) { opregion->swsci_sbcb_sub_functions = opregion->swsci_sbcb_sub_functions | tmp; requested_callbacks = 1; } else { } tmp___3 = swsci(dev, 12U, 0U, & tmp); if (tmp___3 == 0) { low = tmp & 2047U; high = tmp & 4294963200U; tmp = ((high << 4) | (low << 1)) | 1U; if ((int )requested_callbacks) { req = opregion->swsci_sbcb_sub_functions; if ((req & tmp) != req) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("swsci_setup", "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp); } else { } } else { } } else { opregion->swsci_sbcb_sub_functions = 
opregion->swsci_sbcb_sub_functions | tmp; } } else { } tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("swsci_setup", "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n", opregion->swsci_gbda_sub_functions, opregion->swsci_sbcb_sub_functions); } else { } return; } } int intel_opregion_setup(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_opregion *opregion ; void *base ; u32 asls ; u32 mboxes ; char buf[17U] ; int err ; long tmp ; long tmp___0 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; opregion = & dev_priv->opregion; err = 0; pci_read_config_dword((struct pci_dev const *)dev->pdev, 252, & asls); tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_opregion_setup", "graphic opregion physical addr: 0x%x\n", asls); } else { } if (asls == 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_opregion_setup", "ACPI OpRegion not supported!\n"); } else { } return (-524); } else { } __init_work(& opregion->asle_work, 0); __constr_expr_0___0.counter = 137438953408L; opregion->asle_work.data = __constr_expr_0___0; lockdep_init_map(& opregion->asle_work.lockdep_map, "(&opregion->asle_work)", & __key, 0); INIT_LIST_HEAD(& opregion->asle_work.entry); opregion->asle_work.func = & asle_work; base = acpi_os_ioremap((acpi_physical_address )asls, 8192ULL); if ((unsigned long )base == (unsigned long )((void *)0)) { return (-12); } else { } memcpy_fromio((void *)(& buf), (void const volatile *)base, 17UL); tmp___2 = memcmp((void const *)(& buf), (void const *)"IntelGraphicsMem", 16UL); if (tmp___2 != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_opregion_setup", "opregion signature 
mismatch\n"); } else { } err = -22; goto err_out; } else { } opregion->header = (struct opregion_header *)base; opregion->vbt = base + 1024UL; opregion->lid_state = (u32 *)base + 428U; mboxes = ioread32((void *)(& (opregion->header)->mboxes)); if ((int )mboxes & 1) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_opregion_setup", "Public ACPI methods supported\n"); } else { } opregion->acpi = (struct opregion_acpi *)base + 256U; } else { } if ((mboxes & 2U) != 0U) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_opregion_setup", "SWSCI supported\n"); } else { } opregion->swsci = (struct opregion_swsci *)base + 512U; swsci_setup(dev); } else { } if ((mboxes & 4U) != 0U) { tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_opregion_setup", "ASLE supported\n"); } else { } opregion->asle = (struct opregion_asle *)base + 768U; iowrite32(0U, (void *)(& (opregion->asle)->ardy)); } else { } return (0); err_out: iounmap((void volatile *)base); return (err); } } void activate_work_17(struct work_struct *work , int state ) { { if (ldv_work_17_0 == 0) { ldv_work_struct_17_0 = work; ldv_work_17_0 = state; return; } else { } if (ldv_work_17_1 == 0) { ldv_work_struct_17_1 = work; ldv_work_17_1 = state; return; } else { } if (ldv_work_17_2 == 0) { ldv_work_struct_17_2 = work; ldv_work_17_2 = state; return; } else { } if (ldv_work_17_3 == 0) { ldv_work_struct_17_3 = work; ldv_work_17_3 = state; return; } else { } return; } } void call_and_disable_work_17(struct work_struct *work ) { { if ((ldv_work_17_0 == 2 || ldv_work_17_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_17_0) { asle_work(work); ldv_work_17_0 = 1; return; } else { } if ((ldv_work_17_1 == 2 || ldv_work_17_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_17_1) { asle_work(work); ldv_work_17_1 = 1; return; } 
else { } if ((ldv_work_17_2 == 2 || ldv_work_17_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_17_2) { asle_work(work); ldv_work_17_2 = 1; return; } else { } if ((ldv_work_17_3 == 2 || ldv_work_17_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_17_3) { asle_work(work); ldv_work_17_3 = 1; return; } else { } return; } } void disable_work_17(struct work_struct *work ) { { if ((ldv_work_17_0 == 3 || ldv_work_17_0 == 2) && (unsigned long )ldv_work_struct_17_0 == (unsigned long )work) { ldv_work_17_0 = 1; } else { } if ((ldv_work_17_1 == 3 || ldv_work_17_1 == 2) && (unsigned long )ldv_work_struct_17_1 == (unsigned long )work) { ldv_work_17_1 = 1; } else { } if ((ldv_work_17_2 == 3 || ldv_work_17_2 == 2) && (unsigned long )ldv_work_struct_17_2 == (unsigned long )work) { ldv_work_17_2 = 1; } else { } if ((ldv_work_17_3 == 3 || ldv_work_17_3 == 2) && (unsigned long )ldv_work_struct_17_3 == (unsigned long )work) { ldv_work_17_3 = 1; } else { } return; } } void work_init_17(void) { { ldv_work_17_0 = 0; ldv_work_17_1 = 0; ldv_work_17_2 = 0; ldv_work_17_3 = 0; return; } } void call_and_disable_all_17(int state ) { { if (ldv_work_17_0 == state) { call_and_disable_work_17(ldv_work_struct_17_0); } else { } if (ldv_work_17_1 == state) { call_and_disable_work_17(ldv_work_struct_17_1); } else { } if (ldv_work_17_2 == state) { call_and_disable_work_17(ldv_work_struct_17_2); } else { } if (ldv_work_17_3 == state) { call_and_disable_work_17(ldv_work_struct_17_3); } else { } return; } } void invoke_work_17(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_17_0 == 2 || ldv_work_17_0 == 3) { ldv_work_17_0 = 4; asle_work(ldv_work_struct_17_0); ldv_work_17_0 = 1; } else { } goto ldv_52034; case 1: ; if (ldv_work_17_1 == 2 || ldv_work_17_1 == 3) { ldv_work_17_1 = 4; asle_work(ldv_work_struct_17_0); ldv_work_17_1 = 1; } else { } goto ldv_52034; case 2: ; if (ldv_work_17_2 == 2 || ldv_work_17_2 == 3) { ldv_work_17_2 = 
4; asle_work(ldv_work_struct_17_0); ldv_work_17_2 = 1; } else { } goto ldv_52034; case 3: ; if (ldv_work_17_3 == 2 || ldv_work_17_3 == 3) { ldv_work_17_3 = 4; asle_work(ldv_work_struct_17_0); ldv_work_17_3 = 1; } else { } goto ldv_52034; default: ldv_stop(); } ldv_52034: ; return; } } void ldv_main_exported_67(void) { struct notifier_block *ldvarg133 ; void *tmp ; unsigned long ldvarg135 ; void *ldvarg134 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(24UL); ldvarg133 = (struct notifier_block *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg134 = tmp___0; ldv_memset((void *)(& ldvarg135), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_67 == 1) { intel_opregion_video_event(ldvarg133, ldvarg135, ldvarg134); ldv_state_variable_67 = 1; } else { } goto ldv_52046; default: ldv_stop(); } ldv_52046: ; return; } } bool ldv_queue_work_on_693(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_694(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_695(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_696(struct workqueue_struct *ldv_func_arg1 ) { { 
/* Tail of ldv_flush_workqueue_696(): flush the real workqueue, then run and
 * disable every work item the LDV model tracks as queued (state 2). */
flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* Instrumented wrapper around queue_delayed_work_on(): delegates to the real
 * function, then records the embedded work_struct in the LDV model as
 * queued (state 2). */
bool ldv_queue_delayed_work_on_697(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Instrumented wrapper around cancel_work_sync(): delegates, then marks the
 * work item disabled in the LDV model via disable_work_18(). */
bool ldv_cancel_work_sync_698(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(ldv_func_arg1); return (ldv_func_res); } }
/* Forward declarations emitted by CIL for the following translation-unit
 * section of the merged file (intel_fbdev instrumentation). */
__inline static long ldv__builtin_expect(long exp , long c ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; bool ldv_queue_work_on_709(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_711(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_710(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_713(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_712(struct workqueue_struct *ldv_func_arg1 ) ; extern bool flush_work(struct work_struct * ) ; bool ldv_flush_work_714(struct work_struct *ldv_func_arg1 ) ; bool ldv_flush_work_715(struct work_struct *ldv_func_arg1 ) ;
/* queue_work() clone routed through the instrumented queue_work_on wrapper;
 * 8192 is presumably WORK_CPU_UNBOUND in this kernel configuration —
 * TODO(review): confirm against the original workqueue.h. */
__inline static bool queue_work___4(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_709(8192, wq, work); return (tmp); } }
/* schedule_work() clone: queue the work onto the global system_wq. */
__inline static bool schedule_work___3(struct work_struct *work ) { bool tmp ; { tmp = queue_work___4(system_wq, work); return (tmp); } }
__inline static int kref_sub___15(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put___15(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub___15(kref, 1U, release); return (tmp); } } void invoke_work_18(void) ; extern void async_synchronize_full(void) ; extern void console_lock(void) ; extern int console_trylock(void) ; extern void console_unlock(void) ; __inline static struct apertures_struct *alloc_apertures(unsigned int max_num ) { struct apertures_struct *a ; void *tmp ; { tmp = kzalloc((unsigned long )max_num * 16UL + 8UL, 208U); a = (struct apertures_struct *)tmp; if ((unsigned long )a == (unsigned long )((struct apertures_struct *)0)) { return ((struct apertures_struct *)0); } else { } a->count = max_num; return (a); } } extern void cfb_fillrect(struct fb_info * , struct fb_fillrect const * ) ; extern void cfb_copyarea(struct fb_info * , struct fb_copyarea const * ) ; extern void cfb_imageblit(struct fb_info * , struct fb_image const * ) ; extern int unregister_framebuffer(struct fb_info * ) ; extern void fb_set_suspend(struct fb_info * , int ) ; extern struct fb_info *framebuffer_alloc(size_t , struct device * ) ; extern void framebuffer_release(struct fb_info * ) ; extern int fb_alloc_cmap(struct fb_cmap * , int , int ) ; extern void fb_dealloc_cmap(struct fb_cmap * ) ; extern void vga_switcheroo_client_fb_set(struct pci_dev * , struct fb_info * ) ; extern void drm_framebuffer_remove(struct drm_framebuffer * ) ; extern void 
drm_fb_helper_prepare(struct drm_device * , struct drm_fb_helper * , struct drm_fb_helper_funcs const * ) ; extern int drm_fb_helper_init(struct drm_device * , struct drm_fb_helper * , int , int ) ; extern void drm_fb_helper_fini(struct drm_fb_helper * ) ; extern int drm_fb_helper_blank(int , struct fb_info * ) ; extern int drm_fb_helper_pan_display(struct fb_var_screeninfo * , struct fb_info * ) ; extern int drm_fb_helper_set_par(struct fb_info * ) ; extern int drm_fb_helper_check_var(struct fb_var_screeninfo * , struct fb_info * ) ; extern bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper * ) ; extern void drm_fb_helper_fill_var(struct fb_info * , struct drm_fb_helper * , uint32_t , uint32_t ) ; extern void drm_fb_helper_fill_fix(struct fb_info * , uint32_t , uint32_t ) ; extern int drm_fb_helper_setcmap(struct fb_cmap * , struct fb_info * ) ; extern int drm_fb_helper_hotplug_event(struct drm_fb_helper * ) ; extern int drm_fb_helper_initial_config(struct drm_fb_helper * , int ) ; extern int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper * ) ; extern int drm_fb_helper_debug_enter(struct fb_info * ) ; extern int drm_fb_helper_debug_leave(struct fb_info * ) ; extern struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector * , int , int ) ; extern struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector * , int , int ) ; __inline static void drm_gem_object_unreference___14(struct drm_gem_object *obj ) { { if ((unsigned long )obj != (unsigned long )((struct drm_gem_object *)0)) { kref_put___15(& obj->refcount, & drm_gem_object_free); } else { } return; } } int intel_fbdev_init(struct drm_device *dev ) ; void intel_fbdev_initial_config(void *data , async_cookie_t cookie ) ; void intel_fbdev_fini(struct drm_device *dev ) ; void intel_fbdev_restore_mode(struct drm_device *dev ) ; static int intel_fbdev_set_par(struct fb_info *info ) { struct drm_fb_helper *fb_helper ; struct intel_fbdev *ifbdev ; 
struct drm_fb_helper const *__mptr ; int ret ; { fb_helper = (struct drm_fb_helper *)info->par; __mptr = (struct drm_fb_helper const *)fb_helper; ifbdev = (struct intel_fbdev *)__mptr; ret = drm_fb_helper_set_par(info); if (ret == 0) { mutex_lock_nested(& (fb_helper->dev)->struct_mutex, 0U); ret = i915_gem_object_set_to_gtt_domain((ifbdev->fb)->obj, 1); mutex_unlock(& (fb_helper->dev)->struct_mutex); } else { } return (ret); } } static int intel_fbdev_blank(int blank , struct fb_info *info ) { struct drm_fb_helper *fb_helper ; struct intel_fbdev *ifbdev ; struct drm_fb_helper const *__mptr ; int ret ; { fb_helper = (struct drm_fb_helper *)info->par; __mptr = (struct drm_fb_helper const *)fb_helper; ifbdev = (struct intel_fbdev *)__mptr; ret = drm_fb_helper_blank(blank, info); if (ret == 0) { mutex_lock_nested(& (fb_helper->dev)->struct_mutex, 0U); intel_fb_obj_invalidate((ifbdev->fb)->obj, (struct intel_engine_cs *)0, 0); mutex_unlock(& (fb_helper->dev)->struct_mutex); } else { } return (ret); } } static int intel_fbdev_pan_display(struct fb_var_screeninfo *var , struct fb_info *info ) { struct drm_fb_helper *fb_helper ; struct intel_fbdev *ifbdev ; struct drm_fb_helper const *__mptr ; int ret ; { fb_helper = (struct drm_fb_helper *)info->par; __mptr = (struct drm_fb_helper const *)fb_helper; ifbdev = (struct intel_fbdev *)__mptr; ret = drm_fb_helper_pan_display(var, info); if (ret == 0) { mutex_lock_nested(& (fb_helper->dev)->struct_mutex, 0U); intel_fb_obj_invalidate((ifbdev->fb)->obj, (struct intel_engine_cs *)0, 0); mutex_unlock(& (fb_helper->dev)->struct_mutex); } else { } return (ret); } } static struct fb_ops intelfb_ops = {& __this_module, 0, 0, 0, 0, & drm_fb_helper_check_var, & intel_fbdev_set_par, 0, & drm_fb_helper_setcmap, & intel_fbdev_blank, & intel_fbdev_pan_display, & cfb_fillrect, & cfb_copyarea, & cfb_imageblit, 0, 0, 0, 0, 0, 0, 0, 0, & drm_fb_helper_debug_enter, & drm_fb_helper_debug_leave}; static int intelfb_alloc(struct drm_fb_helper *helper 
, struct drm_fb_helper_surface_size *sizes ) { struct intel_fbdev *ifbdev ; struct drm_fb_helper const *__mptr ; struct drm_framebuffer *fb ; struct drm_device *dev ; struct drm_mode_fb_cmd2 mode_cmd ; struct drm_i915_gem_object *obj ; int size ; int ret ; long tmp ; bool tmp___0 ; struct drm_framebuffer const *__mptr___0 ; { __mptr = (struct drm_fb_helper const *)helper; ifbdev = (struct intel_fbdev *)__mptr; dev = helper->dev; mode_cmd.fb_id = 0U; mode_cmd.width = 0U; mode_cmd.height = 0U; mode_cmd.pixel_format = 0U; mode_cmd.flags = 0U; mode_cmd.handles[0] = 0U; mode_cmd.handles[1] = 0U; mode_cmd.handles[2] = 0U; mode_cmd.handles[3] = 0U; mode_cmd.pitches[0] = 0U; mode_cmd.pitches[1] = 0U; mode_cmd.pitches[2] = 0U; mode_cmd.pitches[3] = 0U; mode_cmd.offsets[0] = 0U; mode_cmd.offsets[1] = 0U; mode_cmd.offsets[2] = 0U; mode_cmd.offsets[3] = 0U; mode_cmd.modifier[0] = 0ULL; mode_cmd.modifier[1] = 0ULL; mode_cmd.modifier[2] = 0ULL; mode_cmd.modifier[3] = 0ULL; if (sizes->surface_bpp == 24U) { sizes->surface_bpp = 32U; } else { } mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = (mode_cmd.width * ((sizes->surface_bpp + 7U) / 8U) + 63U) & 4294967232U; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = (int )(mode_cmd.pitches[0] * mode_cmd.height); size = (size + 4095) & -4096; obj = i915_gem_object_create_stolen(dev, (u32 )size); if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { obj = i915_gem_alloc_object(dev, (size_t )size); } else { } if ((unsigned long )obj == (unsigned long )((struct drm_i915_gem_object *)0)) { drm_err("failed to allocate framebuffer\n"); ret = -12; goto out; } else { } fb = __intel_framebuffer_create(dev, & mode_cmd, obj); tmp___0 = IS_ERR((void const *)fb); if ((int )tmp___0) { tmp = PTR_ERR((void const *)fb); ret = (int )tmp; goto out_unref; } else { } ret = intel_pin_and_fence_fb_obj((struct drm_plane *)0, fb, 
(struct drm_plane_state const *)0, (struct intel_engine_cs *)0); if (ret != 0) { drm_err("failed to pin obj: %d\n", ret); goto out_fb; } else { } __mptr___0 = (struct drm_framebuffer const *)fb; ifbdev->fb = (struct intel_framebuffer *)__mptr___0; return (0); out_fb: drm_framebuffer_remove(fb); out_unref: drm_gem_object_unreference___14(& obj->base); out: ; return (ret); } } static int intelfb_create(struct drm_fb_helper *helper , struct drm_fb_helper_surface_size *sizes ) { struct intel_fbdev *ifbdev ; struct drm_fb_helper const *__mptr ; struct intel_framebuffer *intel_fb ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct fb_info *info ; struct drm_framebuffer *fb ; struct drm_i915_gem_object *obj ; int size ; int ret ; bool prealloc ; long tmp ; struct intel_framebuffer *tmp___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on ; long tmp___3 ; long tmp___4 ; unsigned long tmp___5 ; unsigned long tmp___6 ; void *tmp___7 ; unsigned long tmp___8 ; long tmp___9 ; { __mptr = (struct drm_fb_helper const *)helper; ifbdev = (struct intel_fbdev *)__mptr; intel_fb = ifbdev->fb; dev = helper->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; prealloc = 0; mutex_lock_nested(& dev->struct_mutex, 0U); if ((unsigned long )intel_fb != (unsigned long )((struct intel_framebuffer *)0) && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intelfb_create", "BIOS fb too small (%dx%d), we require (%dx%d), releasing it\n", intel_fb->base.width, intel_fb->base.height, sizes->fb_width, sizes->fb_height); } else { } drm_framebuffer_unreference(& intel_fb->base); tmp___0 = (struct intel_framebuffer *)0; ifbdev->fb = tmp___0; intel_fb = tmp___0; } else { } if ((unsigned long )intel_fb == (unsigned long )((struct intel_framebuffer *)0)) { goto _L; } else { __ret_warn_on = (unsigned long )intel_fb->obj == (unsigned long )((struct 
drm_i915_gem_object *)0); tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fbdev.c", 224, "WARN_ON(!intel_fb->obj)"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { _L: /* CIL Label */ tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intelfb_create", "no BIOS fb, allocating a new one\n"); } else { } ret = intelfb_alloc(helper, sizes); if (ret != 0) { goto out_unlock; } else { } intel_fb = ifbdev->fb; } else { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intelfb_create", "re-using BIOS fb\n"); } else { } prealloc = 1; sizes->fb_width = intel_fb->base.width; sizes->fb_height = intel_fb->base.height; } } obj = intel_fb->obj; size = (int )obj->base.size; info = framebuffer_alloc(0UL, & (dev->pdev)->dev); if ((unsigned long )info == (unsigned long )((struct fb_info *)0)) { ret = -12; goto out_unpin; } else { } info->par = (void *)helper; fb = & (ifbdev->fb)->base; ifbdev->helper.fb = fb; ifbdev->helper.fbdev = info; strcpy((char *)(& info->fix.id), "inteldrmfb"); info->flags = 2097153; info->fbops = & intelfb_ops; ret = fb_alloc_cmap(& info->cmap, 256, 0); if (ret != 0) { ret = -12; goto out_unpin; } else { } info->apertures = alloc_apertures(1U); if ((unsigned long )info->apertures == (unsigned long )((struct apertures_struct *)0)) { ret = -12; goto out_unpin; } else { } (info->apertures)->ranges[0].base = dev->mode_config.fb_base; (info->apertures)->ranges[0].size = (resource_size_t )dev_priv->gtt.mappable_end; tmp___5 = i915_gem_obj_ggtt_offset(obj); info->fix.smem_start = (unsigned long )(dev->mode_config.fb_base + (unsigned long long )tmp___5); info->fix.smem_len = (__u32 
)size; tmp___6 = i915_gem_obj_ggtt_offset(obj); tmp___7 = ioremap_wc(dev_priv->gtt.mappable_base + (unsigned long long )tmp___6, (unsigned long )size); info->screen_base = (char *)tmp___7; if ((unsigned long )info->screen_base == (unsigned long )((char *)0)) { ret = -28; goto out_unpin; } else { } info->screen_size = (unsigned long )size; info->skip_vt_switch = 1; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, & ifbdev->helper, sizes->fb_width, sizes->fb_height); if ((unsigned long )((ifbdev->fb)->obj)->stolen != (unsigned long )((struct drm_mm_node *)0) && ! prealloc) { memset_io((void volatile *)info->screen_base, 0, info->screen_size); } else { } tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { tmp___8 = i915_gem_obj_ggtt_offset(obj); drm_ut_debug_printk("intelfb_create", "allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width, fb->height, tmp___8, obj); } else { } mutex_unlock(& dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); return (0); out_unpin: i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference___14(& obj->base); out_unlock: mutex_unlock(& dev->struct_mutex); return (ret); } } static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc , u16 red , u16 green , u16 blue , int regno ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; intel_crtc->lut_r[regno] = (u8 )((int )red >> 8); intel_crtc->lut_g[regno] = (u8 )((int )green >> 8); intel_crtc->lut_b[regno] = (u8 )((int )blue >> 8); return; } } static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc , u16 *red , u16 *green , u16 *blue , int regno ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; *red = (int )((u16 )intel_crtc->lut_r[regno]) << 8U; *green = (int )((u16 )intel_crtc->lut_g[regno]) << 8U; *blue = (int )((u16 
)intel_crtc->lut_b[regno]) << 8U; return; } } static struct drm_fb_helper_crtc *intel_fb_helper_crtc(struct drm_fb_helper *fb_helper , struct drm_crtc *crtc ) { int i ; { i = 0; goto ldv_49222; ldv_49221: ; if ((unsigned long )(fb_helper->crtc_info + (unsigned long )i)->mode_set.crtc == (unsigned long )crtc) { return (fb_helper->crtc_info + (unsigned long )i); } else { } i = i + 1; ldv_49222: ; if (fb_helper->crtc_count > i) { goto ldv_49221; } else { } return ((struct drm_fb_helper_crtc *)0); } } static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper , struct drm_fb_helper_crtc **crtcs , struct drm_display_mode **modes , struct drm_fb_offset *offsets , bool *enabled , int width , int height ) { struct drm_device *dev ; int i ; int j ; bool *save_enabled ; bool fallback ; int num_connectors_enabled ; int num_connectors_detected ; uint64_t conn_configured ; uint64_t mask ; int pass ; void *tmp ; struct drm_fb_helper_connector *fb_conn ; struct drm_connector *connector ; struct drm_encoder *encoder ; struct drm_fb_helper_crtc *new_crtc ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; struct list_head const *__mptr ; int tmp___9 ; long tmp___10 ; struct drm_crtc const *__mptr___0 ; struct drm_crtc const *__mptr___1 ; long tmp___11 ; long tmp___12 ; long tmp___13 ; struct drm_i915_private *__p ; long tmp___14 ; { dev = fb_helper->dev; fallback = 1; num_connectors_enabled = 0; num_connectors_detected = 0; conn_configured = 0ULL; pass = 0; tmp = kcalloc((size_t )dev->mode_config.num_connector, 1UL, 208U); save_enabled = (bool *)tmp; if ((unsigned long )save_enabled == (unsigned long )((bool *)0)) { return (0); } else { } memcpy((void *)save_enabled, (void const *)enabled, (size_t )dev->mode_config.num_connector); mask = (uint64_t )((1 << fb_helper->connector_count) + -1); retry: i = 0; goto ldv_49263; ldv_49262: fb_conn = *(fb_helper->connector_info + 
(unsigned long )i); connector = fb_conn->connector; if (((uint64_t )(1 << i) & conn_configured) != 0ULL) { goto ldv_49248; } else { } if (pass == 0 && ! connector->has_tile) { goto ldv_49248; } else { } if ((unsigned int )connector->status == 1U) { num_connectors_detected = num_connectors_detected + 1; } else { } if (! *(enabled + (unsigned long )i)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "connector %s not enabled, skipping\n", connector->name); } else { } conn_configured = (uint64_t )(1 << i) | conn_configured; goto ldv_49248; } else { } if ((unsigned int )connector->force == 1U) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "connector %s is disabled by user, skipping\n", connector->name); } else { } *(enabled + (unsigned long )i) = 0; goto ldv_49248; } else { } encoder = connector->encoder; if ((unsigned long )encoder == (unsigned long )((struct drm_encoder *)0)) { goto _L; } else { __ret_warn_on = (unsigned long )encoder->crtc == (unsigned long )((struct drm_crtc *)0); tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fbdev.c", 431, "WARN_ON(!encoder->crtc)"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { _L: /* CIL Label */ if ((unsigned int )connector->force > 1U) { goto bail; } else { } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "connector %s has no encoder or crtc, skipping\n", connector->name); } else { } *(enabled + (unsigned long )i) = 0; conn_configured = (uint64_t )(1 << i) | conn_configured; goto ldv_49248; } else 
{ } } num_connectors_enabled = num_connectors_enabled + 1; new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc); j = 0; goto ldv_49254; ldv_49253: ; if ((unsigned long )*(crtcs + (unsigned long )j) == (unsigned long )new_crtc) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "fallback: cloned configuration\n"); } else { } goto bail; } else { } j = j + 1; ldv_49254: ; if (fb_helper->connector_count > j) { goto ldv_49253; } else { } tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "looking for cmdline mode on connector %s\n", connector->name); } else { } *(modes + (unsigned long )i) = drm_pick_cmdline_mode(fb_conn, width, height); if ((unsigned long )*(modes + (unsigned long )i) == (unsigned long )((struct drm_display_mode *)0)) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "looking for preferred mode on connector %s %d\n", connector->name, (int )connector->has_tile); } else { } *(modes + (unsigned long )i) = drm_has_preferred_mode(fb_conn, width, height); } else { } if ((unsigned long )*(modes + (unsigned long )i) == (unsigned long )((struct drm_display_mode *)0)) { tmp___9 = list_empty((struct list_head const *)(& connector->modes)); if (tmp___9 == 0) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "using first mode listed on connector %s\n", connector->name); } else { } __mptr = (struct list_head const *)connector->modes.next; *(modes + (unsigned long )i) = (struct drm_display_mode *)__mptr; } else { } } else { } if ((unsigned long )*(modes + (unsigned long )i) == (unsigned long )((struct drm_display_mode *)0)) { tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", 
"looking for current mode on connector %s\n", connector->name); } else { } __mptr___0 = (struct drm_crtc const *)encoder->crtc; intel_mode_from_pipe_config(& (encoder->crtc)->hwmode, ((struct intel_crtc *)__mptr___0)->config); *(modes + (unsigned long )i) = & (encoder->crtc)->hwmode; } else { } *(crtcs + (unsigned long )i) = new_crtc; tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { __mptr___1 = (struct drm_crtc const *)encoder->crtc; drm_ut_debug_printk("intel_fb_initial_config", "connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", connector->name, (int )((struct intel_crtc *)__mptr___1)->pipe + 65, (encoder->crtc)->base.id, (*(modes + (unsigned long )i))->hdisplay, (*(modes + (unsigned long )i))->vdisplay, ((*(modes + (unsigned long )i))->flags & 16U) != 0U ? (char *)"i" : (char *)""); } else { } fallback = 0; conn_configured = (uint64_t )(1 << i) | conn_configured; ldv_49248: i = i + 1; ldv_49263: ; if (fb_helper->connector_count > i) { goto ldv_49262; } else { } if ((conn_configured & mask) != mask) { pass = pass + 1; goto retry; } else { } if (num_connectors_enabled != num_connectors_detected) { __p = to_i915((struct drm_device const *)dev); if ((int )__p->info.num_pipes > num_connectors_enabled) { tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "fallback: Not all outputs enabled\n"); } else { } tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "Enabled: %i, detected: %i\n", num_connectors_enabled, num_connectors_detected); } else { } fallback = 1; } else { } } else { } if ((int )fallback) { bail: tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("intel_fb_initial_config", "Not using firmware configuration\n"); } else { } memcpy((void *)enabled, (void const *)save_enabled, (size_t )dev->mode_config.num_connector); 
kfree((void const *)save_enabled); return (0); } else { } kfree((void const *)save_enabled); return (1); } } static struct drm_fb_helper_funcs const intel_fb_helper_funcs = {& intel_crtc_fb_gamma_set, & intel_crtc_fb_gamma_get, & intelfb_create, & intel_fb_initial_config}; static void intel_fbdev_destroy(struct drm_device *dev , struct intel_fbdev *ifbdev ) { struct fb_info *info ; { if ((unsigned long )ifbdev->helper.fbdev != (unsigned long )((struct fb_info *)0)) { info = ifbdev->helper.fbdev; unregister_framebuffer(info); iounmap((void volatile *)info->screen_base); if (info->cmap.len != 0U) { fb_dealloc_cmap(& info->cmap); } else { } framebuffer_release(info); } else { } drm_fb_helper_fini(& ifbdev->helper); drm_framebuffer_unregister_private(& (ifbdev->fb)->base); drm_framebuffer_remove(& (ifbdev->fb)->base); return; } } static bool intel_fbdev_init_bios(struct drm_device *dev , struct intel_fbdev *ifbdev ) { struct intel_framebuffer *fb ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct intel_initial_plane_config *plane_config ; unsigned int max_size ; struct list_head const *__mptr ; struct drm_crtc const *__mptr___0 ; long tmp ; long tmp___0 ; struct drm_framebuffer const *__mptr___1 ; struct list_head const *__mptr___2 ; long tmp___1 ; struct list_head const *__mptr___3 ; unsigned int cur_size ; struct drm_crtc const *__mptr___4 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; struct list_head const *__mptr___5 ; long tmp___8 ; struct list_head const *__mptr___6 ; struct drm_crtc const *__mptr___7 ; int __ret_warn_on ; long tmp___9 ; struct list_head const *__mptr___8 ; long tmp___10 ; { fb = (struct intel_framebuffer *)0; plane_config = (struct intel_initial_plane_config *)0; max_size = 0U; if (! 
i915.fastboot) { return (0); } else { } __mptr = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr + 0xfffffffffffffff0UL; goto ldv_49297; ldv_49296: __mptr___0 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___0; if (! intel_crtc->active || (unsigned long )(crtc->primary)->fb == (unsigned long )((struct drm_framebuffer *)0)) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "pipe %c not active or no fb, skipping\n", (int )intel_crtc->pipe + 65); } else { } goto ldv_49293; } else { } if ((unsigned int )intel_crtc->plane_config.size > max_size) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "found possible fb from plane %c\n", (int )intel_crtc->pipe + 65); } else { } plane_config = & intel_crtc->plane_config; __mptr___1 = (struct drm_framebuffer const *)(crtc->primary)->fb; fb = (struct intel_framebuffer *)__mptr___1; max_size = (unsigned int )plane_config->size; } else { } ldv_49293: __mptr___2 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___2 + 0xfffffffffffffff0UL; ldv_49297: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49296; } else { } if ((unsigned long )fb == (unsigned long )((struct intel_framebuffer *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "no active fbs found, not using BIOS config\n"); } else { } goto out; } else { } __mptr___3 = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr___3 + 0xfffffffffffffff0UL; goto ldv_49310; ldv_49309: __mptr___4 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___4; if (! 
intel_crtc->active) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "pipe %c not active, skipping\n", (int )intel_crtc->pipe + 65); } else { } goto ldv_49307; } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "checking plane %c for BIOS fb\n", (int )intel_crtc->pipe + 65); } else { } cur_size = (unsigned int )(intel_crtc->config)->base.adjusted_mode.crtc_hdisplay; cur_size = ((unsigned int )fb->base.bits_per_pixel * cur_size) / 8U; if (fb->base.pitches[0] < cur_size) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "fb not wide enough for plane %c (%d vs %d)\n", (int )intel_crtc->pipe + 65, cur_size, fb->base.pitches[0]); } else { } plane_config = (struct intel_initial_plane_config *)0; fb = (struct intel_framebuffer *)0; goto ldv_49308; } else { } cur_size = (unsigned int )(intel_crtc->config)->base.adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(dev, cur_size, fb->base.pixel_format, fb->base.modifier[0]); cur_size = fb->base.pitches[0] * cur_size; tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "pipe %c area: %dx%d, bpp: %d, size: %d\n", (int )intel_crtc->pipe + 65, (intel_crtc->config)->base.adjusted_mode.crtc_hdisplay, (intel_crtc->config)->base.adjusted_mode.crtc_vdisplay, fb->base.bits_per_pixel, cur_size); } else { } if (cur_size > max_size) { tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "fb not big enough for plane %c (%d vs %d)\n", (int )intel_crtc->pipe + 65, cur_size, max_size); } else { } plane_config = (struct intel_initial_plane_config *)0; fb = (struct intel_framebuffer *)0; goto ldv_49308; } else { } tmp___7 = ldv__builtin_expect((drm_debug & 
4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "fb big enough for plane %c (%d >= %d)\n", (int )intel_crtc->pipe + 65, max_size, cur_size); } else { } ldv_49307: __mptr___5 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___5 + 0xfffffffffffffff0UL; ldv_49310: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49309; } else { } ldv_49308: ; if ((unsigned long )fb == (unsigned long )((struct intel_framebuffer *)0)) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "BIOS fb not suitable for all pipes, not using\n"); } else { } goto out; } else { } ifbdev->preferred_bpp = fb->base.bits_per_pixel; ifbdev->fb = fb; drm_framebuffer_reference(& (ifbdev->fb)->base); __mptr___6 = (struct list_head const *)dev->mode_config.crtc_list.next; crtc = (struct drm_crtc *)__mptr___6 + 0xfffffffffffffff0UL; goto ldv_49321; ldv_49320: __mptr___7 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___7; if (! 
intel_crtc->active) { goto ldv_49317; } else { } __ret_warn_on = (unsigned long )(crtc->primary)->fb == (unsigned long )((struct drm_framebuffer *)0); tmp___9 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fbdev.c", 691, "re-used BIOS config but lost an fb on crtc %d\n", crtc->base.id); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ldv_49317: __mptr___8 = (struct list_head const *)crtc->head.next; crtc = (struct drm_crtc *)__mptr___8 + 0xfffffffffffffff0UL; ldv_49321: ; if ((unsigned long )(& crtc->head) != (unsigned long )(& dev->mode_config.crtc_list)) { goto ldv_49320; } else { } tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("intel_fbdev_init_bios", "using BIOS fb for initial console\n"); } else { } return (1); out: ; return (0); } } static void intel_fbdev_suspend_worker(struct work_struct *work ) { struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; intel_fbdev_set_suspend(((struct drm_i915_private *)__mptr + 0xffffffffffff3f70UL)->dev, 0, 1); return; } } int intel_fbdev_init(struct drm_device *dev ) { struct intel_fbdev *ifbdev ; struct drm_i915_private *dev_priv ; int ret ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; long tmp___0 ; void *tmp___1 ; bool tmp___2 ; int tmp___3 ; struct drm_i915_private *__p___0 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); __ret_warn_on = (unsigned int )*((unsigned char *)__p + 38UL) == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fbdev.c", 718, "WARN_ON(INTEL_INFO(dev)->num_pipes == 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-19); } else { } tmp___1 = kzalloc(200UL, 208U); ifbdev = (struct intel_fbdev *)tmp___1; if ((unsigned long )ifbdev == (unsigned long )((struct intel_fbdev *)0)) { return (-12); } else { } drm_fb_helper_prepare(dev, & ifbdev->helper, & intel_fb_helper_funcs); tmp___2 = intel_fbdev_init_bios(dev, ifbdev); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { ifbdev->preferred_bpp = 32; } else { } __p___0 = to_i915((struct drm_device const *)dev); ret = drm_fb_helper_init(dev, & ifbdev->helper, (int )__p___0->info.num_pipes, 4); if (ret != 0) { kfree((void const *)ifbdev); return (ret); } else { } dev_priv->fbdev = ifbdev; __init_work(& dev_priv->fbdev_suspend_work, 0); __constr_expr_0___0.counter = 137438953408L; dev_priv->fbdev_suspend_work.data = __constr_expr_0___0; lockdep_init_map(& dev_priv->fbdev_suspend_work.lockdep_map, "(&dev_priv->fbdev_suspend_work)", & __key, 0); INIT_LIST_HEAD(& dev_priv->fbdev_suspend_work.entry); dev_priv->fbdev_suspend_work.func = & intel_fbdev_suspend_worker; drm_fb_helper_single_add_all_connectors(& ifbdev->helper); return (0); } } void intel_fbdev_initial_config(void *data , async_cookie_t cookie ) { struct drm_i915_private *dev_priv ; struct intel_fbdev *ifbdev ; { dev_priv = (struct drm_i915_private *)data; ifbdev = dev_priv->fbdev; drm_fb_helper_initial_config(& ifbdev->helper, ifbdev->preferred_bpp); return; } } void intel_fbdev_fini(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->fbdev == (unsigned long )((struct intel_fbdev *)0)) { 
return; } else { } ldv_flush_work_714(& dev_priv->fbdev_suspend_work); async_synchronize_full(); intel_fbdev_destroy(dev, dev_priv->fbdev); kfree((void const *)dev_priv->fbdev); dev_priv->fbdev = (struct intel_fbdev *)0; return; } } void intel_fbdev_set_suspend(struct drm_device *dev , int state , bool synchronous ) { struct drm_i915_private *dev_priv ; struct intel_fbdev *ifbdev ; struct fb_info *info ; int __ret_warn_on ; long tmp ; int tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ifbdev = dev_priv->fbdev; if ((unsigned long )ifbdev == (unsigned long )((struct intel_fbdev *)0)) { return; } else { } info = ifbdev->helper.fbdev; if ((int )synchronous) { if (state != 0) { ldv_flush_work_715(& dev_priv->fbdev_suspend_work); } else { } console_lock(); } else { __ret_warn_on = state != 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_fbdev.c", 796, "WARN_ON(state != FBINFO_STATE_RUNNING)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = console_trylock(); if (tmp___0 == 0) { schedule_work___3(& dev_priv->fbdev_suspend_work); return; } else { } } if (state == 0 && (unsigned long )((ifbdev->fb)->obj)->stolen != (unsigned long )((struct drm_mm_node *)0)) { memset_io((void volatile *)info->screen_base, 0, info->screen_size); } else { } fb_set_suspend(info, state); console_unlock(); return; } } void intel_fbdev_output_poll_changed(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->fbdev != (unsigned long )((struct intel_fbdev *)0)) { drm_fb_helper_hotplug_event(& (dev_priv->fbdev)->helper); } else { } return; } } void intel_fbdev_restore_mode(struct drm_device *dev ) { int ret ; struct 
drm_i915_private *dev_priv ; bool tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )dev_priv->fbdev == (unsigned long )((struct intel_fbdev *)0)) { return; } else { } tmp = drm_fb_helper_restore_fbdev_mode_unlocked(& (dev_priv->fbdev)->helper); ret = (int )tmp; if (ret != 0) { tmp___0 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_fbdev_restore_mode", "failed to restore crtc mode\n"); } else { } } else { } return; } } void call_and_disable_all_18(int state ) { { if (ldv_work_18_0 == state) { call_and_disable_work_18(ldv_work_struct_18_0); } else { } if (ldv_work_18_1 == state) { call_and_disable_work_18(ldv_work_struct_18_1); } else { } if (ldv_work_18_2 == state) { call_and_disable_work_18(ldv_work_struct_18_2); } else { } if (ldv_work_18_3 == state) { call_and_disable_work_18(ldv_work_struct_18_3); } else { } return; } } void invoke_work_18(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_18_0 == 2 || ldv_work_18_0 == 3) { ldv_work_18_0 = 4; intel_fbdev_suspend_worker(ldv_work_struct_18_0); ldv_work_18_0 = 1; } else { } goto ldv_49389; case 1: ; if (ldv_work_18_1 == 2 || ldv_work_18_1 == 3) { ldv_work_18_1 = 4; intel_fbdev_suspend_worker(ldv_work_struct_18_0); ldv_work_18_1 = 1; } else { } goto ldv_49389; case 2: ; if (ldv_work_18_2 == 2 || ldv_work_18_2 == 3) { ldv_work_18_2 = 4; intel_fbdev_suspend_worker(ldv_work_struct_18_0); ldv_work_18_2 = 1; } else { } goto ldv_49389; case 3: ; if (ldv_work_18_3 == 2 || ldv_work_18_3 == 3) { ldv_work_18_3 = 4; intel_fbdev_suspend_worker(ldv_work_struct_18_0); ldv_work_18_3 = 1; } else { } goto ldv_49389; default: ldv_stop(); } ldv_49389: ; return; } } void disable_work_18(struct work_struct *work ) { { if ((ldv_work_18_0 == 3 || ldv_work_18_0 == 2) && (unsigned long )ldv_work_struct_18_0 == (unsigned long )work) { ldv_work_18_0 = 1; } else { } if ((ldv_work_18_1 == 3 || 
ldv_work_18_1 == 2) && (unsigned long )ldv_work_struct_18_1 == (unsigned long )work) { ldv_work_18_1 = 1; } else { } if ((ldv_work_18_2 == 3 || ldv_work_18_2 == 2) && (unsigned long )ldv_work_struct_18_2 == (unsigned long )work) { ldv_work_18_2 = 1; } else { } if ((ldv_work_18_3 == 3 || ldv_work_18_3 == 2) && (unsigned long )ldv_work_struct_18_3 == (unsigned long )work) { ldv_work_18_3 = 1; } else { } return; } } void activate_work_18(struct work_struct *work , int state ) { { if (ldv_work_18_0 == 0) { ldv_work_struct_18_0 = work; ldv_work_18_0 = state; return; } else { } if (ldv_work_18_1 == 0) { ldv_work_struct_18_1 = work; ldv_work_18_1 = state; return; } else { } if (ldv_work_18_2 == 0) { ldv_work_struct_18_2 = work; ldv_work_18_2 = state; return; } else { } if (ldv_work_18_3 == 0) { ldv_work_struct_18_3 = work; ldv_work_18_3 = state; return; } else { } return; } } void ldv_initialize_fb_ops_66(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(160UL); intelfb_ops_group0 = (struct fb_var_screeninfo *)tmp; tmp___0 = ldv_init_zalloc(1608UL); intelfb_ops_group1 = (struct fb_info *)tmp___0; return; } } void call_and_disable_work_18(struct work_struct *work ) { { if ((ldv_work_18_0 == 2 || ldv_work_18_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_18_0) { intel_fbdev_suspend_worker(work); ldv_work_18_0 = 1; return; } else { } if ((ldv_work_18_1 == 2 || ldv_work_18_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_18_1) { intel_fbdev_suspend_worker(work); ldv_work_18_1 = 1; return; } else { } if ((ldv_work_18_2 == 2 || ldv_work_18_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_18_2) { intel_fbdev_suspend_worker(work); ldv_work_18_2 = 1; return; } else { } if ((ldv_work_18_3 == 2 || ldv_work_18_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_18_3) { intel_fbdev_suspend_worker(work); ldv_work_18_3 = 1; return; } else { } return; } } void ldv_initialize_drm_fb_helper_funcs_65(void) { 
void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(1160UL); intel_fb_helper_funcs_group0 = (struct drm_crtc *)tmp; tmp___0 = ldv_init_zalloc(160UL); intel_fb_helper_funcs_group1 = (struct drm_fb_helper *)tmp___0; return; } } void work_init_18(void) { { ldv_work_18_0 = 0; ldv_work_18_1 = 0; ldv_work_18_2 = 0; ldv_work_18_3 = 0; return; } } void ldv_main_exported_66(void) { struct fb_cmap *ldvarg394 ; void *tmp ; struct fb_copyarea *ldvarg391 ; void *tmp___0 ; struct fb_fillrect *ldvarg392 ; void *tmp___1 ; int ldvarg393 ; struct fb_image *ldvarg390 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(40UL); ldvarg394 = (struct fb_cmap *)tmp; tmp___0 = ldv_init_zalloc(24UL); ldvarg391 = (struct fb_copyarea *)tmp___0; tmp___1 = ldv_init_zalloc(24UL); ldvarg392 = (struct fb_fillrect *)tmp___1; tmp___2 = ldv_init_zalloc(80UL); ldvarg390 = (struct fb_image *)tmp___2; ldv_memset((void *)(& ldvarg393), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_66 == 1) { intel_fbdev_pan_display(intelfb_ops_group0, intelfb_ops_group1); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 1: ; if (ldv_state_variable_66 == 1) { drm_fb_helper_setcmap(ldvarg394, intelfb_ops_group1); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 2: ; if (ldv_state_variable_66 == 1) { intel_fbdev_blank(ldvarg393, intelfb_ops_group1); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 3: ; if (ldv_state_variable_66 == 1) { drm_fb_helper_debug_leave(intelfb_ops_group1); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 4: ; if (ldv_state_variable_66 == 1) { drm_fb_helper_check_var(intelfb_ops_group0, intelfb_ops_group1); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 5: ; if (ldv_state_variable_66 == 1) { cfb_fillrect(intelfb_ops_group1, (struct fb_fillrect const *)ldvarg392); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 6: ; if (ldv_state_variable_66 == 1) { cfb_copyarea(intelfb_ops_group1, 
(struct fb_copyarea const *)ldvarg391); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 7: ; if (ldv_state_variable_66 == 1) { cfb_imageblit(intelfb_ops_group1, (struct fb_image const *)ldvarg390); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 8: ; if (ldv_state_variable_66 == 1) { drm_fb_helper_debug_enter(intelfb_ops_group1); ldv_state_variable_66 = 1; } else { } goto ldv_49424; case 9: ; if (ldv_state_variable_66 == 1) { intel_fbdev_set_par(intelfb_ops_group1); ldv_state_variable_66 = 1; } else { } goto ldv_49424; default: ldv_stop(); } ldv_49424: ; return; } } void ldv_main_exported_65(void) { int ldvarg99 ; u16 *ldvarg103 ; void *tmp ; u16 ldvarg96 ; struct drm_display_mode **ldvarg92 ; void *tmp___0 ; u16 ldvarg97 ; u16 *ldvarg102 ; void *tmp___1 ; u16 ldvarg98 ; int ldvarg93 ; struct drm_fb_helper_surface_size *ldvarg100 ; void *tmp___2 ; int ldvarg90 ; struct drm_fb_offset *ldvarg91 ; void *tmp___3 ; struct drm_fb_helper_crtc **ldvarg94 ; void *tmp___4 ; u16 *ldvarg101 ; void *tmp___5 ; bool *ldvarg95 ; void *tmp___6 ; int ldvarg104 ; int tmp___7 ; { tmp = ldv_init_zalloc(2UL); ldvarg103 = (u16 *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg92 = (struct drm_display_mode **)tmp___0; tmp___1 = ldv_init_zalloc(2UL); ldvarg102 = (u16 *)tmp___1; tmp___2 = ldv_init_zalloc(24UL); ldvarg100 = (struct drm_fb_helper_surface_size *)tmp___2; tmp___3 = ldv_init_zalloc(8UL); ldvarg91 = (struct drm_fb_offset *)tmp___3; tmp___4 = ldv_init_zalloc(8UL); ldvarg94 = (struct drm_fb_helper_crtc **)tmp___4; tmp___5 = ldv_init_zalloc(2UL); ldvarg101 = (u16 *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg95 = (bool *)tmp___6; ldv_memset((void *)(& ldvarg99), 0, 4UL); ldv_memset((void *)(& ldvarg96), 0, 2UL); ldv_memset((void *)(& ldvarg97), 0, 2UL); ldv_memset((void *)(& ldvarg98), 0, 2UL); ldv_memset((void *)(& ldvarg93), 0, 4UL); ldv_memset((void *)(& ldvarg90), 0, 4UL); ldv_memset((void *)(& ldvarg104), 0, 4UL); tmp___7 = __VERIFIER_nondet_int(); switch 
(tmp___7) { case 0: ; if (ldv_state_variable_65 == 1) { intel_crtc_fb_gamma_get(intel_fb_helper_funcs_group0, ldvarg103, ldvarg102, ldvarg101, ldvarg104); ldv_state_variable_65 = 1; } else { } goto ldv_49454; case 1: ; if (ldv_state_variable_65 == 1) { intelfb_create(intel_fb_helper_funcs_group1, ldvarg100); ldv_state_variable_65 = 1; } else { } goto ldv_49454; case 2: ; if (ldv_state_variable_65 == 1) { intel_crtc_fb_gamma_set(intel_fb_helper_funcs_group0, (int )ldvarg98, (int )ldvarg97, (int )ldvarg96, ldvarg99); ldv_state_variable_65 = 1; } else { } goto ldv_49454; case 3: ; if (ldv_state_variable_65 == 1) { intel_fb_initial_config(intel_fb_helper_funcs_group1, ldvarg94, ldvarg92, ldvarg91, ldvarg95, ldvarg90, ldvarg93); ldv_state_variable_65 = 1; } else { } goto ldv_49454; default: ldv_stop(); } ldv_49454: ; return; } } bool ldv_queue_work_on_709(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_710(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_711(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_712(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); 
call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_713(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_flush_work_714(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = flush_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(ldv_func_arg1); return (ldv_func_res); } } bool ldv_flush_work_715(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = flush_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_18(ldv_func_arg1); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_727(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_729(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_728(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_731(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_730(struct workqueue_struct *ldv_func_arg1 ) ; extern int i2c_transfer(struct i2c_adapter * , struct i2c_msg * , int ) ; extern void msleep(unsigned int ) ; struct intel_dvo_dev_ops ch7017_ops ; static void ch7017_dump_regs(struct intel_dvo_device *dvo ) ; static void ch7017_dpms(struct intel_dvo_device *dvo , bool enable ) ; static bool ch7017_read(struct intel_dvo_device *dvo , u8 addr , u8 *val ) { struct i2c_msg msgs[2U] 
/* Tail of ch7017_read(): a two-message I2C transaction — message 0 writes the
 * register address, message 1 (flags = 1U, i.e. read) fetches one byte into
 * *val.  Returns true iff i2c_transfer() completed both messages.  The head
 * of this definition lies in the previous chunk. */
; int tmp ; { msgs[0].addr = (unsigned short )dvo->slave_addr; msgs[0].flags = 0U; msgs[0].len = 1U; msgs[0].buf = & addr; msgs[1].addr = (unsigned short )dvo->slave_addr; msgs[1].flags = 1U; msgs[1].len = 1U; msgs[1].buf = val; tmp = i2c_transfer(dvo->i2c_bus, (struct i2c_msg *)(& msgs), 2); return (tmp == 2); } }
/* Write one byte `val` to CH7017 register `addr` as a single two-byte I2C
 * message ({addr, val}); returns true iff i2c_transfer() reports exactly one
 * message completed. */
static bool ch7017_write(struct intel_dvo_device *dvo , u8 addr , u8 val ) { uint8_t buf[2U] ; struct i2c_msg msg ; int tmp ; { buf[0] = addr; buf[1] = val; msg.addr = (unsigned short )dvo->slave_addr; msg.flags = 0U; msg.len = 2U; msg.buf = (__u8 *)(& buf); tmp = i2c_transfer(dvo->i2c_bus, & msg, 1); return (tmp == 1); } }
/* Probe/attach: allocate driver-private state, bind the I2C adapter, then read
 * register 75 (0x4B, presumably a version/ID register — confirm against the
 * CH7017 datasheet) and accept values 27/26/25 as ch7017/ch7018/ch7019.
 * On any failure the private data is freed and false is returned.
 * NOTE(review): kzalloc(1UL, ...) is the CIL-computed sizeof(struct
 * ch7017_priv) — verify it matches the struct definition. */
static bool ch7017_init(struct intel_dvo_device *dvo , struct i2c_adapter *adapter ) { struct ch7017_priv *priv ; char const *str ; u8 val ; void *tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = kzalloc(1UL, 208U); priv = (struct ch7017_priv *)tmp; if ((unsigned long )priv == (unsigned long )((struct ch7017_priv *)0)) { return (0); } else { } dvo->i2c_bus = adapter; dvo->dev_priv = (void *)priv; tmp___0 = ch7017_read(dvo, 75, & val); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto fail; } else { } switch ((int )val) { case 27: str = "ch7017"; goto ldv_48050; case 26: str = "ch7018"; goto ldv_48050; case 25: str = "ch7019"; goto ldv_48050; default: tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ch7017_init", "ch701x not detected, got %d: from %s slave %d.\n", (int )val, (char *)(& adapter->name), dvo->slave_addr); } else { } goto fail; } ldv_48050: tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("ch7017_init", "%s detected on %s, addr %d\n", str, (char *)(& adapter->name), dvo->slave_addr); } else { } return (1); fail: kfree((void const *)priv); return (0); } }
/* Unconditionally reports status 1 — presumably connector_status_connected
 * (no real detection is possible here); TODO confirm the enum value. */
static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo ) { { return (1); } }
/* Return type of ch7017_mode_valid(); the definition continues on the next
 * chunk line. */
static enum 
drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo , struct drm_display_mode *mode ) { { if (mode->clock > 160000) { return (15); } else { } return (0); } } static void ch7017_mode_set(struct intel_dvo_device *dvo , struct drm_display_mode *mode , struct drm_display_mode *adjusted_mode ) { uint8_t lvds_pll_feedback_div ; uint8_t lvds_pll_vco_control ; uint8_t outputs_enable ; uint8_t lvds_control_2 ; uint8_t lvds_power_down ; uint8_t horizontal_active_pixel_input ; uint8_t horizontal_active_pixel_output ; uint8_t vertical_active_line_output ; uint8_t active_input_line_output ; long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ch7017_mode_set", "Registers before mode setting\n"); } else { } ch7017_dump_regs(dvo); if (mode->clock <= 99999) { outputs_enable = 8U; lvds_pll_feedback_div = 173U; lvds_pll_vco_control = 163U; lvds_control_2 = 32U; } else { outputs_enable = 11U; lvds_pll_feedback_div = 163U; lvds_pll_feedback_div = 35U; lvds_control_2 = 96U; outputs_enable = (uint8_t )((unsigned int )outputs_enable | 16U); lvds_pll_vco_control = 173U; } horizontal_active_pixel_input = (uint8_t )mode->hdisplay; vertical_active_line_output = (uint8_t )mode->vdisplay; horizontal_active_pixel_output = (uint8_t )mode->hdisplay; active_input_line_output = (uint8_t )((int )((signed char )((mode->hdisplay & 1792) >> 8)) | (int )((signed char )(((mode->vdisplay & 1792) >> 8) << 3))); lvds_power_down = (uint8_t )((int )((signed char )((mode->hdisplay & 1792) >> 8)) | 8); ch7017_dpms(dvo, 0); ch7017_write(dvo, 95, (int )horizontal_active_pixel_input); ch7017_write(dvo, 98, (int )horizontal_active_pixel_output); ch7017_write(dvo, 97, (int )vertical_active_line_output); ch7017_write(dvo, 96, (int )active_input_line_output); ch7017_write(dvo, 114, (int )lvds_pll_vco_control); ch7017_write(dvo, 113, (int )lvds_pll_feedback_div); ch7017_write(dvo, 120, (int )lvds_control_2); ch7017_write(dvo, 115, (int 
)outputs_enable); ch7017_write(dvo, 99, (int )lvds_power_down); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ch7017_mode_set", "Registers after mode setting\n"); } else { } ch7017_dump_regs(dvo); return; } } static void ch7017_dpms(struct intel_dvo_device *dvo , bool enable ) { uint8_t val ; { ch7017_read(dvo, 99, & val); ch7017_write(dvo, 73, 62); if ((int )enable) { ch7017_write(dvo, 99, (int )val & 191); } else { ch7017_write(dvo, 99, (int )((unsigned int )val | 64U)); } msleep(20U); return; } } static bool ch7017_get_hw_state(struct intel_dvo_device *dvo ) { uint8_t val ; { ch7017_read(dvo, 99, & val); if (((int )val & 64) != 0) { return (0); } else { return (1); } } } static void ch7017_dump_regs(struct intel_dvo_device *dvo ) { uint8_t val ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; { ch7017_read(dvo, 95, & val); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT: %02x\n", (int )val); } else { } ch7017_read(dvo, 98, & val); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT: %02x\n", (int )val); } else { } ch7017_read(dvo, 97, & val); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_VERTICAL_ACTIVE_LINE_OUTPUT: %02x\n", (int )val); } else { } ch7017_read(dvo, 96, & val); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_ACTIVE_INPUT_LINE_OUTPUT: %02x\n", (int )val); } else { } ch7017_read(dvo, 114, & val); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_LVDS_PLL_VCO_CONTROL: 
%02x\n", (int )val); } else { } ch7017_read(dvo, 113, & val); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_LVDS_PLL_FEEDBACK_DIV: %02x\n", (int )val); } else { } ch7017_read(dvo, 120, & val); tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_LVDS_CONTROL_2: %02x\n", (int )val); } else { } ch7017_read(dvo, 115, & val); tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_OUTPUTS_ENABLE: %02x\n", (int )val); } else { } ch7017_read(dvo, 99, & val); tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("ch7017_dump_regs", "CH7017_LVDS_POWER_DOWN: %02x\n", (int )val); } else { } return; } } static void ch7017_destroy(struct intel_dvo_device *dvo ) { struct ch7017_priv *priv ; { priv = (struct ch7017_priv *)dvo->dev_priv; if ((unsigned long )priv != (unsigned long )((struct ch7017_priv *)0)) { kfree((void const *)priv); dvo->dev_priv = (void *)0; } else { } return; } } struct intel_dvo_dev_ops ch7017_ops = {& ch7017_init, 0, & ch7017_dpms, (int (*)(struct intel_dvo_device * , struct drm_display_mode * ))(& ch7017_mode_valid), 0, 0, & ch7017_mode_set, & ch7017_detect, & ch7017_get_hw_state, 0, & ch7017_destroy, & ch7017_dump_regs}; extern int ldv_setup_64(void) ; void ldv_initialize_intel_dvo_dev_ops_64(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); ch7017_ops_group0 = (struct intel_dvo_device *)tmp; tmp___0 = ldv_init_zalloc(208UL); ch7017_ops_group1 = (struct drm_display_mode *)tmp___0; return; } } void ldv_main_exported_64(void) { bool ldvarg373 ; struct drm_display_mode *ldvarg372 ; void *tmp ; struct i2c_adapter *ldvarg371 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(208UL); ldvarg372 = (struct drm_display_mode *)tmp; tmp___0 = ldv_init_zalloc(1936UL); 
ldvarg371 = (struct i2c_adapter *)tmp___0; ldv_memset((void *)(& ldvarg373), 0, 1UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_64 == 2) { ch7017_dpms(ch7017_ops_group0, (int )ldvarg373); ldv_state_variable_64 = 2; } else { } if (ldv_state_variable_64 == 1) { ch7017_dpms(ch7017_ops_group0, (int )ldvarg373); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 3) { ch7017_dpms(ch7017_ops_group0, (int )ldvarg373); ldv_state_variable_64 = 3; } else { } goto ldv_48108; case 1: ; if (ldv_state_variable_64 == 2) { ch7017_detect(ch7017_ops_group0); ldv_state_variable_64 = 2; } else { } if (ldv_state_variable_64 == 1) { ch7017_detect(ch7017_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 3) { ch7017_detect(ch7017_ops_group0); ldv_state_variable_64 = 3; } else { } goto ldv_48108; case 2: ; if (ldv_state_variable_64 == 2) { ch7017_mode_set(ch7017_ops_group0, ch7017_ops_group1, ldvarg372); ldv_state_variable_64 = 2; } else { } if (ldv_state_variable_64 == 1) { ch7017_mode_set(ch7017_ops_group0, ch7017_ops_group1, ldvarg372); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 3) { ch7017_mode_set(ch7017_ops_group0, ch7017_ops_group1, ldvarg372); ldv_state_variable_64 = 3; } else { } goto ldv_48108; case 3: ; if (ldv_state_variable_64 == 2) { ch7017_mode_valid(ch7017_ops_group0, ch7017_ops_group1); ldv_state_variable_64 = 2; } else { } if (ldv_state_variable_64 == 1) { ch7017_mode_valid(ch7017_ops_group0, ch7017_ops_group1); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 3) { ch7017_mode_valid(ch7017_ops_group0, ch7017_ops_group1); ldv_state_variable_64 = 3; } else { } goto ldv_48108; case 4: ; if (ldv_state_variable_64 == 2) { ch7017_destroy(ch7017_ops_group0); ldv_state_variable_64 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_64 == 3) { ch7017_destroy(ch7017_ops_group0); ldv_state_variable_64 = 1; ref_cnt = ref_cnt - 1; } else { } 
goto ldv_48108; case 5: ; if (ldv_state_variable_64 == 2) { ch7017_dump_regs(ch7017_ops_group0); ldv_state_variable_64 = 2; } else { } if (ldv_state_variable_64 == 1) { ch7017_dump_regs(ch7017_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 3) { ch7017_dump_regs(ch7017_ops_group0); ldv_state_variable_64 = 3; } else { } goto ldv_48108; case 6: ; if (ldv_state_variable_64 == 2) { ch7017_get_hw_state(ch7017_ops_group0); ldv_state_variable_64 = 2; } else { } if (ldv_state_variable_64 == 1) { ch7017_get_hw_state(ch7017_ops_group0); ldv_state_variable_64 = 1; } else { } if (ldv_state_variable_64 == 3) { ch7017_get_hw_state(ch7017_ops_group0); ldv_state_variable_64 = 3; } else { } goto ldv_48108; case 7: ; if (ldv_state_variable_64 == 2) { ch7017_init(ch7017_ops_group0, ldvarg371); ldv_state_variable_64 = 3; } else { } goto ldv_48108; case 8: ; if (ldv_state_variable_64 == 1) { ldv_setup_64(); ldv_state_variable_64 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48108; default: ldv_stop(); } ldv_48108: ; return; } } bool ldv_queue_work_on_727(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_728(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_729(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, 
ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_730(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_731(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_741(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_743(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_742(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_745(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_744(struct workqueue_struct *ldv_func_arg1 ) ; struct intel_dvo_dev_ops ch7xxx_ops ; static struct ch7xxx_id_struct ch7xxx_ids[5U] = { {131U, (char *)"CH7011"}, {5U, (char *)"CH7010B"}, {132U, (char *)"CH7009A"}, {133U, (char *)"CH7009B"}, {149U, (char *)"CH7301"}}; static struct ch7xxx_did_struct ch7xxx_dids[2U] = { {23U, (char *)"CH7XXX"}, {22U, (char *)"CH7010B"}}; static char *ch7xxx_get_id(uint8_t vid ) { int i ; { i = 0; goto ldv_48038; ldv_48037: ; if ((int )ch7xxx_ids[i].vid == (int )vid) { return (ch7xxx_ids[i].name); } else { } i = i + 1; ldv_48038: ; if ((unsigned int )i <= 4U) { goto ldv_48037; } else { } return ((char *)0); } } static char *ch7xxx_get_did(uint8_t did 
) { int i ; { i = 0; goto ldv_48047; ldv_48046: ; if ((int )ch7xxx_dids[i].did == (int )did) { return (ch7xxx_dids[i].name); } else { } i = i + 1; ldv_48047: ; if ((unsigned int )i <= 1U) { goto ldv_48046; } else { } return ((char *)0); } } static bool ch7xxx_readb(struct intel_dvo_device *dvo , int addr , uint8_t *ch ) { struct ch7xxx_priv *ch7xxx ; struct i2c_adapter *adapter ; u8 out_buf[2U] ; u8 in_buf[2U] ; struct i2c_msg msgs[2U] ; int tmp ; long tmp___0 ; { ch7xxx = (struct ch7xxx_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msgs[0].addr = (unsigned short )dvo->slave_addr; msgs[0].flags = 0U; msgs[0].len = 1U; msgs[0].buf = (__u8 *)(& out_buf); msgs[1].addr = (unsigned short )dvo->slave_addr; msgs[1].flags = 1U; msgs[1].len = 1U; msgs[1].buf = (__u8 *)(& in_buf); out_buf[0] = (u8 )addr; out_buf[1] = 0U; tmp = i2c_transfer(adapter, (struct i2c_msg *)(& msgs), 2); if (tmp == 2) { *ch = in_buf[0]; return (1); } else { } if (! ch7xxx->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ch7xxx_readb", "Unable to read register 0x%02x from %s:%02x.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } } static bool ch7xxx_writeb(struct intel_dvo_device *dvo , int addr , uint8_t ch ) { struct ch7xxx_priv *ch7xxx ; struct i2c_adapter *adapter ; uint8_t out_buf[2U] ; struct i2c_msg msg ; int tmp ; long tmp___0 ; { ch7xxx = (struct ch7xxx_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msg.addr = (unsigned short )dvo->slave_addr; msg.flags = 0U; msg.len = 2U; msg.buf = (__u8 *)(& out_buf); out_buf[0] = (uint8_t )addr; out_buf[1] = ch; tmp = i2c_transfer(adapter, & msg, 1); if (tmp == 1) { return (1); } else { } if (! 
ch7xxx->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ch7xxx_writeb", "Unable to write register 0x%02x to %s:%d.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } } static bool ch7xxx_init(struct intel_dvo_device *dvo , struct i2c_adapter *adapter ) { struct ch7xxx_priv *ch7xxx ; uint8_t vendor ; uint8_t device ; char *name ; char *devid ; void *tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; long tmp___6 ; { tmp = kzalloc(1UL, 208U); ch7xxx = (struct ch7xxx_priv *)tmp; if ((unsigned long )ch7xxx == (unsigned long )((struct ch7xxx_priv *)0)) { return (0); } else { } dvo->i2c_bus = adapter; dvo->dev_priv = (void *)ch7xxx; ch7xxx->quiet = 1; tmp___0 = ch7xxx_readb(dvo, 74, & vendor); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto out; } else { } name = ch7xxx_get_id((int )vendor); if ((unsigned long )name == (unsigned long )((char *)0)) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ch7xxx_init", "ch7xxx not detected; got 0x%02x from %s slave %d.\n", (int )vendor, (char *)(& adapter->name), dvo->slave_addr); } else { } goto out; } else { } tmp___3 = ch7xxx_readb(dvo, 75, & device); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { goto out; } else { } devid = ch7xxx_get_did((int )device); if ((unsigned long )devid == (unsigned long )((char *)0)) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ch7xxx_init", "ch7xxx not detected; got 0x%02x from %s slave %d.\n", (int )vendor, (char *)(& adapter->name), dvo->slave_addr); } else { } goto out; } else { } ch7xxx->quiet = 0; tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("ch7xxx_init", "Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", name, (int )vendor, (int 
/* Tail of ch7xxx_init(): the success path logs the detected chipset and
 * returns true; the `out:` label frees the private data on probe failure.
 * The head of this definition lies in the previous chunk. */
)device); } else { } return (1); out: kfree((void const *)ch7xxx); return (0); } }
/* Hot-plug detect: save the power-management register (73 = 0x49), force it to
 * a powered state (clear bit 0, set bits 0xC0), sample the connection-detect
 * register (32 = 0x20), then restore the saved PM value.  Returns 1 if the
 * detect bit (0x20) is set, else 2 — presumably connector_status_connected /
 * _disconnected; confirm enum values against drm headers. */
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo ) { uint8_t cdet ; uint8_t orig_pm ; uint8_t pm ; { ch7xxx_readb(dvo, 73, & orig_pm); pm = orig_pm; pm = (unsigned int )pm & 254U; pm = (uint8_t )((unsigned int )pm | 192U); ch7xxx_writeb(dvo, 73, (int )pm); ch7xxx_readb(dvo, 32, & cdet); ch7xxx_writeb(dvo, 73, (int )orig_pm); if (((int )cdet & 32) != 0) { return (1); } else { } return (2); } }
/* Rejects dotclocks above 165000 kHz (single-link TMDS limit); return 15 is
 * presumably MODE_CLOCK_HIGH and 0 is MODE_OK — confirm enum values. */
static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo , struct drm_display_mode *mode ) { { if (mode->clock > 165000) { return (15); } else { } return (0); } }
/* Program the transmitter for the requested mode: one of two PLL register
 * recipes (registers 49..55 = 0x31..0x37) selected by a 65000 kHz dotclock
 * threshold, then fold mode->flags bits 0 and 2 (hsync/vsync polarity per the
 * DRM mode-flag layout — TODO confirm) into the input-data-format register
 * (31 = 0x1F) after masking its previous polarity bits (& 0xE7).
 * `adjusted_mode` is unused here. */
static void ch7xxx_mode_set(struct intel_dvo_device *dvo , struct drm_display_mode *mode , struct drm_display_mode *adjusted_mode ) { uint8_t tvco ; uint8_t tpcp ; uint8_t tpd ; uint8_t tlpf ; uint8_t idf ; { if (mode->clock <= 65000) { tvco = 35U; tpcp = 8U; tpd = 22U; tlpf = 96U; } else { tvco = 45U; tpcp = 6U; tpd = 38U; tlpf = 160U; } ch7xxx_writeb(dvo, 49, 0); ch7xxx_writeb(dvo, 50, (int )tvco); ch7xxx_writeb(dvo, 51, (int )tpcp); ch7xxx_writeb(dvo, 52, (int )tpd); ch7xxx_writeb(dvo, 53, 48); ch7xxx_writeb(dvo, 54, (int )tlpf); ch7xxx_writeb(dvo, 55, 0); ch7xxx_readb(dvo, 31, & idf); idf = (unsigned int )idf & 231U; if ((int )mode->flags & 1) { idf = (uint8_t )((unsigned int )idf | 8U); } else { } if ((mode->flags & 4U) != 0U) { idf = (uint8_t )((unsigned int )idf | 16U); } else { } ch7xxx_writeb(dvo, 31, (int )idf); return; } }
/* Power the transmitter fully on (write 0xC0) or down (write 0x01) via the
 * power-management register (73 = 0x49). */
static void ch7xxx_dpms(struct intel_dvo_device *dvo , bool enable ) { { if ((int )enable) { ch7xxx_writeb(dvo, 73, 192); } else { ch7xxx_writeb(dvo, 73, 1); } return; } }
/* Reports "enabled" iff either of the two power bits (0xC0) is set in the
 * power-management register — the inverse of ch7xxx_dpms() above. */
static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo ) { u8 val ; { ch7xxx_readb(dvo, 73, & val); if (((int )val & 192) != 0) { return (1); } else { return (0); } } }
/* Head of ch7xxx_dump_regs(); the loop body continues on the next chunk
 * line. */
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo ) { int i ; uint8_t val ; long tmp ; long tmp___0 ; { i = 0; 
goto ldv_48116; ldv_48115: ; if (((unsigned int )i & 7U) == 0U) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ch7xxx_dump_regs", "\n %02X: ", i); } else { } } else { } ch7xxx_readb(dvo, i, & val); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ch7xxx_dump_regs", "%02X ", (int )val); } else { } i = i + 1; ldv_48116: ; if (i <= 75) { goto ldv_48115; } else { } return; } } static void ch7xxx_destroy(struct intel_dvo_device *dvo ) { struct ch7xxx_priv *ch7xxx ; { ch7xxx = (struct ch7xxx_priv *)dvo->dev_priv; if ((unsigned long )ch7xxx != (unsigned long )((struct ch7xxx_priv *)0)) { kfree((void const *)ch7xxx); dvo->dev_priv = (void *)0; } else { } return; } } struct intel_dvo_dev_ops ch7xxx_ops = {& ch7xxx_init, 0, & ch7xxx_dpms, (int (*)(struct intel_dvo_device * , struct drm_display_mode * ))(& ch7xxx_mode_valid), 0, 0, & ch7xxx_mode_set, & ch7xxx_detect, & ch7xxx_get_hw_state, 0, & ch7xxx_destroy, & ch7xxx_dump_regs}; extern int ldv_setup_63(void) ; void ldv_initialize_intel_dvo_dev_ops_63(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); ch7xxx_ops_group0 = (struct intel_dvo_device *)tmp; tmp___0 = ldv_init_zalloc(208UL); ch7xxx_ops_group1 = (struct drm_display_mode *)tmp___0; return; } } void ldv_main_exported_63(void) { struct drm_display_mode *ldvarg253 ; void *tmp ; bool ldvarg254 ; struct i2c_adapter *ldvarg252 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(208UL); ldvarg253 = (struct drm_display_mode *)tmp; tmp___0 = ldv_init_zalloc(1936UL); ldvarg252 = (struct i2c_adapter *)tmp___0; ldv_memset((void *)(& ldvarg254), 0, 1UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_63 == 2) { ch7xxx_dpms(ch7xxx_ops_group0, (int )ldvarg254); ldv_state_variable_63 = 2; } else { } if (ldv_state_variable_63 == 1) { ch7xxx_dpms(ch7xxx_ops_group0, (int )ldvarg254); ldv_state_variable_63 = 1; } else { } if 
(ldv_state_variable_63 == 3) { ch7xxx_dpms(ch7xxx_ops_group0, (int )ldvarg254); ldv_state_variable_63 = 3; } else { } goto ldv_48135; case 1: ; if (ldv_state_variable_63 == 2) { ch7xxx_detect(ch7xxx_ops_group0); ldv_state_variable_63 = 2; } else { } if (ldv_state_variable_63 == 1) { ch7xxx_detect(ch7xxx_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 3) { ch7xxx_detect(ch7xxx_ops_group0); ldv_state_variable_63 = 3; } else { } goto ldv_48135; case 2: ; if (ldv_state_variable_63 == 2) { ch7xxx_mode_set(ch7xxx_ops_group0, ch7xxx_ops_group1, ldvarg253); ldv_state_variable_63 = 2; } else { } if (ldv_state_variable_63 == 1) { ch7xxx_mode_set(ch7xxx_ops_group0, ch7xxx_ops_group1, ldvarg253); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 3) { ch7xxx_mode_set(ch7xxx_ops_group0, ch7xxx_ops_group1, ldvarg253); ldv_state_variable_63 = 3; } else { } goto ldv_48135; case 3: ; if (ldv_state_variable_63 == 2) { ch7xxx_mode_valid(ch7xxx_ops_group0, ch7xxx_ops_group1); ldv_state_variable_63 = 2; } else { } if (ldv_state_variable_63 == 1) { ch7xxx_mode_valid(ch7xxx_ops_group0, ch7xxx_ops_group1); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 3) { ch7xxx_mode_valid(ch7xxx_ops_group0, ch7xxx_ops_group1); ldv_state_variable_63 = 3; } else { } goto ldv_48135; case 4: ; if (ldv_state_variable_63 == 2) { ch7xxx_destroy(ch7xxx_ops_group0); ldv_state_variable_63 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_63 == 3) { ch7xxx_destroy(ch7xxx_ops_group0); ldv_state_variable_63 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48135; case 5: ; if (ldv_state_variable_63 == 2) { ch7xxx_dump_regs(ch7xxx_ops_group0); ldv_state_variable_63 = 2; } else { } if (ldv_state_variable_63 == 1) { ch7xxx_dump_regs(ch7xxx_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 3) { ch7xxx_dump_regs(ch7xxx_ops_group0); ldv_state_variable_63 = 3; } else { } goto ldv_48135; case 6: ; if 
(ldv_state_variable_63 == 2) { ch7xxx_get_hw_state(ch7xxx_ops_group0); ldv_state_variable_63 = 2; } else { } if (ldv_state_variable_63 == 1) { ch7xxx_get_hw_state(ch7xxx_ops_group0); ldv_state_variable_63 = 1; } else { } if (ldv_state_variable_63 == 3) { ch7xxx_get_hw_state(ch7xxx_ops_group0); ldv_state_variable_63 = 3; } else { } goto ldv_48135; case 7: ; if (ldv_state_variable_63 == 2) { ch7xxx_init(ch7xxx_ops_group0, ldvarg252); ldv_state_variable_63 = 3; } else { } goto ldv_48135; case 8: ; if (ldv_state_variable_63 == 1) { ldv_setup_63(); ldv_state_variable_63 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48135; default: ldv_stop(); } ldv_48135: ; return; } } bool ldv_queue_work_on_741(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_742(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_743(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_744(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_745(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , 
/*
 * CIL-flattened LDV harness chunk.  This line begins mid-definition with the
 * tail of the generated workqueue wrapper ldv_queue_delayed_work_on_745:
 * it forwards to queue_delayed_work_on() and then records the pending work
 * item via activate_work_18(..., 2) so the LDV environment model can later
 * invoke the work function.
 */
unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/*
 * Forward declarations for the next section's generated workqueue wrappers
 * (numbered 755-759), the exported ivch DVO ops table, and ivch_dump_regs
 * (defined further down but called from ivch_mode_set).
 */
bool ldv_queue_work_on_755(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_757(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_756(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_759(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_758(struct workqueue_struct *ldv_func_arg1 ) ; struct intel_dvo_dev_ops ivch_ops ; static void ivch_dump_regs(struct intel_dvo_device *dvo ) ;
/*
 * ivch_read - read one 16-bit ivch register over i2c.
 * Builds a 3-message transfer: msgs[0] is a zero-length read to the slave
 * (flags 1U, presumably I2C_M_RD), msgs[1] writes the register index from
 * out_buf with flags 16384U (0x4000, looks like I2C_M_NOSTART - TODO
 * confirm against <uapi/linux/i2c.h>), and msgs[2] reads the two data bytes
 * with flags 16385U (read + the same 0x4000 bit).  On a full 3-message
 * transfer the result is assembled little-endian (in_buf[0] low byte,
 * in_buf[1] high byte) into *data and 1 (true) is returned.  On failure the
 * debug message below is only printed once priv->quiet has been cleared,
 * i.e. after the initial probe in ivch_init succeeded.
 * The definition continues on the next source line (the failure branch).
 */
static bool ivch_read(struct intel_dvo_device *dvo , int addr , uint16_t *data ) { struct ivch_priv *priv ; struct i2c_adapter *adapter ; u8 out_buf[1U] ; u8 in_buf[2U] ; struct i2c_msg msgs[3U] ; int tmp ; long tmp___0 ; { priv = (struct ivch_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msgs[0].addr = (unsigned short )dvo->slave_addr; msgs[0].flags = 1U; msgs[0].len = 0U; msgs[0].buf = 0; msgs[1].addr = 0U; msgs[1].flags = 16384U; msgs[1].len = 1U; msgs[1].buf = (__u8 *)(& out_buf); msgs[2].addr = (unsigned short )dvo->slave_addr; msgs[2].flags = 16385U; msgs[2].len = 2U; msgs[2].buf = (__u8 *)(& in_buf); out_buf[0] = (u8 )addr; tmp = i2c_transfer(adapter, (struct i2c_msg *)(& msgs), 3); if (tmp == 3) { *data = (uint16_t )((int )((short )((int )in_buf[1] << 8)) | (int )((short )in_buf[0])); return (1); } else { } if (!
/* Failure tail of ivch_read: log only when priv->quiet is clear (probe has
 * already succeeded), then report failure to the caller. */
priv->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ivch_read", "Unable to read register 0x%02x from %s:%02x.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } }
/*
 * ivch_write - write one 16-bit ivch register over i2c.
 * Single 3-byte write message: register index, low data byte, high data
 * byte.  Returns 1 (true) iff the one-message transfer completed; on
 * failure it logs only when priv->quiet is clear, mirroring ivch_read.
 */
static bool ivch_write(struct intel_dvo_device *dvo , int addr , uint16_t data ) { struct ivch_priv *priv ; struct i2c_adapter *adapter ; u8 out_buf[3U] ; struct i2c_msg msg ; int tmp ; long tmp___0 ; { priv = (struct ivch_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msg.addr = (unsigned short )dvo->slave_addr; msg.flags = 0U; msg.len = 3U; msg.buf = (__u8 *)(& out_buf); out_buf[0] = (u8 )addr; out_buf[1] = (u8 )data; out_buf[2] = (u8 )((int )data >> 8); tmp = i2c_transfer(adapter, & msg, 1); if (tmp == 1) { return (1); } else { } if (! priv->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ivch_write", "Unable to write register 0x%02x to %s:%d.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } }
/*
 * ivch_init - probe the ivch chip and allocate its private state.
 * Allocates a 6-byte ivch_priv (kzalloc flags 208U - presumably GFP_KERNEL;
 * TODO confirm), wires it into dvo, and probes register 0 (VR00) in quiet
 * mode.  The probe is rejected unless the low 7 bits of VR00 echo the i2c
 * slave address.  On any failure it frees priv via the "out" label on the
 * next source line and returns 0.
 * NOTE(review): both priv->width and priv->height are read from register
 * 32 (0x20).  The upstream ivch driver reads two distinct registers
 * (0x20/0x21) for the panel dimensions, so the second read here looks like
 * it should use register 33 - verify against the original dvo_ivch.c.
 */
static bool ivch_init(struct intel_dvo_device *dvo , struct i2c_adapter *adapter ) { struct ivch_priv *priv ; uint16_t temp ; void *tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; { tmp = kzalloc(6UL, 208U); priv = (struct ivch_priv *)tmp; if ((unsigned long )priv == (unsigned long )((struct ivch_priv *)0)) { return (0); } else { } dvo->i2c_bus = adapter; dvo->dev_priv = (void *)priv; priv->quiet = 1; tmp___0 = ivch_read(dvo, 0, & temp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto out; } else { } priv->quiet = 0; if (((int )temp & 127) != dvo->slave_addr) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ivch_init", "ivch detect failed due to address mismatch (%d vs %d)\n", (int )temp & 127, dvo->slave_addr); } else { } goto out; } else { } ivch_read(dvo, 32, & priv->width); ivch_read(dvo, 32, & priv->height); return (1); out:
/* Error tail of ivch_init: release the private state and report failure. */
kfree((void const *)priv); return (0); } }
/* ivch_detect - unconditionally reports the panel as present (enum value 1,
 * presumably connector_status_connected - confirm against drm_connector.h). */
static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo ) { { return (1); } }
/* ivch_mode_valid - rejects pixel clocks above 112000 kHz with status 15
 * (looks like MODE_CLOCK_HIGH - confirm against enum drm_mode_status),
 * otherwise returns 0 (MODE_OK). */
static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo , struct drm_display_mode *mode ) { { if (mode->clock > 112000) { return (15); } else { } return (0); } }
/*
 * ivch_dpms - power the panel up or down.
 * Writes the backlight register (0x80 = 128) with 1/0, then sets or clears
 * bits 0|2 of VR01 (| 5U on enable, & 65530U == & ~5U on disable) and polls
 * VR30 (register 48): the loop - rendered by CIL as the ldv_48072/ldv_48073
 * goto pair - spins up to 100 iterations until the sign bit of vr30 matches
 * the requested state, with __const_udelay() pauses between polls and one
 * final settle delay at ldv_48071.  Bails out early if the VR01 read fails.
 */
static void ivch_dpms(struct intel_dvo_device *dvo , bool enable ) { int i ; uint16_t vr01 ; uint16_t vr30 ; uint16_t backlight ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp = ivch_read(dvo, 1, & vr01); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } if ((int )enable) { backlight = 1U; } else { backlight = 0U; } ivch_write(dvo, 128, (int )backlight); if ((int )enable) { vr01 = (uint16_t )((unsigned int )vr01 | 5U); } else { vr01 = (unsigned int )vr01 & 65530U; } ivch_write(dvo, 1, (int )vr01); i = 0; goto ldv_48073; ldv_48072: tmp___1 = ivch_read(dvo, 48, & vr30); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_48071; } else { } if (((int )((short )vr30) < 0) == (int )enable) { goto ldv_48071; } else { } __const_udelay(4295000UL); i = i + 1; ldv_48073: ; if (i <= 99) { goto ldv_48072; } else { } ldv_48071: __const_udelay(68720000UL); return; } }
/* ivch_get_hw_state - the panel counts as enabled when VR01 bit 2 is set;
 * a failed register read is reported as "off". */
static bool ivch_get_hw_state(struct intel_dvo_device *dvo ) { uint16_t vr01 ; bool tmp ; int tmp___0 ; { tmp = ivch_read(dvo, 1, & vr01); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } if (((int )vr01 & 4) != 0) { return (1); } else { return (0); } } }
/*
 * ivch_mode_set - program panel fitting/scaling for the requested mode.
 * Reads VR10 (register 16), keeps only bits 2-3 to classify the panel type,
 * and enables dithering (vr01 = 16U) for the 8/0 cases.  vr40 starts at
 * 13312U and gains scaling-enable bits when the requested mode differs from
 * the adjusted (native) mode; the x/y ratio computation and the final
 * register writes continue on the next source line.
 */
static void ivch_mode_set(struct intel_dvo_device *dvo , struct drm_display_mode *mode , struct drm_display_mode *adjusted_mode ) { uint16_t vr40 ; uint16_t vr01 ; uint16_t vr10 ; uint16_t x_ratio ; uint16_t y_ratio ; { vr40 = 0U; vr01 = 0U; ivch_read(dvo, 16, & vr10); vr10 = (unsigned int )vr10 & 12U; if ((unsigned int )vr10 == 8U || (unsigned int )vr10 == 0U) { vr01 = 16U; } else { } vr40 = 13312U; if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay) { vr01 = (uint16_t )((unsigned int )vr01 | 8U); vr40 = (uint16_t )((unsigned int )vr40 | 2304U); x_ratio = (uint16_t )(((mode->hdisplay + -1) << 16) / (adjusted_mode->hdisplay + -1) >> 2); y_ratio = (uint16_t )(((mode->vdisplay + -1) << 16) / (adjusted_mode->vdisplay + -1) >> 2); ivch_write(dvo, 66, (int )x_ratio); ivch_write(dvo, 65, (int )y_ratio); } else { vr01 = (unsigned int )vr01 & 65527U; vr40 = (unsigned int )vr40 & 65279U; } vr40 = (unsigned int )vr40 & 65023U; ivch_write(dvo, 1, (int )vr01); ivch_write(dvo, 64, (int )vr40); ivch_dump_regs(dvo); return; } } static void ivch_dump_regs(struct intel_dvo_device *dvo ) { uint16_t val ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; long tmp___13 ; long tmp___14 ; { ivch_read(dvo, 0, & val); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR00: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 1, & val); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR01: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 16, & val); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR10: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 48, & val); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR30: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 64, & val); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR40: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 128, & val); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("ivch_dump_regs", 
"VR80: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 129, & val); tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR81: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 130, & val); tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR82: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 131, & val); tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR83: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 132, & val); tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR84: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 133, & val); tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR85: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 134, & val); tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR86: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 135, & val); tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR87: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 136, & val); tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR88: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 142, & val); tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR8E: 0x%04x\n", (int )val); } else { } ivch_read(dvo, 143, & val); tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("ivch_dump_regs", "VR8F: 0x%04x\n", (int )val); } else { } return; } } static void ivch_destroy(struct intel_dvo_device *dvo ) { struct 
/* Tail of ivch_destroy (header on the previous source line): free the
 * private state if present and clear dvo->dev_priv so a later destroy is a
 * no-op. */
ivch_priv *priv ; { priv = (struct ivch_priv *)dvo->dev_priv; if ((unsigned long )priv != (unsigned long )((struct ivch_priv *)0)) { kfree((void const *)priv); dvo->dev_priv = (void *)0; } else { } return; } }
/*
 * Exported intel_dvo_dev_ops table for the ivch chip.  Positional
 * initializer: init, <unused>, dpms, mode_valid (cast to the int-returning
 * slot type), two unused slots, mode_set, detect, get_hw_state, <unused>,
 * destroy, dump_regs.  Slot meanings follow the struct layout declared
 * elsewhere in this file - confirm ordering against intel_dvo_dev_ops.
 */
struct intel_dvo_dev_ops ivch_ops = {& ivch_init, 0, & ivch_dpms, (int (*)(struct intel_dvo_device * , struct drm_display_mode * ))(& ivch_mode_valid), 0, 0, & ivch_mode_set, & ivch_detect, & ivch_get_hw_state, 0, & ivch_destroy, & ivch_dump_regs};
/* LDV environment model for ivch_ops (state machine #62): allocate zeroed
 * argument objects - 48 bytes for the intel_dvo_device, 208 bytes for the
 * drm_display_mode, matching the sizes used by ldv_main_exported_62. */
extern int ldv_setup_62(void) ; void ldv_initialize_intel_dvo_dev_ops_62(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); ivch_ops_group0 = (struct intel_dvo_device *)tmp; tmp___0 = ldv_init_zalloc(208UL); ivch_ops_group1 = (struct drm_display_mode *)tmp___0; return; } }
/*
 * ldv_main_exported_62 - nondeterministic dispatcher for the ivch_ops
 * callbacks.  __VERIFIER_nondet_int() picks one callback per invocation;
 * each case fires the callback in every ldv_state_variable_62 state (1-3)
 * where the model allows it and records the resulting state / ref_cnt
 * changes.  The switch continues onto the next source line.
 */
void ldv_main_exported_62(void) { bool ldvarg132 ; struct i2c_adapter *ldvarg130 ; void *tmp ; struct drm_display_mode *ldvarg131 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1936UL); ldvarg130 = (struct i2c_adapter *)tmp; tmp___0 = ldv_init_zalloc(208UL); ldvarg131 = (struct drm_display_mode *)tmp___0; ldv_memset((void *)(& ldvarg132), 0, 1UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_62 == 1) { ivch_dpms(ivch_ops_group0, (int )ldvarg132); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { ivch_dpms(ivch_ops_group0, (int )ldvarg132); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { ivch_dpms(ivch_ops_group0, (int )ldvarg132); ldv_state_variable_62 = 2; } else { } goto ldv_48110; case 1: ; if (ldv_state_variable_62 == 1) { ivch_detect(ivch_ops_group0); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { ivch_detect(ivch_ops_group0); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { ivch_detect(ivch_ops_group0); ldv_state_variable_62 = 2; } else { } goto ldv_48110; case 2: ; if (ldv_state_variable_62 == 1) { ivch_mode_set(ivch_ops_group0, ivch_ops_group1, ldvarg131); ldv_state_variable_62 =
1; } else { } if (ldv_state_variable_62 == 3) { ivch_mode_set(ivch_ops_group0, ivch_ops_group1, ldvarg131); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { ivch_mode_set(ivch_ops_group0, ivch_ops_group1, ldvarg131); ldv_state_variable_62 = 2; } else { } goto ldv_48110; case 3: ; if (ldv_state_variable_62 == 1) { ivch_mode_valid(ivch_ops_group0, ivch_ops_group1); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { ivch_mode_valid(ivch_ops_group0, ivch_ops_group1); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { ivch_mode_valid(ivch_ops_group0, ivch_ops_group1); ldv_state_variable_62 = 2; } else { } goto ldv_48110; case 4: ; if (ldv_state_variable_62 == 3) { ivch_destroy(ivch_ops_group0); ldv_state_variable_62 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_62 == 2) { ivch_destroy(ivch_ops_group0); ldv_state_variable_62 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48110; case 5: ; if (ldv_state_variable_62 == 1) { ivch_dump_regs(ivch_ops_group0); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { ivch_dump_regs(ivch_ops_group0); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { ivch_dump_regs(ivch_ops_group0); ldv_state_variable_62 = 2; } else { } goto ldv_48110; case 6: ; if (ldv_state_variable_62 == 1) { ivch_get_hw_state(ivch_ops_group0); ldv_state_variable_62 = 1; } else { } if (ldv_state_variable_62 == 3) { ivch_get_hw_state(ivch_ops_group0); ldv_state_variable_62 = 3; } else { } if (ldv_state_variable_62 == 2) { ivch_get_hw_state(ivch_ops_group0); ldv_state_variable_62 = 2; } else { } goto ldv_48110; case 7: ; if (ldv_state_variable_62 == 2) { ivch_init(ivch_ops_group0, ldvarg130); ldv_state_variable_62 = 3; } else { } goto ldv_48110; case 8: ; if (ldv_state_variable_62 == 1) { ldv_setup_62(); ldv_state_variable_62 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48110; default: ldv_stop(); } ldv_48110: ; return; } } bool 
ldv_queue_work_on_755(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_756(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_757(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_758(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_759(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_769(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_771(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_770(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct 
delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_773(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_772(struct workqueue_struct *ldv_func_arg1 ) ; struct intel_dvo_dev_ops ns2501_ops ; static struct ns2501_configuration const ns2501_modes[3U] = { {5U, 49U, 50U, 15U, 17U, 852U, 144U, 783U, 22U, 514U, 2047U, 1341U, 0U, 16U, 36U, 40960U, 40960U}, {7U, 49U, 0U, 15U, 25U, 612U, 215U, 1016U, 26U, 627U, 807U, 1341U, 0U, 4U, 35U, 51248U, 51232U}, {5U, 49U, 50U, 15U, 11U, 1350U, 276U, 1299U, 15U, 1056U, 2047U, 1341U, 0U, 7U, 27U, 65535U, 65535U}}; static struct ns2501_reg const mode_agnostic_values[61U] = { {10U, 129U}, {18U, 2U}, {24U, 7U}, {25U, 0U}, {26U, 0U}, {30U, 2U}, {31U, 64U}, {32U, 0U}, {33U, 0U}, {34U, 0U}, {35U, 0U}, {36U, 0U}, {37U, 0U}, {38U, 0U}, {39U, 0U}, {126U, 24U}, {132U, 0U}, {133U, 0U}, {134U, 0U}, {135U, 0U}, {136U, 0U}, {137U, 0U}, {138U, 0U}, {139U, 0U}, {140U, 16U}, {141U, 2U}, {144U, 255U}, {145U, 7U}, {146U, 160U}, {147U, 2U}, {148U, 0U}, {149U, 0U}, {150U, 5U}, {151U, 0U}, {154U, 136U}, {155U, 0U}, {158U, 37U}, {159U, 3U}, {160U, 40U}, {161U, 1U}, {162U, 40U}, {163U, 5U}, {164U, 132U}, {165U, 0U}, {166U, 0U}, {167U, 0U}, {168U, 0U}, {169U, 4U}, {170U, 112U}, {171U, 79U}, {172U, 0U}, {173U, 0U}, {182U, 9U}, {183U, 3U}, {186U, 0U}, {187U, 32U}, {243U, 144U}, {244U, 0U}, {247U, 136U}, {248U, 10U}, {249U, 0U}}; static struct ns2501_reg const regs_init[3U] = { {53U, 255U}, {52U, 0U}, {8U, 48U}}; static bool ns2501_readb(struct intel_dvo_device *dvo , int addr , uint8_t *ch ) { struct ns2501_priv *ns ; struct i2c_adapter *adapter ; u8 out_buf[2U] ; u8 in_buf[2U] ; struct i2c_msg msgs[2U] ; int tmp ; long tmp___0 ; { ns = (struct ns2501_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msgs[0].addr = (unsigned short )dvo->slave_addr; msgs[0].flags = 0U; msgs[0].len = 1U; msgs[0].buf = (__u8 *)(& out_buf); 
msgs[1].addr = (unsigned short )dvo->slave_addr; msgs[1].flags = 1U; msgs[1].len = 1U; msgs[1].buf = (__u8 *)(& in_buf); out_buf[0] = (u8 )addr; out_buf[1] = 0U; tmp = i2c_transfer(adapter, (struct i2c_msg *)(& msgs), 2); if (tmp == 2) { *ch = in_buf[0]; return (1); } else { } if (! ns->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ns2501_readb", "Unable to read register 0x%02x from %s:0x%02x.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } } static bool ns2501_writeb(struct intel_dvo_device *dvo , int addr , uint8_t ch ) { struct ns2501_priv *ns ; struct i2c_adapter *adapter ; uint8_t out_buf[2U] ; struct i2c_msg msg ; int tmp ; long tmp___0 ; { ns = (struct ns2501_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msg.addr = (unsigned short )dvo->slave_addr; msg.flags = 0U; msg.len = 2U; msg.buf = (__u8 *)(& out_buf); out_buf[0] = (uint8_t )addr; out_buf[1] = ch; tmp = i2c_transfer(adapter, & msg, 1); if (tmp == 1) { return (1); } else { } if (! 
ns->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ns2501_writeb", "Unable to write register 0x%02x to %s:%d\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } } static bool ns2501_init(struct intel_dvo_device *dvo , struct i2c_adapter *adapter ) { struct ns2501_priv *ns ; unsigned char ch ; void *tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; long tmp___6 ; { tmp = kzalloc(16UL, 208U); ns = (struct ns2501_priv *)tmp; if ((unsigned long )ns == (unsigned long )((struct ns2501_priv *)0)) { return (0); } else { } dvo->i2c_bus = adapter; dvo->dev_priv = (void *)ns; ns->quiet = 1; tmp___0 = ns2501_readb(dvo, 0, & ch); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto out; } else { } if ((unsigned int )ch != 5U) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("ns2501_init", "ns2501 not detected got %d: from %s Slave %d.\n", (int )ch, (char *)(& adapter->name), dvo->slave_addr); } else { } goto out; } else { } tmp___3 = ns2501_readb(dvo, 2, & ch); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { goto out; } else { } if ((unsigned int )ch != 38U) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("ns2501_init", "ns2501 not detected got %d: from %s Slave %d.\n", (int )ch, (char *)(& adapter->name), dvo->slave_addr); } else { } goto out; } else { } ns->quiet = 0; tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("ns2501_init", "init ns2501 dvo controller successfully!\n"); } else { } return (1); out: kfree((void const *)ns); return (0); } } static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo ) { { return (1); } } static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo , struct drm_display_mode 
*mode ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ns2501_mode_valid", "is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); } else { } if ((((mode->hdisplay == 640 && mode->vdisplay == 480) && mode->clock == 25175) || ((mode->hdisplay == 800 && mode->vdisplay == 600) && mode->clock == 40000)) || ((mode->hdisplay == 1024 && mode->vdisplay == 768) && mode->clock == 65000)) { return (0); } else { return (33); } } } static void ns2501_mode_set(struct intel_dvo_device *dvo , struct drm_display_mode *mode , struct drm_display_mode *adjusted_mode ) { struct ns2501_configuration const *conf ; struct ns2501_priv *ns ; int mode_idx ; int i ; long tmp ; long tmp___0 ; { ns = (struct ns2501_priv *)dvo->dev_priv; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ns2501_mode_set", "set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ns2501_mode_set", "Detailed requested mode settings are:\nclock\t\t: %d kHz\nhdisplay\t: %d\nhblank start\t: %d\nhblank end\t: %d\nhsync start\t: %d\nhsync end\t: %d\nhtotal\t\t: %d\nhskew\t\t: %d\nvdisplay\t: %d\nvblank start\t: %d\nhblank end\t: %d\nvsync start\t: %d\nvsync end\t: %d\nvtotal\t\t: %d\n", adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_hblank_start, adjusted_mode->crtc_hblank_end, adjusted_mode->crtc_hsync_start, adjusted_mode->crtc_hsync_end, adjusted_mode->crtc_htotal, adjusted_mode->crtc_hskew, adjusted_mode->crtc_vdisplay, adjusted_mode->crtc_vblank_start, adjusted_mode->crtc_vblank_end, adjusted_mode->crtc_vsync_start, adjusted_mode->crtc_vsync_end, adjusted_mode->crtc_vtotal); } else { } if (mode->hdisplay == 640 && mode->vdisplay == 480) { 
mode_idx = 0; } else if (mode->hdisplay == 800 && mode->vdisplay == 600) { mode_idx = 1; } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) { mode_idx = 2; } else { return; } i = 0; goto ldv_48102; ldv_48101: ns2501_writeb(dvo, (int )regs_init[i].offset, (int )regs_init[i].value); i = i + 1; ldv_48102: ; if ((unsigned int )i <= 2U) { goto ldv_48101; } else { } i = 0; goto ldv_48107; ldv_48106: ns2501_writeb(dvo, (int )mode_agnostic_values[i].offset, (int )mode_agnostic_values[i].value); i = i + 1; ldv_48107: ; if ((unsigned int )i <= 60U) { goto ldv_48106; } else { } conf = (struct ns2501_configuration const *)(& ns2501_modes) + (unsigned long )mode_idx; ns->conf = conf; ns2501_writeb(dvo, 8, (int )conf->conf); ns2501_writeb(dvo, 27, (int )conf->pll_a); ns2501_writeb(dvo, 28, (int )((uint8_t )conf->pll_b)); ns2501_writeb(dvo, 29, (int )((uint8_t )((int )((unsigned short )conf->pll_b) >> 8))); ns2501_writeb(dvo, 193, (int )((uint8_t )conf->hstart)); ns2501_writeb(dvo, 194, (int )((uint8_t )((int )((unsigned short )conf->hstart) >> 8))); ns2501_writeb(dvo, 195, (int )((uint8_t )conf->hstop)); ns2501_writeb(dvo, 196, (int )((uint8_t )((int )((unsigned short )conf->hstop) >> 8))); ns2501_writeb(dvo, 197, (int )((uint8_t )conf->vstart)); ns2501_writeb(dvo, 198, (int )((uint8_t )((int )((unsigned short )conf->vstart) >> 8))); ns2501_writeb(dvo, 199, (int )((uint8_t )conf->vstop)); ns2501_writeb(dvo, 200, (int )((uint8_t )((int )((unsigned short )conf->vstop) >> 8))); ns2501_writeb(dvo, 128, (int )((uint8_t )conf->vsync)); ns2501_writeb(dvo, 129, (int )((uint8_t )((int )((unsigned short )conf->vsync) >> 8))); ns2501_writeb(dvo, 130, (int )((uint8_t )conf->vtotal)); ns2501_writeb(dvo, 131, (int )((uint8_t )((int )((unsigned short )conf->vtotal) >> 8))); ns2501_writeb(dvo, 152, (int )((uint8_t )conf->hpos)); ns2501_writeb(dvo, 153, (int )((uint8_t )((int )((unsigned short )conf->hpos) >> 8))); ns2501_writeb(dvo, 142, (int )((uint8_t )conf->vpos)); 
/* Tail of ns2501_mode_set: finish programming the selected
 * ns2501_configuration - 16-bit values are written as low/high byte pairs
 * (vpos 142/143, voffs 156/157, hscale 184/185, vscale 16/17), then the
 * single-byte dither, syncb and sync registers. */
ns2501_writeb(dvo, 143, (int )((uint8_t )((int )((unsigned short )conf->vpos) >> 8))); ns2501_writeb(dvo, 156, (int )((uint8_t )conf->voffs)); ns2501_writeb(dvo, 157, (int )((uint8_t )((int )((unsigned short )conf->voffs) >> 8))); ns2501_writeb(dvo, 184, (int )((uint8_t )conf->hscale)); ns2501_writeb(dvo, 185, (int )((uint8_t )((int )((unsigned short )conf->hscale) >> 8))); ns2501_writeb(dvo, 16, (int )((uint8_t )conf->vscale)); ns2501_writeb(dvo, 17, (int )((uint8_t )((int )((unsigned short )conf->vscale) >> 8))); ns2501_writeb(dvo, 249, (int )conf->dither); ns2501_writeb(dvo, 65, (int )conf->syncb); ns2501_writeb(dvo, 192, (int )conf->sync); return; } }
/* ns2501_get_hw_state - the output counts as enabled when bit 0 of
 * register 8 is set; a failed read is reported as "off". */
static bool ns2501_get_hw_state(struct intel_dvo_device *dvo ) { unsigned char ch ; bool tmp ; int tmp___0 ; { tmp = ns2501_readb(dvo, 8, & ch); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } return (((int )ch & 1) != 0); } }
/*
 * ns2501_dpms - power sequencing for the ns2501.
 * Requires ns->conf to have been set by a prior ns2501_mode_set (it is
 * dereferenced unconditionally).  Enable path: restore sync (register 192)
 * with bit 3 forced on, restore syncb (65), step register 52 through the
 * 1 -> 3 sequence around the msleep() settle delays, and pulse the conf
 * register (8) with bit 2 set before restoring the configured value.
 * Disable path: register 52 back to 1, park register 8 at 52, then
 * register 52 to 0, again with msleep() delays between steps.
 */
static void ns2501_dpms(struct intel_dvo_device *dvo , bool enable ) { struct ns2501_priv *ns ; long tmp ; { ns = (struct ns2501_priv *)dvo->dev_priv; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("ns2501_dpms", "Trying set the dpms of the DVO to %i\n", (int )enable); } else { } if ((int )enable) { ns2501_writeb(dvo, 192, (int )((unsigned int )((unsigned char )(ns->conf)->sync) | 8U)); ns2501_writeb(dvo, 65, (int )(ns->conf)->syncb); ns2501_writeb(dvo, 52, 1); msleep(15U); ns2501_writeb(dvo, 8, (int )((unsigned int )((unsigned char )(ns->conf)->conf) | 4U)); if (((int )(ns->conf)->conf & 4) == 0) { ns2501_writeb(dvo, 8, (int )(ns->conf)->conf); } else { } msleep(200U); ns2501_writeb(dvo, 52, 3); ns2501_writeb(dvo, 192, (int )(ns->conf)->sync); } else { ns2501_writeb(dvo, 52, 1); msleep(200U); ns2501_writeb(dvo, 8, 52); msleep(15U); ns2501_writeb(dvo, 52, 0); } return; } }
/* ns2501_destroy - header only; the body (free ns, clear dev_priv)
 * continues on the next source line. */
static void ns2501_destroy(struct intel_dvo_device *dvo ) { struct ns2501_priv *ns ; { ns = (struct ns2501_priv *)dvo->dev_priv; if
((unsigned long )ns != (unsigned long )((struct ns2501_priv *)0)) { kfree((void const *)ns); dvo->dev_priv = (void *)0; } else { } return; } } struct intel_dvo_dev_ops ns2501_ops = {& ns2501_init, 0, & ns2501_dpms, (int (*)(struct intel_dvo_device * , struct drm_display_mode * ))(& ns2501_mode_valid), 0, 0, & ns2501_mode_set, & ns2501_detect, & ns2501_get_hw_state, 0, & ns2501_destroy, 0}; extern int ldv_setup_61(void) ; void ldv_initialize_intel_dvo_dev_ops_61(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); ns2501_ops_group0 = (struct intel_dvo_device *)tmp; tmp___0 = ldv_init_zalloc(208UL); ns2501_ops_group1 = (struct drm_display_mode *)tmp___0; return; } } void ldv_main_exported_61(void) { struct drm_display_mode *ldvarg296 ; void *tmp ; struct i2c_adapter *ldvarg295 ; void *tmp___0 ; bool ldvarg297 ; int tmp___1 ; { tmp = ldv_init_zalloc(208UL); ldvarg296 = (struct drm_display_mode *)tmp; tmp___0 = ldv_init_zalloc(1936UL); ldvarg295 = (struct i2c_adapter *)tmp___0; ldv_memset((void *)(& ldvarg297), 0, 1UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_61 == 2) { ns2501_dpms(ns2501_ops_group0, (int )ldvarg297); ldv_state_variable_61 = 2; } else { } if (ldv_state_variable_61 == 1) { ns2501_dpms(ns2501_ops_group0, (int )ldvarg297); ldv_state_variable_61 = 1; } else { } if (ldv_state_variable_61 == 3) { ns2501_dpms(ns2501_ops_group0, (int )ldvarg297); ldv_state_variable_61 = 3; } else { } goto ldv_48136; case 1: ; if (ldv_state_variable_61 == 2) { ns2501_detect(ns2501_ops_group0); ldv_state_variable_61 = 2; } else { } if (ldv_state_variable_61 == 1) { ns2501_detect(ns2501_ops_group0); ldv_state_variable_61 = 1; } else { } if (ldv_state_variable_61 == 3) { ns2501_detect(ns2501_ops_group0); ldv_state_variable_61 = 3; } else { } goto ldv_48136; case 2: ; if (ldv_state_variable_61 == 2) { ns2501_mode_set(ns2501_ops_group0, ns2501_ops_group1, ldvarg296); ldv_state_variable_61 = 2; } else { } if 
(ldv_state_variable_61 == 1) { ns2501_mode_set(ns2501_ops_group0, ns2501_ops_group1, ldvarg296); ldv_state_variable_61 = 1; } else { } if (ldv_state_variable_61 == 3) { ns2501_mode_set(ns2501_ops_group0, ns2501_ops_group1, ldvarg296); ldv_state_variable_61 = 3; } else { } goto ldv_48136; case 3: ; if (ldv_state_variable_61 == 2) { ns2501_mode_valid(ns2501_ops_group0, ns2501_ops_group1); ldv_state_variable_61 = 2; } else { } if (ldv_state_variable_61 == 1) { ns2501_mode_valid(ns2501_ops_group0, ns2501_ops_group1); ldv_state_variable_61 = 1; } else { } if (ldv_state_variable_61 == 3) { ns2501_mode_valid(ns2501_ops_group0, ns2501_ops_group1); ldv_state_variable_61 = 3; } else { } goto ldv_48136; case 4: ; if (ldv_state_variable_61 == 2) { ns2501_destroy(ns2501_ops_group0); ldv_state_variable_61 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_61 == 3) { ns2501_destroy(ns2501_ops_group0); ldv_state_variable_61 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48136; case 5: ; if (ldv_state_variable_61 == 2) { ns2501_get_hw_state(ns2501_ops_group0); ldv_state_variable_61 = 2; } else { } if (ldv_state_variable_61 == 1) { ns2501_get_hw_state(ns2501_ops_group0); ldv_state_variable_61 = 1; } else { } if (ldv_state_variable_61 == 3) { ns2501_get_hw_state(ns2501_ops_group0); ldv_state_variable_61 = 3; } else { } goto ldv_48136; case 6: ; if (ldv_state_variable_61 == 2) { ns2501_init(ns2501_ops_group0, ldvarg295); ldv_state_variable_61 = 3; } else { } goto ldv_48136; case 7: ; if (ldv_state_variable_61 == 1) { ldv_setup_61(); ldv_state_variable_61 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48136; default: ldv_stop(); } ldv_48136: ; return; } } bool ldv_queue_work_on_769(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } 
/* LDV (Linux Driver Verification) instrumented wrappers around the workqueue
 * API.  Each wrapper forwards to the real kernel primitive and additionally
 * notifies the verifier's environment model (activate_work_18 /
 * call_and_disable_all_18) so the model can track which work items are
 * pending.  The numeric suffixes are call-site identifiers generated by the
 * LDV instrumentation; the ldv_func_ret_type___N typedefs are likewise
 * generated aliases for bool. */
bool ldv_queue_delayed_work_on_770(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_771(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* Flushing a workqueue runs (and disables) all modeled work items. */
void ldv_flush_workqueue_772(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
bool ldv_queue_delayed_work_on_773(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Forward declarations for the next instrumented translation unit
 * (the sil164 DVO transmitter driver that follows). */
__inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_783(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_785(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_784(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_787(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_786(struct workqueue_struct *ldv_func_arg1 ) ; struct 
intel_dvo_dev_ops sil164_ops ;
/* sil164_readb - read one SiI164 register over i2c.
 * Sends a one-byte write of the register address followed by a one-byte
 * read from the slave at dvo->slave_addr, storing the result in *ch.
 * Returns true (1) when both i2c messages complete; otherwise logs a debug
 * message (suppressed while sil->quiet is set, i.e. during probing) and
 * returns false (0). */
static bool sil164_readb(struct intel_dvo_device *dvo , int addr , uint8_t *ch ) { struct sil164_priv *sil ; struct i2c_adapter *adapter ; u8 out_buf[2U] ; u8 in_buf[2U] ; struct i2c_msg msgs[2U] ; int tmp ; long tmp___0 ; { sil = (struct sil164_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msgs[0].addr = (unsigned short )dvo->slave_addr; msgs[0].flags = 0U; msgs[0].len = 1U; msgs[0].buf = (__u8 *)(& out_buf); msgs[1].addr = (unsigned short )dvo->slave_addr; msgs[1].flags = 1U; msgs[1].len = 1U; msgs[1].buf = (__u8 *)(& in_buf); out_buf[0] = (u8 )addr; out_buf[1] = 0U; tmp = i2c_transfer(adapter, (struct i2c_msg *)(& msgs), 2); if (tmp == 2) { *ch = in_buf[0]; return (1); } else { } if (! sil->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("sil164_readb", "Unable to read register 0x%02x from %s:%02x.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } }
/* sil164_writeb - write one SiI164 register over i2c.
 * Sends a single two-byte message (register address, value).  Returns true
 * on success; otherwise logs (unless sil->quiet) and returns false. */
static bool sil164_writeb(struct intel_dvo_device *dvo , int addr , uint8_t ch ) { struct sil164_priv *sil ; struct i2c_adapter *adapter ; uint8_t out_buf[2U] ; struct i2c_msg msg ; int tmp ; long tmp___0 ; { sil = (struct sil164_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msg.addr = (unsigned short )dvo->slave_addr; msg.flags = 0U; msg.len = 2U; msg.buf = (__u8 *)(& out_buf); out_buf[0] = (uint8_t )addr; out_buf[1] = ch; tmp = i2c_transfer(adapter, & msg, 1); if (tmp == 1) { return (1); } else { } if (! 
sil->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("sil164_writeb", "Unable to write register 0x%02x to %s:%d.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } }
/* sil164_init - probe for a SiI164 TMDS transmitter on the given adapter.
 * Allocates the driver-private state (kzalloc size 1 == sizeof(struct
 * sil164_priv) after CIL lowering; 208U is the lowered GFP_KERNEL mask),
 * then verifies the chip identity: register 0 must read 0x01 and register 2
 * must read 0x06.  Probing runs with sil->quiet set so failed reads on an
 * absent chip do not spam the log; quiet is cleared once detection succeeds.
 * Returns true and keeps the allocation on success; on any failure frees the
 * private state via the out: label and returns false. */
static bool sil164_init(struct intel_dvo_device *dvo , struct i2c_adapter *adapter ) { struct sil164_priv *sil ; unsigned char ch ; void *tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; long tmp___6 ; { tmp = kzalloc(1UL, 208U); sil = (struct sil164_priv *)tmp; if ((unsigned long )sil == (unsigned long )((struct sil164_priv *)0)) { return (0); } else { } dvo->i2c_bus = adapter; dvo->dev_priv = (void *)sil; sil->quiet = 1; tmp___0 = sil164_readb(dvo, 0, & ch); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto out; } else { } if ((unsigned int )ch != 1U) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("sil164_init", "sil164 not detected got %d: from %s Slave %d.\n", (int )ch, (char *)(& adapter->name), dvo->slave_addr); } else { } goto out; } else { } tmp___3 = sil164_readb(dvo, 2, & ch); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { goto out; } else { } if ((unsigned int )ch != 6U) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("sil164_init", "sil164 not detected got %d: from %s Slave %d.\n", (int )ch, (char *)(& adapter->name), dvo->slave_addr); } else { } goto out; } else { } sil->quiet = 0; tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("sil164_init", "init sil164 dvo controller successfully!\n"); } else { } return (1); out: kfree((void const *)sil); return (0); } }
/* sil164_detect - report connector presence from bit 1 of register 9.
 * Returns 1/2, the lowered values of enum drm_connector_status
 * (1 = connected, 2 = disconnected). */
static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo ) { uint8_t reg9 ; { sil164_readb(dvo, 9, & reg9); if (((int )reg9 & 2) != 0) { return (1); } else { return (2); } } 
}
/* sil164_mode_valid - the SiI164 accepts any mode; always MODE_OK (0). */
static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo , struct drm_display_mode *mode ) { { return (0); } }
/* sil164_mode_set - no per-mode programming is needed on this chip. */
static void sil164_mode_set(struct intel_dvo_device *dvo , struct drm_display_mode *mode , struct drm_display_mode *adjusted_mode ) { { return; } }
/* sil164_dpms - set or clear the power-enable bit (bit 0) of register 8
 * with a read-modify-write.  If the initial read fails the function
 * silently bails out without writing. */
static void sil164_dpms(struct intel_dvo_device *dvo , bool enable ) { int ret ; unsigned char ch ; bool tmp ; { tmp = sil164_readb(dvo, 8, & ch); ret = (int )tmp; if (ret == 0) { return; } else { } if ((int )enable) { ch = (unsigned int )ch | 1U; } else { ch = (unsigned int )ch & 254U; } sil164_writeb(dvo, 8, (int )ch); return; } }
/* sil164_get_hw_state - report whether the power-enable bit (bit 0 of
 * register 8) is set; returns false when the register read fails. */
static bool sil164_get_hw_state(struct intel_dvo_device *dvo ) { int ret ; unsigned char ch ; bool tmp ; { tmp = sil164_readb(dvo, 8, & ch); ret = (int )tmp; if (ret == 0) { return (0); } else { } if ((int )ch & 1) { return (1); } else { return (0); } } }
/* sil164_dump_regs - emit the frequency/control/status registers
 * (6, 7, 8, 9, 12) to the DRM debug log.  Read failures are ignored;
 * each value is printed unconditionally after its read attempt. */
static void sil164_dump_regs(struct intel_dvo_device *dvo ) { uint8_t val ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { sil164_readb(dvo, 6, & val); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("sil164_dump_regs", "SIL164_FREQ_LO: 0x%02x\n", (int )val); } else { } sil164_readb(dvo, 7, & val); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("sil164_dump_regs", "SIL164_FREQ_HI: 0x%02x\n", (int )val); } else { } sil164_readb(dvo, 8, & val); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("sil164_dump_regs", "SIL164_REG8: 0x%02x\n", (int )val); } else { } sil164_readb(dvo, 9, & val); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("sil164_dump_regs", "SIL164_REG9: 0x%02x\n", (int )val); } else { } sil164_readb(dvo, 12, & val); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("sil164_dump_regs", "SIL164_REGC: 0x%02x\n", (int )val); } else 
{ } return; } }
/* sil164_destroy - free the driver-private state and clear dev_priv.
 * (The NULL guard around kfree is redundant but harmless.) */
static void sil164_destroy(struct intel_dvo_device *dvo ) { struct sil164_priv *sil ; { sil = (struct sil164_priv *)dvo->dev_priv; if ((unsigned long )sil != (unsigned long )((struct sil164_priv *)0)) { kfree((void const *)sil); dvo->dev_priv = (void *)0; } else { } return; } }
/* Vtable binding the sil164 callbacks into the generic intel_dvo framework.
 * The zeroed slots are optional hooks this transmitter does not implement. */
struct intel_dvo_dev_ops sil164_ops = {& sil164_init, 0, & sil164_dpms, (int (*)(struct intel_dvo_device * , struct drm_display_mode * ))(& sil164_mode_valid), 0, 0, & sil164_mode_set, & sil164_detect, & sil164_get_hw_state, 0, & sil164_destroy, & sil164_dump_regs}; extern int ldv_setup_60(void) ;
/* LDV environment model: allocate zeroed dummies for the arguments shared
 * by all sil164_ops callbacks (48 bytes == struct intel_dvo_device,
 * 208 bytes == struct drm_display_mode). */
void ldv_initialize_intel_dvo_dev_ops_60(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); sil164_ops_group0 = (struct intel_dvo_device *)tmp; tmp___0 = ldv_init_zalloc(208UL); sil164_ops_group1 = (struct drm_display_mode *)tmp___0; return; } }
/* LDV environment model driver for sil164_ops: nondeterministically invokes
 * one callback per call while tracking a small lifecycle state machine in
 * ldv_state_variable_60 (1 = torn down, 2 = set up, 3 = initialized);
 * continues past this line. */
void ldv_main_exported_60(void) { struct i2c_adapter *ldvarg119 ; void *tmp ; struct drm_display_mode *ldvarg120 ; void *tmp___0 ; bool ldvarg121 ; int tmp___1 ; { tmp = ldv_init_zalloc(1936UL); ldvarg119 = (struct i2c_adapter *)tmp; tmp___0 = ldv_init_zalloc(208UL); ldvarg120 = (struct drm_display_mode *)tmp___0; ldv_memset((void *)(& ldvarg121), 0, 1UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_60 == 1) { sil164_dpms(sil164_ops_group0, (int )ldvarg121); ldv_state_variable_60 = 1; } else { } if (ldv_state_variable_60 == 3) { sil164_dpms(sil164_ops_group0, (int )ldvarg121); ldv_state_variable_60 = 3; } else { } if (ldv_state_variable_60 == 2) { sil164_dpms(sil164_ops_group0, (int )ldvarg121); ldv_state_variable_60 = 2; } else { } goto ldv_48098; case 1: ; if (ldv_state_variable_60 == 1) { sil164_detect(sil164_ops_group0); ldv_state_variable_60 = 1; } else { } if (ldv_state_variable_60 == 3) { sil164_detect(sil164_ops_group0); ldv_state_variable_60 = 3; } else { } if (ldv_state_variable_60 == 2) { sil164_detect(sil164_ops_group0); ldv_state_variable_60 = 2; } else { } goto 
ldv_48098; case 2: ; if (ldv_state_variable_60 == 1) { sil164_mode_set(sil164_ops_group0, sil164_ops_group1, ldvarg120); ldv_state_variable_60 = 1; } else { } if (ldv_state_variable_60 == 3) { sil164_mode_set(sil164_ops_group0, sil164_ops_group1, ldvarg120); ldv_state_variable_60 = 3; } else { } if (ldv_state_variable_60 == 2) { sil164_mode_set(sil164_ops_group0, sil164_ops_group1, ldvarg120); ldv_state_variable_60 = 2; } else { } goto ldv_48098; case 3: ; if (ldv_state_variable_60 == 1) { sil164_mode_valid(sil164_ops_group0, sil164_ops_group1); ldv_state_variable_60 = 1; } else { } if (ldv_state_variable_60 == 3) { sil164_mode_valid(sil164_ops_group0, sil164_ops_group1); ldv_state_variable_60 = 3; } else { } if (ldv_state_variable_60 == 2) { sil164_mode_valid(sil164_ops_group0, sil164_ops_group1); ldv_state_variable_60 = 2; } else { } goto ldv_48098; case 4: ; if (ldv_state_variable_60 == 3) { sil164_destroy(sil164_ops_group0); ldv_state_variable_60 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_60 == 2) { sil164_destroy(sil164_ops_group0); ldv_state_variable_60 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48098; case 5: ; if (ldv_state_variable_60 == 1) { sil164_dump_regs(sil164_ops_group0); ldv_state_variable_60 = 1; } else { } if (ldv_state_variable_60 == 3) { sil164_dump_regs(sil164_ops_group0); ldv_state_variable_60 = 3; } else { } if (ldv_state_variable_60 == 2) { sil164_dump_regs(sil164_ops_group0); ldv_state_variable_60 = 2; } else { } goto ldv_48098; case 6: ; if (ldv_state_variable_60 == 1) { sil164_get_hw_state(sil164_ops_group0); ldv_state_variable_60 = 1; } else { } if (ldv_state_variable_60 == 3) { sil164_get_hw_state(sil164_ops_group0); ldv_state_variable_60 = 3; } else { } if (ldv_state_variable_60 == 2) { sil164_get_hw_state(sil164_ops_group0); ldv_state_variable_60 = 2; } else { } goto ldv_48098; case 7: ; if (ldv_state_variable_60 == 2) { sil164_init(sil164_ops_group0, ldvarg119); ldv_state_variable_60 = 3; } else { } goto 
ldv_48098; case 8: ; if (ldv_state_variable_60 == 1) { ldv_setup_60(); ldv_state_variable_60 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48098; default: ldv_stop(); } ldv_48098: ; return; } } bool ldv_queue_work_on_783(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_784(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_785(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_786(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_787(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_797(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_799(int 
ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_798(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_801(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_800(struct workqueue_struct *ldv_func_arg1 ) ; struct intel_dvo_dev_ops tfp410_ops ; static bool tfp410_readb(struct intel_dvo_device *dvo , int addr , uint8_t *ch ) { struct tfp410_priv *tfp ; struct i2c_adapter *adapter ; u8 out_buf[2U] ; u8 in_buf[2U] ; struct i2c_msg msgs[2U] ; int tmp ; long tmp___0 ; { tfp = (struct tfp410_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msgs[0].addr = (unsigned short )dvo->slave_addr; msgs[0].flags = 0U; msgs[0].len = 1U; msgs[0].buf = (__u8 *)(& out_buf); msgs[1].addr = (unsigned short )dvo->slave_addr; msgs[1].flags = 1U; msgs[1].len = 1U; msgs[1].buf = (__u8 *)(& in_buf); out_buf[0] = (u8 )addr; out_buf[1] = 0U; tmp = i2c_transfer(adapter, (struct i2c_msg *)(& msgs), 2); if (tmp == 2) { *ch = in_buf[0]; return (1); } else { } if (! 
tfp->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("tfp410_readb", "Unable to read register 0x%02x from %s:%02x.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } } static bool tfp410_writeb(struct intel_dvo_device *dvo , int addr , uint8_t ch ) { struct tfp410_priv *tfp ; struct i2c_adapter *adapter ; uint8_t out_buf[2U] ; struct i2c_msg msg ; int tmp ; long tmp___0 ; { tfp = (struct tfp410_priv *)dvo->dev_priv; adapter = dvo->i2c_bus; msg.addr = (unsigned short )dvo->slave_addr; msg.flags = 0U; msg.len = 2U; msg.buf = (__u8 *)(& out_buf); out_buf[0] = (uint8_t )addr; out_buf[1] = ch; tmp = i2c_transfer(adapter, & msg, 1); if (tmp == 1) { return (1); } else { } if (! tfp->quiet) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("tfp410_writeb", "Unable to write register 0x%02x to %s:%d.\n", addr, (char *)(& adapter->name), dvo->slave_addr); } else { } } else { } return (0); } } static int tfp410_getid(struct intel_dvo_device *dvo , int addr ) { uint8_t ch1 ; uint8_t ch2 ; bool tmp ; bool tmp___0 ; { tmp = tfp410_readb(dvo, addr, & ch1); if ((int )tmp) { tmp___0 = tfp410_readb(dvo, addr + 1, & ch2); if ((int )tmp___0) { return ((((int )ch2 << 8) & 65535) | (int )ch1); } else { } } else { } return (-1); } } static bool tfp410_init(struct intel_dvo_device *dvo , struct i2c_adapter *adapter ) { struct tfp410_priv *tfp ; int id ; void *tmp ; long tmp___0 ; long tmp___1 ; { tmp = kzalloc(1UL, 208U); tfp = (struct tfp410_priv *)tmp; if ((unsigned long )tfp == (unsigned long )((struct tfp410_priv *)0)) { return (0); } else { } dvo->i2c_bus = adapter; dvo->dev_priv = (void *)tfp; tfp->quiet = 1; id = tfp410_getid(dvo, 0); if (id != 332) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("tfp410_init", "tfp410 not detected got VID %X: from %s Slave %d.\n", id, (char *)(& 
adapter->name), dvo->slave_addr); } else { } goto out; } else { } id = tfp410_getid(dvo, 2);
/* tfp410_init continues: after the vendor id (0x014C == 332) check above,
 * verify the device id at registers 2/3 (must be 0x0410 == 1040); on
 * success clear quiet mode and keep the allocation, otherwise free it. */
if (id != 1040) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("tfp410_init", "tfp410 not detected got DID %X: from %s Slave %d.\n", id, (char *)(& adapter->name), dvo->slave_addr); } else { } goto out; } else { } tfp->quiet = 0; return (1); out: kfree((void const *)tfp); return (0); } }
/* tfp410_detect - report connector presence from bit 2 of CTL2 (reg 9).
 * Returns the lowered enum drm_connector_status values (1 = connected,
 * 2 = disconnected); a failed read leaves the default "disconnected". */
static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo ) { enum drm_connector_status ret ; uint8_t ctl2 ; bool tmp ; { ret = 2; tmp = tfp410_readb(dvo, 9, & ctl2); if ((int )tmp) { if (((int )ctl2 & 4) != 0) { ret = 1; } else { ret = 2; } } else { } return (ret); } }
/* tfp410_mode_valid - all modes accepted; always MODE_OK (0). */
static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo , struct drm_display_mode *mode ) { { return (0); } }
/* tfp410_mode_set - no per-mode programming needed on this chip. */
static void tfp410_mode_set(struct intel_dvo_device *dvo , struct drm_display_mode *mode , struct drm_display_mode *adjusted_mode ) { { return; } }
/* tfp410_dpms - read-modify-write the power-down bit (bit 0) of CTL1
 * (reg 8); bails out without writing if the initial read fails. */
static void tfp410_dpms(struct intel_dvo_device *dvo , bool enable ) { uint8_t ctl1 ; bool tmp ; int tmp___0 ; { tmp = tfp410_readb(dvo, 8, & ctl1); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } if ((int )enable) { ctl1 = (uint8_t )((unsigned int )ctl1 | 1U); } else { ctl1 = (unsigned int )ctl1 & 254U; } tfp410_writeb(dvo, 8, (int )ctl1); return; } }
/* tfp410_get_hw_state - true when bit 0 of CTL1 (reg 8) is set;
 * false when the register read fails. */
static bool tfp410_get_hw_state(struct intel_dvo_device *dvo ) { uint8_t ctl1 ; bool tmp ; int tmp___0 ; { tmp = tfp410_readb(dvo, 8, & ctl1); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } if ((int )ctl1 & 1) { return (1); } else { return (0); } } }
/* tfp410_dump_regs - log the TFP410 register file (REV, CTL1-3, USERCFG,
 * DE_* and H/V resolution pairs) to the DRM debug log; continues past
 * this line. */
static void tfp410_dump_regs(struct intel_dvo_device *dvo ) { uint8_t val ; uint8_t val2 ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; { tfp410_readb(dvo, 4, & val); tmp = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_REV: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 8, & val); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_CTL1: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 9, & val); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_CTL2: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 10, & val); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_CTL3: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 11, & val); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_USERCFG: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 50, & val); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_DE_DLY: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 51, & val); tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_DE_CTL: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 52, & val); tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_DE_TOP: 0x%02X\n", (int )val); } else { } tfp410_readb(dvo, 54, & val); tfp410_readb(dvo, 55, & val2); tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_DE_CNT: 0x%02X%02X\n", (int )val2, (int )val); } else { } tfp410_readb(dvo, 56, & val); tfp410_readb(dvo, 57, & val2); tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_DE_LIN: 
0x%02X%02X\n", (int )val2, (int )val); } else { } tfp410_readb(dvo, 58, & val); tfp410_readb(dvo, 59, & val2); tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_H_RES: 0x%02X%02X\n", (int )val2, (int )val); } else { } tfp410_readb(dvo, 60, & val); tfp410_readb(dvo, 61, & val2); tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("tfp410_dump_regs", "TFP410_V_RES: 0x%02X%02X\n", (int )val2, (int )val); } else { } return; } } static void tfp410_destroy(struct intel_dvo_device *dvo ) { struct tfp410_priv *tfp ; { tfp = (struct tfp410_priv *)dvo->dev_priv; if ((unsigned long )tfp != (unsigned long )((struct tfp410_priv *)0)) { kfree((void const *)tfp); dvo->dev_priv = (void *)0; } else { } return; } } struct intel_dvo_dev_ops tfp410_ops = {& tfp410_init, 0, & tfp410_dpms, (int (*)(struct intel_dvo_device * , struct drm_display_mode * ))(& tfp410_mode_valid), 0, 0, & tfp410_mode_set, & tfp410_detect, & tfp410_get_hw_state, 0, & tfp410_destroy, & tfp410_dump_regs}; extern int ldv_setup_59(void) ; void ldv_initialize_intel_dvo_dev_ops_59(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(48UL); tfp410_ops_group0 = (struct intel_dvo_device *)tmp; tmp___0 = ldv_init_zalloc(208UL); tfp410_ops_group1 = (struct drm_display_mode *)tmp___0; return; } } void ldv_main_exported_59(void) { struct i2c_adapter *ldvarg489 ; void *tmp ; struct drm_display_mode *ldvarg490 ; void *tmp___0 ; bool ldvarg491 ; int tmp___1 ; { tmp = ldv_init_zalloc(1936UL); ldvarg489 = (struct i2c_adapter *)tmp; tmp___0 = ldv_init_zalloc(208UL); ldvarg490 = (struct drm_display_mode *)tmp___0; ldv_memset((void *)(& ldvarg491), 0, 1UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_59 == 2) { tfp410_dpms(tfp410_ops_group0, (int )ldvarg491); ldv_state_variable_59 = 2; } else { } if (ldv_state_variable_59 == 1) { 
tfp410_dpms(tfp410_ops_group0, (int )ldvarg491); ldv_state_variable_59 = 1; } else { } if (ldv_state_variable_59 == 3) { tfp410_dpms(tfp410_ops_group0, (int )ldvarg491); ldv_state_variable_59 = 3; } else { } goto ldv_48104; case 1: ; if (ldv_state_variable_59 == 2) { tfp410_detect(tfp410_ops_group0); ldv_state_variable_59 = 2; } else { } if (ldv_state_variable_59 == 1) { tfp410_detect(tfp410_ops_group0); ldv_state_variable_59 = 1; } else { } if (ldv_state_variable_59 == 3) { tfp410_detect(tfp410_ops_group0); ldv_state_variable_59 = 3; } else { } goto ldv_48104; case 2: ; if (ldv_state_variable_59 == 2) { tfp410_mode_set(tfp410_ops_group0, tfp410_ops_group1, ldvarg490); ldv_state_variable_59 = 2; } else { } if (ldv_state_variable_59 == 1) { tfp410_mode_set(tfp410_ops_group0, tfp410_ops_group1, ldvarg490); ldv_state_variable_59 = 1; } else { } if (ldv_state_variable_59 == 3) { tfp410_mode_set(tfp410_ops_group0, tfp410_ops_group1, ldvarg490); ldv_state_variable_59 = 3; } else { } goto ldv_48104; case 3: ; if (ldv_state_variable_59 == 2) { tfp410_mode_valid(tfp410_ops_group0, tfp410_ops_group1); ldv_state_variable_59 = 2; } else { } if (ldv_state_variable_59 == 1) { tfp410_mode_valid(tfp410_ops_group0, tfp410_ops_group1); ldv_state_variable_59 = 1; } else { } if (ldv_state_variable_59 == 3) { tfp410_mode_valid(tfp410_ops_group0, tfp410_ops_group1); ldv_state_variable_59 = 3; } else { } goto ldv_48104; case 4: ; if (ldv_state_variable_59 == 2) { tfp410_destroy(tfp410_ops_group0); ldv_state_variable_59 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_59 == 3) { tfp410_destroy(tfp410_ops_group0); ldv_state_variable_59 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48104; case 5: ; if (ldv_state_variable_59 == 2) { tfp410_dump_regs(tfp410_ops_group0); ldv_state_variable_59 = 2; } else { } if (ldv_state_variable_59 == 1) { tfp410_dump_regs(tfp410_ops_group0); ldv_state_variable_59 = 1; } else { } if (ldv_state_variable_59 == 3) { 
tfp410_dump_regs(tfp410_ops_group0); ldv_state_variable_59 = 3; } else { } goto ldv_48104; case 6: ; if (ldv_state_variable_59 == 2) { tfp410_get_hw_state(tfp410_ops_group0); ldv_state_variable_59 = 2; } else { } if (ldv_state_variable_59 == 1) { tfp410_get_hw_state(tfp410_ops_group0); ldv_state_variable_59 = 1; } else { } if (ldv_state_variable_59 == 3) { tfp410_get_hw_state(tfp410_ops_group0); ldv_state_variable_59 = 3; } else { } goto ldv_48104; case 7: ; if (ldv_state_variable_59 == 2) { tfp410_init(tfp410_ops_group0, ldvarg489); ldv_state_variable_59 = 3; } else { } goto ldv_48104; case 8: ; if (ldv_state_variable_59 == 1) { ldv_setup_59(); ldv_state_variable_59 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48104; default: ldv_stop(); } ldv_48104: ; return; } } bool ldv_queue_work_on_797(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_798(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_799(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_800(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool 
ldv_queue_delayed_work_on_801(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; bool ldv_queue_work_on_811(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_813(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_812(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_815(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_814(struct workqueue_struct *ldv_func_arg1 ) ; extern struct drm_plane *drm_plane_from_index(struct drm_device * , int ) ; extern void drm_atomic_helper_wait_for_vblanks(struct drm_device * , struct drm_atomic_state * ) ; extern void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc * , struct drm_crtc_state * ) ; extern void drm_atomic_helper_crtc_destroy_state(struct drm_crtc * , struct drm_crtc_state * ) ; int intel_connector_atomic_get_property(struct drm_connector *connector , struct drm_connector_state const *state , struct drm_property *property , uint64_t *val ) ; int intel_atomic_check(struct drm_device *dev , struct drm_atomic_state *state ) { int nplanes ; int ncrtcs ; int nconnectors ; enum pipe nuclear_pipe ; struct intel_crtc *nuclear_crtc ; struct intel_crtc_state *crtc_state ; int ret ; int i ; bool 
not_nuclear ; struct intel_plane *plane ; struct drm_plane const *__mptr ; long tmp ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr___0 ; struct drm_crtc_state const *__mptr___1 ; long tmp___0 ; { nplanes = dev->mode_config.num_total_plane; ncrtcs = dev->mode_config.num_crtc; nconnectors = dev->mode_config.num_connector; nuclear_pipe = -1; nuclear_crtc = (struct intel_crtc *)0; crtc_state = (struct intel_crtc_state *)0; not_nuclear = 0; i = 0; goto ldv_48188; ldv_48187: __mptr = (struct drm_plane const *)*(state->planes + (unsigned long )i); plane = (struct intel_plane *)__mptr; if ((unsigned long )plane == (unsigned long )((struct intel_plane *)0)) { goto ldv_48185; } else { } if ((int )nuclear_pipe == -1) { nuclear_pipe = plane->pipe; } else if ((int )plane->pipe != (int )nuclear_pipe) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_atomic_check", "i915 only support atomic plane operations on a single CRTC at the moment\n"); } else { } return (-22); } else { } ldv_48185: i = i + 1; ldv_48188: ; if (i < nplanes) { goto ldv_48187; } else { } state->allow_modeset = 0; i = 0; goto ldv_48196; ldv_48195: __mptr___0 = (struct drm_crtc const *)*(state->crtcs + (unsigned long )i); crtc = (struct intel_crtc *)__mptr___0; if ((unsigned long )crtc != (unsigned long )((struct intel_crtc *)0)) { memset((void *)(& crtc->atomic), 0, 32UL); } else { } if ((unsigned long )crtc != (unsigned long )((struct intel_crtc *)0) && (int )crtc->pipe != (int )nuclear_pipe) { not_nuclear = 1; } else { } if ((unsigned long )crtc != (unsigned long )((struct intel_crtc *)0) && (int )crtc->pipe == (int )nuclear_pipe) { nuclear_crtc = crtc; __mptr___1 = (struct drm_crtc_state const *)*(state->crtc_states + (unsigned long )i); crtc_state = (struct intel_crtc_state *)__mptr___1; } else { } i = i + 1; ldv_48196: ; if (i < ncrtcs) { goto ldv_48195; } else { } i = 0; goto ldv_48199; ldv_48198: ; if ((unsigned long )*(state->connectors + 
(unsigned long )i) != (unsigned long )((struct drm_connector *)0)) { not_nuclear = 1; } else { } i = i + 1; ldv_48199: ; if (i < nconnectors) { goto ldv_48198; } else { } if ((int )not_nuclear) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_atomic_check", "i915 only supports atomic plane operations at the moment\n"); } else { } return (-22); } else { } ret = drm_atomic_helper_check_planes(dev, state); if (ret != 0) { return (ret); } else { } ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state); if (ret != 0) { return (ret); } else { } return (ret); } } int intel_atomic_commit(struct drm_device *dev , struct drm_atomic_state *state , bool async ) { int ret ; int i ; long tmp ; struct drm_plane *plane ; struct drm_plane_state *__tmp ; struct drm_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_crtc_state const *__mptr___0 ; struct drm_crtc const *__mptr___1 ; struct drm_i915_private *__p ; { if ((int )async) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_atomic_commit", "i915 does not yet support async commit\n"); } else { } return (-22); } else { } ret = drm_atomic_helper_prepare_planes(dev, state); if (ret != 0) { return (ret); } else { } i = 0; goto ldv_48213; ldv_48212: plane = *(state->planes + (unsigned long )i); if ((unsigned long )plane == (unsigned long )((struct drm_plane *)0)) { goto ldv_48210; } else { } (plane->state)->state = state; __tmp = *(state->plane_states + (unsigned long )i); *(state->plane_states + (unsigned long )i) = plane->state; plane->state = __tmp; (plane->state)->state = (struct drm_atomic_state *)0; ldv_48210: i = i + 1; ldv_48213: ; if (dev->mode_config.num_total_plane > i) { goto ldv_48212; } else { } i = 0; goto ldv_48230; ldv_48229: crtc = *(state->crtcs + (unsigned long )i); if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { goto ldv_48216; } else { } __mptr = (struct drm_crtc 
/* NOTE(review): CIL-flattened LDV harness source. This physical line resumes
 * mid-way through intel_atomic_commit() (the split cast continues from the
 * previous line) and ends mid-way through intel_crtc_duplicate_state().
 * Only comments are added; all code tokens are unchanged. */
const *)crtc; __mptr___0 = (struct drm_crtc_state const *)*(state->crtc_states + (unsigned long )i); (((struct intel_crtc *)__mptr)->config)->scaler_state = ((struct intel_crtc_state *)__mptr___0)->scaler_state; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U) { __mptr___1 = (struct drm_crtc const *)crtc; skl_detach_scalers((struct intel_crtc *)__mptr___1); } else { } ldv_48216: i = i + 1; ldv_48230: ; if (dev->mode_config.num_crtc > i) { goto ldv_48229; } else { } drm_atomic_helper_commit_planes(dev, state); drm_atomic_helper_wait_for_vblanks(dev, state); drm_atomic_helper_cleanup_planes(dev, state); drm_atomic_state_free(state); return (0); } }
/* Connector .atomic_get_property hook: linear scan of the connector's
 * property table (goto-based loop from CIL lowering); on a match the stored
 * value is written to *val and 0 is returned, otherwise -22 (-EINVAL). */
int intel_connector_atomic_get_property(struct drm_connector *connector , struct drm_connector_state const *state , struct drm_property *property , uint64_t *val ) { int i ; { i = 0; goto ldv_48240; ldv_48239: ; if ((unsigned long )(connector->base.properties)->properties[i] == (unsigned long )property) { *val = (connector->base.properties)->values[i]; return (0); } else { } i = i + 1; ldv_48240: ; if ((connector->base.properties)->count > i) { goto ldv_48239; } else { } return (-22); } }
/* CRTC .atomic_duplicate_state hook: kmemdup()s intel_crtc->config (752
 * bytes; 208U is presumably GFP_KERNEL - confirm against the original
 * source), falling back to kzalloc() after the WARN_ON when config is NULL.
 * The body continues on the next physical line. */
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_crtc_state *crtc_state ; void *tmp ; void *tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; __ret_warn_on = (unsigned long )intel_crtc->config == (unsigned long )((struct intel_crtc_state *)0); tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_atomic.c", 247, "WARN_ON(!intel_crtc->config)"); }
else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { tmp = kzalloc(752UL, 208U); crtc_state = (struct intel_crtc_state *)tmp; } else { tmp___0 = kmemdup((void const *)intel_crtc->config, 752UL, 208U); crtc_state = (struct intel_crtc_state *)tmp___0; } if ((unsigned long )crtc_state == (unsigned long )((struct intel_crtc_state *)0)) { return ((struct drm_crtc_state *)0); } else { } __drm_atomic_helper_crtc_duplicate_state(crtc, & crtc_state->base); crtc_state->base.crtc = crtc; return (& crtc_state->base); } } void intel_crtc_destroy_state(struct drm_crtc *crtc , struct drm_crtc_state *state ) { { drm_atomic_helper_crtc_destroy_state(crtc, state); return; } } int intel_atomic_setup_scalers(struct drm_device *dev , struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) { struct drm_plane *plane ; struct intel_plane *intel_plane ; struct intel_plane_state *plane_state ; struct intel_crtc_scaler_state *scaler_state ; struct drm_atomic_state *drm_state ; int num_scalers_need ; int i ; int j ; struct drm_i915_private *__p ; unsigned int tmp ; long tmp___0 ; long tmp___1 ; int *scaler_id ; struct drm_plane_state *state ; long tmp___2 ; long tmp___3 ; bool tmp___4 ; struct drm_plane const *__mptr ; int __ret_warn_on ; long tmp___5 ; long tmp___6 ; struct drm_plane_state const *__mptr___0 ; long tmp___7 ; long tmp___8 ; int __ret_warn_on___0 ; long tmp___9 ; long tmp___10 ; { plane = (struct drm_plane *)0; plane_state = (struct intel_plane_state *)0; __p = to_i915((struct drm_device const *)dev); if (((unsigned int )((unsigned char )__p->info.gen) <= 8U || (unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0)) || (unsigned long )crtc_state == (unsigned long )((struct intel_crtc_state *)0)) { return (0); } else { } scaler_state = & crtc_state->scaler_state; drm_state = crtc_state->base.state; tmp = __arch_hweight32(scaler_state->scaler_users); num_scalers_need = (int )tmp; tmp___0 = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_atomic_setup_scalers", "crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n", crtc_state, num_scalers_need, intel_crtc->num_scalers, scaler_state->scaler_users); } else { } if (intel_crtc->num_scalers < num_scalers_need) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_atomic_setup_scalers", "Too many scaling requests %d > %d\n", num_scalers_need, intel_crtc->num_scalers); } else { } return (-22); } else { } i = 0; goto ldv_48290; ldv_48289: ; if ((scaler_state->scaler_users & (unsigned int )(1 << i)) == 0U) { goto ldv_48276; } else { } if (i == 31) { scaler_id = & scaler_state->scaler_id; } else { if ((unsigned long )drm_state == (unsigned long )((struct drm_atomic_state *)0)) { goto ldv_48276; } else { } plane = *(drm_state->planes + (unsigned long )i); if ((unsigned long )plane == (unsigned long )((struct drm_plane *)0)) { plane = drm_plane_from_index(dev, i); state = drm_atomic_get_plane_state(drm_state, plane); tmp___4 = IS_ERR((void const *)state); if ((int )tmp___4) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_atomic_setup_scalers", "Failed to add [PLANE:%d] to drm_state\n", plane->base.id); } else { } tmp___3 = PTR_ERR((void const *)state); return ((int )tmp___3); } else { } } else { } __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; __ret_warn_on = (int )intel_plane->pipe != (int )intel_crtc->pipe; tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_atomic.c", 373, "WARN_ON(intel_plane->pipe != intel_crtc->pipe)"); } else { } tmp___6 = 
ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___6 != 0L) { goto ldv_48276; } else { } __mptr___0 = (struct drm_plane_state const *)*(drm_state->plane_states + (unsigned long )i); plane_state = (struct intel_plane_state *)__mptr___0; scaler_id = & plane_state->scaler_id; } if (*scaler_id < 0) { j = 0; goto ldv_48286; ldv_48285: ; if (scaler_state->scalers[j].in_use == 0) { scaler_state->scalers[j].in_use = 1; *scaler_id = scaler_state->scalers[j].id; tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_atomic_setup_scalers", "Attached scaler id %u.%u to %s:%d\n", (int )intel_crtc->pipe, i == 31 ? scaler_state->scaler_id : plane_state->scaler_id, i == 31 ? (char *)"CRTC" : (char *)"PLANE", i == 31 ? intel_crtc->base.base.id : plane->base.id); } else { } goto ldv_48284; } else { } j = j + 1; ldv_48286: ; if (intel_crtc->num_scalers > j) { goto ldv_48285; } else { } ldv_48284: ; } else { } __ret_warn_on___0 = *scaler_id < 0; tmp___9 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___9 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_atomic.c", 399, "WARN_ON(*scaler_id < 0)"); } else { } tmp___10 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___10 != 0L) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_atomic_setup_scalers", "Cannot find scaler for %s:%d\n", i == 31 ? (char *)"CRTC" : (char *)"PLANE", i == 31 ? 
/* NOTE(review): this physical line finishes intel_atomic_setup_scalers()
 * (started on an earlier line) and then carries several LDV-generated
 * workqueue wrappers. Only comments are added; code tokens unchanged. */
intel_crtc->base.base.id : plane->base.id); } else { } goto ldv_48276; } else { } if (num_scalers_need == 1 && (int )intel_crtc->pipe != 2) { *scaler_id = 0; scaler_state->scalers[0].in_use = 1; scaler_state->scalers[0].mode = 268435456U; scaler_state->scalers[1].in_use = 0; } else { scaler_state->scalers[*scaler_id].mode = 0U; } ldv_48276: i = i + 1; ldv_48290: ; if ((unsigned int )i <= 31U) { goto ldv_48289; } else { } return (0); } }
/* LDV environment-model wrapper: forwards to queue_work_on(), then records
 * the queued work item in the verifier's model via activate_work_18(...,2)
 * so the checker can later invoke the work handler. */
bool ldv_queue_work_on_811(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper for queue_delayed_work_on(); registers the embedded
 * work_struct with the verifier model. */
bool ldv_queue_delayed_work_on_812(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Duplicate LDV wrapper for queue_work_on() (one instance per call site in
 * the original driver, hence the numeric suffixes). */
bool ldv_queue_work_on_813(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* LDV wrapper for flush_workqueue(): after the real flush it drains and
 * disables all modeled pending work via call_and_disable_all_18(2). */
void ldv_flush_workqueue_814(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* LDV wrapper for queue_delayed_work_on(); body continues on the next
 * physical line. */
bool ldv_queue_delayed_work_on_815(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2);
return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; bool ldv_queue_work_on_825(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_827(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_826(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_829(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_828(struct workqueue_struct *ldv_func_arg1 ) ; extern void __drm_atomic_helper_plane_duplicate_state(struct drm_plane * , struct drm_plane_state * ) ; extern void drm_atomic_helper_plane_destroy_state(struct drm_plane * , struct drm_plane_state * ) ; __inline static struct intel_crtc_state *intel_atomic_get_crtc_state___1(struct drm_atomic_state *state , struct intel_crtc *crtc ) { struct drm_crtc_state *crtc_state ; void *tmp ; bool tmp___0 ; struct drm_crtc_state const *__mptr ; { crtc_state = drm_atomic_get_crtc_state(state, & crtc->base); tmp___0 = IS_ERR((void const *)crtc_state); if ((int )tmp___0) { tmp = ERR_CAST((void const *)crtc_state); return ((struct intel_crtc_state *)tmp); } else { } __mptr = (struct drm_crtc_state const *)crtc_state; return ((struct intel_crtc_state *)__mptr); } } struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane ) { struct intel_plane_state *state ; void *tmp ; { tmp = kzalloc(136UL, 208U); state = (struct intel_plane_state *)tmp; if ((unsigned long )state == (unsigned long )((struct intel_plane_state *)0)) { return ((struct intel_plane_state *)0); } else { } state->base.plane = plane; state->base.rotation = 1U; return (state); 
} }
/* Plane .atomic_duplicate_state hook (from intel_atomic_plane.c): kmemdup()s
 * the current plane state (136 bytes); after the WARN_ON it falls back to a
 * freshly created state when plane->state is NULL. Returns NULL on
 * allocation failure. */
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane ) { struct drm_plane_state *state ; struct intel_plane_state *intel_state ; void *tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; { __ret_warn_on = (unsigned long )plane->state == (unsigned long )((struct drm_plane_state *)0); tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_atomic_plane.c", 78, "WARN_ON(!plane->state)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { intel_state = intel_create_plane_state(plane); } else { tmp = kmemdup((void const *)plane->state, 136UL, 208U); intel_state = (struct intel_plane_state *)tmp; } if ((unsigned long )intel_state == (unsigned long )((struct intel_plane_state *)0)) { return ((struct drm_plane_state *)0); } else { } state = & intel_state->base; __drm_atomic_helper_plane_duplicate_state(plane, state); return (state); } }
/* Plane .atomic_destroy_state hook: thin forwarder to the DRM helper. */
void intel_plane_destroy_state(struct drm_plane *plane , struct drm_plane_state *state ) { { drm_atomic_helper_plane_destroy_state(plane, state); return; } }
/* Plane .atomic_check hook (head only; the body continues on the following
 * physical lines): validates the plane update against the CRTC state. */
static int intel_plane_atomic_check(struct drm_plane *plane , struct drm_plane_state *state ) { struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct intel_crtc_state *crtc_state ; struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct intel_plane_state *intel_state ; struct drm_plane_state const *__mptr___0 ; struct drm_crtc const *__mptr___1 ; long tmp ; bool tmp___0 ; unsigned int tmp___1 ; long tmp___2 ; char const *tmp___3 ; long tmp___4 ; bool tmp___5 ; int tmp___6 ; { crtc = state->crtc; __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; __mptr___0 = (struct drm_plane_state const *)state; intel_state = (struct
intel_plane_state *)__mptr___0; crtc = (unsigned long )crtc == (unsigned long )((struct drm_crtc *)0) ? plane->crtc : crtc; __mptr___1 = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr___1; if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { return (0); } else { } if ((unsigned long )state->state != (unsigned long )((struct drm_atomic_state *)0)) { crtc_state = intel_atomic_get_crtc_state___1(state->state, intel_crtc); tmp___0 = IS_ERR((void const *)crtc_state); if ((int )tmp___0) { tmp = PTR_ERR((void const *)crtc_state); return ((int )tmp); } else { } } else { crtc_state = intel_crtc->config; } intel_state->src.x1 = (int )state->src_x; intel_state->src.y1 = (int )state->src_y; intel_state->src.x2 = (int )(state->src_x + state->src_w); intel_state->src.y2 = (int )(state->src_y + state->src_h); intel_state->dst.x1 = state->crtc_x; intel_state->dst.y1 = state->crtc_y; intel_state->dst.x2 = (int )((uint32_t )state->crtc_x + state->crtc_w); intel_state->dst.y2 = (int )((uint32_t )state->crtc_y + state->crtc_h); intel_state->clip.x1 = 0; intel_state->clip.y1 = 0; intel_state->clip.x2 = (int )crtc_state->base.active ? crtc_state->pipe_src_w : 0; intel_state->clip.y2 = (int )crtc_state->base.active ? 
crtc_state->pipe_src_h : 0; if ((unsigned long )state->fb == (unsigned long )((struct drm_framebuffer *)0) && (unsigned long )(plane->state)->fb != (unsigned long )((struct drm_framebuffer *)0)) { tmp___1 = drm_plane_index(plane); intel_crtc->atomic.disabled_planes = intel_crtc->atomic.disabled_planes | (unsigned int )(1 << (int )tmp___1); } else { } if ((unsigned long )state->fb != (unsigned long )((struct drm_framebuffer *)0)) { tmp___5 = intel_rotation_90_or_270(state->rotation); if ((int )tmp___5) { if ((state->fb)->modifier[0] != 72057594037927938ULL && (state->fb)->modifier[0] != 72057594037927939ULL) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_plane_atomic_check", "Y/Yf tiling required for 90/270!\n"); } else { } return (-22); } else { } switch ((state->fb)->pixel_format) { case 538982467U: ; case 909199186U: tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = drm_get_format_name((state->fb)->pixel_format); drm_ut_debug_printk("intel_plane_atomic_check", "Unsupported pixel format %s for 90/270!\n", tmp___3); } else { } return (-22); default: ; goto ldv_48203; } ldv_48203: ; } else { } } else { } tmp___6 = (*(intel_plane->check_plane))(plane, intel_state); return (tmp___6); } } static void intel_plane_atomic_update(struct drm_plane *plane , struct drm_plane_state *old_state ) { struct intel_plane *intel_plane ; struct drm_plane const *__mptr ; struct intel_plane_state *intel_state ; struct drm_plane_state const *__mptr___0 ; { __mptr = (struct drm_plane const *)plane; intel_plane = (struct intel_plane *)__mptr; __mptr___0 = (struct drm_plane_state const *)plane->state; intel_state = (struct intel_plane_state *)__mptr___0; (*(intel_plane->commit_plane))(plane, intel_state); return; } } struct drm_plane_helper_funcs const intel_plane_helper_funcs = {& intel_prepare_plane_fb, & intel_cleanup_plane_fb, & intel_plane_atomic_check, & intel_plane_atomic_update, 0}; 
/* Plane .atomic_get_property hook: i915 attaches no driver-specific plane
 * properties, so every lookup logs a DRM debug message (when drm_debug bit
 * 2 is set) and returns -22 (-EINVAL). */
int intel_plane_atomic_get_property(struct drm_plane *plane , struct drm_plane_state const *state , struct drm_property *property , uint64_t *val ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_plane_atomic_get_property", "Unknown plane property \'%s\'\n", (char *)(& property->name)); } else { } return (-22); } }
/* Plane .atomic_set_property hook: mirror of the getter above; always
 * rejects with -22 (-EINVAL) after the optional debug print. */
int intel_plane_atomic_set_property(struct drm_plane *plane , struct drm_plane_state *state , struct drm_property *property , uint64_t val ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_plane_atomic_set_property", "Unknown plane property \'%s\'\n", (char *)(& property->name)); } else { } return (-22); } }
/* LDV-generated initializer: allocates zeroed stub objects (sizes match the
 * corresponding struct layouts in this build) used as arguments when the
 * generated main drives the drm_plane_helper_funcs callbacks. */
void ldv_initialize_drm_plane_helper_funcs_58(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; { tmp = ldv_init_zalloc(704UL); intel_plane_helper_funcs_group0 = (struct drm_plane *)tmp; tmp___0 = ldv_init_zalloc(168UL); intel_plane_helper_funcs_group2 = (struct drm_framebuffer *)tmp___0; tmp___1 = ldv_init_zalloc(80UL); intel_plane_helper_funcs_group1 = (struct drm_plane_state const *)tmp___1; tmp___2 = ldv_init_zalloc(80UL); intel_plane_helper_funcs_group3 = (struct drm_plane_state *)tmp___2; return; } }
/* LDV-generated dispatcher: nondeterministically (via __VERIFIER_nondet_int)
 * invokes one of the drm_plane_helper_funcs callbacks on the stub objects,
 * modeling arbitrary callback interleavings for the verifier. The switch
 * continues on the next physical line. */
void ldv_main_exported_58(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_58 == 1) { intel_plane_atomic_check(intel_plane_helper_funcs_group0, intel_plane_helper_funcs_group3); ldv_state_variable_58 = 1; } else { } goto ldv_48236; case 1: ; if (ldv_state_variable_58 == 1) { intel_plane_atomic_update(intel_plane_helper_funcs_group0, intel_plane_helper_funcs_group3); ldv_state_variable_58 = 1; } else { } goto ldv_48236; case 2: ; if (ldv_state_variable_58 == 1) { intel_cleanup_plane_fb(intel_plane_helper_funcs_group0, intel_plane_helper_funcs_group2, intel_plane_helper_funcs_group1); ldv_state_variable_58 = 1; } else { } goto ldv_48236; case 3: ; if
(ldv_state_variable_58 == 1) { intel_prepare_plane_fb(intel_plane_helper_funcs_group0, intel_plane_helper_funcs_group2, intel_plane_helper_funcs_group1); ldv_state_variable_58 = 1; } else { } goto ldv_48236; default: ldv_stop(); } ldv_48236: ; return; } } bool ldv_queue_work_on_825(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_826(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_827(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_828(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_829(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___19(void) { unsigned long __ret ; unsigned 
long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } bool ldv_queue_work_on_839(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_841(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_840(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_843(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_842(struct workqueue_struct *ldv_func_arg1 ) ; extern void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx * , uint32_t ) ; extern void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx * ) ; extern void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx * ) ; extern int drm_connector_init(struct drm_device * , struct drm_connector * , struct drm_connector_funcs const * , int ) ; extern int drm_connector_register(struct drm_connector * ) ; extern void 
drm_connector_cleanup(struct drm_connector * ) ; extern int drm_encoder_init(struct drm_device * , struct drm_encoder * , struct drm_encoder_funcs const * , int ) ; __inline static bool drm_can_sleep___10(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39743; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39743; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39743; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39743; default: __bad_percpu_size(); } ldv_39743: pscr_ret__ = pfo_ret__; goto ldv_39749; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39753; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39753; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39753; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39753; default: __bad_percpu_size(); } ldv_39753: pscr_ret__ = pfo_ret_____0; goto ldv_39749; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39762; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39762; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39762; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39762; default: __bad_percpu_size(); } ldv_39762: pscr_ret__ = pfo_ret_____1; goto ldv_39749; case 8UL: ; switch (4UL) { case 
1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39771; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39771; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39771; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39771; default: __bad_percpu_size(); } ldv_39771: pscr_ret__ = pfo_ret_____2; goto ldv_39749; default: __bad_size_call_parameter(); goto ldv_39749; } ldv_39749: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___19(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } extern struct drm_connector_state *drm_atomic_helper_connector_duplicate_state(struct drm_connector * ) ; extern void drm_atomic_helper_connector_destroy_state(struct drm_connector * , struct drm_connector_state * ) ; __inline static void drm_connector_helper_add(struct drm_connector *connector , struct drm_connector_helper_funcs const *funcs ) { { connector->helper_private = (void const *)funcs; return; } } extern int drm_helper_probe_single_connector_modes(struct drm_connector * , uint32_t , uint32_t ) ; struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv , unsigned int pin ) ; void intel_gmbus_force_bit(struct i2c_adapter *adapter , bool force_bit ) ; __inline static bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter ) { struct i2c_adapter const *__mptr ; { __mptr = (struct i2c_adapter const *)adapter; return (((struct intel_gmbus *)__mptr)->force_bit != 0U); } } bool intel_ddi_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) ; void intel_ddi_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) ; static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder ) { struct intel_encoder const *__mptr ; { 
__mptr = (struct intel_encoder const *)encoder; return ((struct intel_crt *)__mptr); } } static struct intel_crt *intel_attached_crt(struct drm_connector *connector ) { struct intel_encoder *tmp ; struct intel_crt *tmp___0 ; { tmp = intel_attached_encoder(connector); tmp___0 = intel_encoder_to_crt(tmp); return (tmp___0); } } static bool intel_crt_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crt *crt ; struct intel_crt *tmp ; enum intel_display_power_domain power_domain ; u32 tmp___0 ; bool tmp___1 ; int tmp___2 ; struct drm_i915_private *__p ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_encoder_to_crt(encoder); crt = tmp; power_domain = intel_display_port_power_domain(encoder); tmp___1 = intel_display_power_is_enabled(dev_priv, power_domain); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((int )tmp___0 >= 0) { return (0); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 2U) { *pipe = (enum pipe )((tmp___0 & 1610612736U) >> 29); } else { *pipe = (enum pipe )((tmp___0 & 1073741824U) >> 30); } return (1); } } static unsigned int intel_crt_get_flags(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_crt *crt ; struct intel_crt *tmp ; u32 tmp___0 ; u32 flags ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = intel_encoder_to_crt(encoder); crt = tmp; flags = 0U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((tmp___0 & 8U) != 0U) { flags = flags | 1U; } else { flags = flags | 2U; } if ((tmp___0 & 16U) != 0U) { flags = flags | 4U; } else { flags = flags | 8U; } return (flags); } } static void intel_crt_get_config(struct intel_encoder *encoder , struct 
intel_crtc_state *pipe_config ) { struct drm_device *dev ; int dotclock ; unsigned int tmp ; struct drm_i915_private *__p ; { dev = encoder->base.dev; tmp = intel_crt_get_flags(encoder); pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | tmp; dotclock = pipe_config->port_clock; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { ironlake_check_encoder_dotclock((struct intel_crtc_state const *)pipe_config, dotclock); } else { } pipe_config->base.adjusted_mode.crtc_clock = dotclock; return; } } static void hsw_crt_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { unsigned int tmp ; { intel_ddi_get_config(encoder, pipe_config); pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags & 4294967280U; tmp = intel_crt_get_flags(encoder); pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | tmp; return; } } static void hsw_crt_pre_enable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; uint32_t tmp ; long tmp___0 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286752L, 1); __ret_warn_on = (int )tmp < 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_crt.c", 146, "SPLL already enabled\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286752L, 2483027968U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286752L, 0); __const_udelay(85900UL); return; } } static void intel_crt_set_dpms(struct intel_encoder *encoder , int mode ) { struct drm_device *dev ; struct 
drm_i915_private *dev_priv ; struct intel_crt *crt ; struct intel_crt *tmp ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *adjusted_mode ; u32 adpa ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_encoder_to_crt(encoder); crt = tmp; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; adjusted_mode = & (crtc->config)->base.adjusted_mode; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { adpa = 15990784U; } else { adpa = 0U; } if ((int )adjusted_mode->flags & 1) { adpa = adpa | 8U; } else { } if ((adjusted_mode->flags & 4U) != 0U) { adpa = adpa | 16U; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type == 3U) { } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { adpa = (u32 )((int )crtc->pipe << 29) | adpa; } else if ((int )crtc->pipe == 0) { adpa = adpa; } else { adpa = adpa | 1073741824U; } } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type == 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(int )crtc->pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393248U), 0U, 1); } else { } switch (mode) { case 0: adpa = adpa | 2147483648U; goto ldv_48463; case 1: adpa = adpa | 2147485696U; goto ldv_48463; case 2: adpa = adpa | 2147484672U; goto ldv_48463; case 3: adpa = adpa | 3072U; goto ldv_48463; } ldv_48463: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )crt->adpa_reg, adpa, 1); return; } } static void intel_disable_crt(struct intel_encoder *encoder ) { { intel_crt_set_dpms(encoder, 3); return; } } static 
void pch_disable_crt(struct intel_encoder *encoder ) { { return; } } static void pch_post_disable_crt(struct intel_encoder *encoder ) { { intel_disable_crt(encoder); return; } } static void hsw_crt_post_disable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t val ; long tmp ; int __ret_warn_on ; long tmp___0 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_crt_post_disable", "Disabling SPLL\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286752L, 1); __ret_warn_on = (int )val >= 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_crt.c", 227, "WARN_ON(!(val & SPLL_PLL_ENABLE))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286752L, val & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286752L, 0); return; } } static void intel_enable_crt(struct intel_encoder *encoder ) { struct intel_crt *crt ; struct intel_crt *tmp ; { tmp = intel_encoder_to_crt(encoder); crt = tmp; intel_crt_set_dpms(encoder, (crt->connector)->base.dpms); return; } } static void intel_crt_dpms(struct drm_connector *connector , int mode ) { struct drm_device *dev ; struct intel_encoder *encoder ; struct intel_encoder *tmp ; struct drm_crtc *crtc ; int old_dpms ; struct drm_i915_private *__p ; { dev = connector->dev; tmp = intel_attached_encoder(connector); encoder = tmp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U && mode != 0) { mode = 3; } else { } if (connector->dpms == mode) { return; } else { } 
old_dpms = connector->dpms; connector->dpms = mode; crtc = encoder->base.crtc; if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { encoder->connectors_active = 0; return; } else { } if (mode == 3) { encoder->connectors_active = 0; } else { encoder->connectors_active = 1; } if (mode < old_dpms) { intel_crtc_update_dpms(crtc); intel_crt_set_dpms(encoder, mode); } else { intel_crt_set_dpms(encoder, mode); intel_crtc_update_dpms(crtc); } intel_modeset_check_state(connector->dev); return; } } static enum drm_mode_status intel_crt_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { struct drm_device *dev ; int max_clock ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp ; { dev = connector->dev; max_clock = 0; if ((mode->flags & 32U) != 0U) { return (8); } else { } if (mode->clock <= 24999) { return (16); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { max_clock = 350000; } else { max_clock = 400000; } if (mode->clock > max_clock) { return (15); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 3U) { tmp = ironlake_get_lanes_required(mode->clock, 270000, 24); if (tmp > 2) { return (15); } else { } } else { } return (0); } } static bool intel_crt_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = encoder->base.dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { pipe_config->has_pch_encoder = 1; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 3U) { pipe_config->pipe_bpp = 24; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 46UL) != 0U) { pipe_config->ddi_pll_sel 
= 1610612736U; pipe_config->port_clock = 270000; } else { } return (1); } } static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector ) { struct drm_device *dev ; struct intel_crt *crt ; struct intel_crt *tmp ; struct drm_i915_private *dev_priv ; u32 adpa ; bool ret ; bool turn_off_dac ; struct drm_i915_private *__p ; u32 save_adpa ; long tmp___0 ; long tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; long tmp___6 ; { dev = connector->dev; tmp = intel_attached_crt(connector); crt = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )crt->force_hotplug_required) { __p = to_i915((struct drm_device const *)dev); turn_off_dac = (unsigned int )__p->pch_type != 0U; crt->force_hotplug_required = 0; adpa = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); save_adpa = adpa; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_ironlake_crt_detect_hotplug", "trigger hotplug detect cycle: adpa=0x%x\n", adpa); } else { } adpa = adpa | 65536U; if ((int )turn_off_dac) { adpa = adpa & 2147483647U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )crt->adpa_reg, adpa, 1); tmp___2 = msecs_to_jiffies(1000U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48571; ldv_48570: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((tmp___3 & 65536U) != 0U) { ret__ = -110; } else { } goto ldv_48569; } else { } tmp___4 = drm_can_sleep___10(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48571: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((tmp___5 & 65536U) != 0U) { goto ldv_48570; } else { } ldv_48569: ; if (ret__ != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if 
(tmp___1 != 0L) { drm_ut_debug_printk("intel_ironlake_crt_detect_hotplug", "timed out waiting for FORCE_TRIGGER"); } else { } } else { } if ((int )turn_off_dac) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )crt->adpa_reg, save_adpa, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 0); } else { } } else { } adpa = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((adpa & 50331648U) != 0U) { ret = 1; } else { ret = 0; } tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_ironlake_crt_detect_hotplug", "ironlake hotplug adpa=0x%x, result %d\n", adpa, (int )ret); } else { } return (ret); } } static bool valleyview_crt_detect_hotplug(struct drm_connector *connector ) { struct drm_device *dev ; struct intel_crt *crt ; struct intel_crt *tmp ; struct drm_i915_private *dev_priv ; u32 adpa ; bool ret ; u32 save_adpa ; long tmp___0 ; long tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; long tmp___6 ; { dev = connector->dev; tmp = intel_attached_crt(connector); crt = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; adpa = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); save_adpa = adpa; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("valleyview_crt_detect_hotplug", "trigger hotplug detect cycle: adpa=0x%x\n", adpa); } else { } adpa = adpa | 65536U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )crt->adpa_reg, adpa, 1); tmp___2 = msecs_to_jiffies(1000U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48593; ldv_48592: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((tmp___3 & 65536U) != 0U) { ret__ = -110; } else { } goto ldv_48591; } else { } tmp___4 
= drm_can_sleep___10(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48593: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((tmp___5 & 65536U) != 0U) { goto ldv_48592; } else { } ldv_48591: ; if (ret__ != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("valleyview_crt_detect_hotplug", "timed out waiting for FORCE_TRIGGER"); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )crt->adpa_reg, save_adpa, 1); } else { } adpa = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); if ((adpa & 50331648U) != 0U) { ret = 1; } else { ret = 0; } tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("valleyview_crt_detect_hotplug", "valleyview hotplug adpa=0x%x, result %d\n", adpa, (int )ret); } else { } return (ret); } } static bool intel_crt_detect_hotplug(struct drm_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 hotplug_en ; u32 orig ; u32 stat ; bool ret ; int i ; int tries ; bool tmp ; struct drm_i915_private *__p ; bool tmp___0 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; long tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; tries = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { tmp = intel_ironlake_crt_detect_hotplug(connector); return (tmp); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { tmp___0 = valleyview_crt_detect_hotplug(connector); return (tmp___0); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) 
!= 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) != 10818U) { tries = 2; } else { tries = 1; } } else { tries = 1; } orig = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 1); hotplug_en = orig; hotplug_en = hotplug_en | 8U; i = 0; goto ldv_48644; ldv_48643: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), hotplug_en, 1); tmp___2 = msecs_to_jiffies(1000U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48640; ldv_48639: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 1); if ((tmp___3 & 8U) != 0U) { ret__ = -110; } else { } goto ldv_48638; } else { } tmp___4 = drm_can_sleep___10(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48640: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), 1); if ((tmp___5 & 8U) != 0U) { goto ldv_48639; } else { } ldv_48638: ; if (ret__ != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_crt_detect_hotplug", "timed out waiting for FORCE_DETECT to go off"); } else { } } else { } i = i + 1; ldv_48644: ; if (i < tries) { goto ldv_48643; } else { } stat = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 1); if ((stat & 768U) != 0U) { ret = 1; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 2048U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397584U), orig, 1); return 
(ret); } } static struct edid *intel_crt_get_edid(struct drm_connector *connector , struct i2c_adapter *i2c ) { struct edid *edid ; long tmp ; bool tmp___0 ; int tmp___1 ; { edid = drm_get_edid(connector, i2c); if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { tmp___0 = intel_gmbus_is_forced_bit(i2c); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_crt_get_edid", "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); } else { } intel_gmbus_force_bit(i2c, 1); edid = drm_get_edid(connector, i2c); intel_gmbus_force_bit(i2c, 0); } else { } } else { } return (edid); } } static int intel_crt_ddc_get_modes(struct drm_connector *connector , struct i2c_adapter *adapter ) { struct edid *edid ; int ret ; { edid = intel_crt_get_edid(connector, adapter); if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { return (0); } else { } ret = intel_connector_update_modes(connector, edid); kfree((void const *)edid); return (ret); } } static bool intel_crt_detect_ddc(struct drm_connector *connector ) { struct intel_crt *crt ; struct intel_crt *tmp ; struct drm_i915_private *dev_priv ; struct edid *edid ; struct i2c_adapter *i2c ; long tmp___0 ; bool is_digital ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = intel_attached_crt(connector); crt = tmp; dev_priv = (struct drm_i915_private *)(crt->base.base.dev)->dev_private; tmp___0 = ldv__builtin_expect((unsigned int )crt->base.type != 1U, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_crt.c"), "i" (512), "i" (12UL)); ldv_48665: ; goto ldv_48665; } else 
{ } i2c = intel_gmbus_get_adapter(dev_priv, (unsigned int )dev_priv->vbt.crt_ddc_pin); edid = intel_crt_get_edid(connector, i2c); if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { is_digital = ((int )edid->input & 128) != 0; if (! is_digital) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_crt_detect_ddc", "CRT detected via DDC:0x50 [EDID]\n"); } else { } return (1); } else { } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_crt_detect_ddc", "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } else { } } else { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_crt_detect_ddc", "CRT not detected via DDC:0x50 [no valid EDID found]\n"); } else { } } kfree((void const *)edid); return (0); } } static enum drm_connector_status intel_crt_load_detect(struct intel_crt *crt ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t pipe ; struct drm_crtc const *__mptr ; uint32_t save_bclrpat ; uint32_t save_vtotal ; uint32_t vtotal ; uint32_t vactive ; uint32_t vsample ; uint32_t vblank ; uint32_t vblank_start ; uint32_t vblank_end ; uint32_t dsl ; uint32_t bclrpat_reg ; uint32_t vtotal_reg ; uint32_t vblank_reg ; uint32_t vsync_reg ; uint32_t pipeconf_reg ; uint32_t pipe_dsl_reg ; uint8_t st00 ; enum drm_connector_status status ; long tmp ; uint32_t pipeconf ; uint32_t tmp___0 ; bool restore_vblank ; int count ; int detect ; uint32_t vsync ; uint32_t tmp___1 ; uint32_t vsync_start ; uint32_t tmp___2 ; uint32_t tmp___3 ; struct drm_i915_private *__p ; { dev = crt->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crt->base.base.crtc; pipe = (uint32_t )((struct intel_crtc *)__mptr)->pipe; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { 
drm_ut_debug_printk("intel_crt_load_detect", "starting load-detect on CRT\n"); } else { } bclrpat_reg = ((unsigned int )(dev_priv->info.trans_offsets[pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393248U; vtotal_reg = ((unsigned int )(dev_priv->info.trans_offsets[pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393228U; vblank_reg = ((unsigned int )(dev_priv->info.trans_offsets[pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393232U; vsync_reg = ((unsigned int )(dev_priv->info.trans_offsets[pipe] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393236U; pipeconf_reg = ((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U; pipe_dsl_reg = ((unsigned int )(dev_priv->info.pipe_offsets[pipe] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458752U; save_bclrpat = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )bclrpat_reg, 1); save_vtotal = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )vtotal_reg, 1); vblank = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )vblank_reg, 1); vtotal = ((save_vtotal >> 16) & 4095U) + 1U; vactive = (save_vtotal & 2047U) + 1U; vblank_start = (vblank & 4095U) + 1U; vblank_end = ((vblank >> 16) & 4095U) + 1U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bclrpat_reg, 5242960U, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) != 2U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pipeconf_reg, 1); pipeconf = tmp___0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pipeconf_reg, pipeconf | 33554432U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pipeconf_reg, 0); intel_wait_for_vblank(dev, (int 
)pipe); st00 = (*(dev_priv->uncore.funcs.mmio_readb))(dev_priv, 962L, 1); status = ((int )st00 & 16) != 0 ? 1 : 2; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pipeconf_reg, pipeconf, 1); } else { restore_vblank = 0; if (vblank_start <= vactive && vblank_end >= vtotal) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )vsync_reg, 1); vsync = tmp___1; vsync_start = (vsync & 65535U) + 1U; vblank_start = vsync_start; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )vblank_reg, (vblank_start - 1U) | ((vblank_end - 1U) << 16), 1); restore_vblank = 1; } else { } if (vblank_start - vactive >= vtotal - vblank_end) { vsample = (vblank_start + vactive) >> 1; } else { vsample = (vtotal + vblank_end) >> 1; } goto ldv_48707; ldv_48706: ; ldv_48707: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pipe_dsl_reg, 1); if (tmp___2 >= vactive) { goto ldv_48706; } else { } goto ldv_48710; ldv_48709: ; ldv_48710: dsl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pipe_dsl_reg, 1); if (dsl <= vsample) { goto ldv_48709; } else { } detect = 0; count = 0; ldv_48712: count = count + 1; st00 = (*(dev_priv->uncore.funcs.mmio_readb))(dev_priv, 962L, 1); if (((int )st00 & 16) != 0) { detect = detect + 1; } else { } tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pipe_dsl_reg, 1); if (tmp___3 == dsl) { goto ldv_48712; } else { } if ((int )restore_vblank) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )vblank_reg, vblank, 1); } else { } status = detect * 4 > count * 3 ? 
1 : 2; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bclrpat_reg, save_bclrpat, 1); return (status); } } static enum drm_connector_status intel_crt_detect(struct drm_connector *connector , bool force ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crt *crt ; struct intel_crt *tmp ; struct intel_encoder *intel_encoder ; enum intel_display_power_domain power_domain ; enum drm_connector_status status ; struct intel_load_detect_pipe tmp___0 ; struct drm_modeset_acquire_ctx ctx ; long tmp___1 ; long tmp___2 ; long tmp___3 ; bool tmp___4 ; struct drm_i915_private *__p ; bool tmp___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool tmp___6 ; bool tmp___7 ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_attached_crt(connector); crt = tmp; intel_encoder = & crt->base; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_crt_detect", "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, (int )force); } else { } power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { tmp___4 = intel_crt_detect_hotplug(connector); if ((int )tmp___4) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_crt_detect", "CRT detected via hotplug\n"); } else { } status = 1; goto out; } else { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_crt_detect", "CRT not detected via hotplug\n"); } else { } } } else { } tmp___5 = intel_crt_detect_ddc(connector); if ((int )tmp___5) { status = 1; goto out; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) != 0U && ! 
i915.load_detect_test) { status = 2; goto out; } else { } if (! force) { status = connector->status; goto out; } else { } drm_modeset_acquire_init(& ctx, 0U); tmp___7 = intel_get_load_detect_pipe(connector, (struct drm_display_mode *)0, & tmp___0, & ctx); if ((int )tmp___7) { tmp___6 = intel_crt_detect_ddc(connector); if ((int )tmp___6) { status = 1; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 3U) { status = intel_crt_load_detect(crt); } else { status = 3; } } intel_release_load_detect_pipe(connector, & tmp___0, & ctx); } else { status = 3; } drm_modeset_drop_locks(& ctx); drm_modeset_acquire_fini(& ctx); out: intel_display_power_put(dev_priv, power_domain); return (status); } } static void intel_crt_destroy(struct drm_connector *connector ) { { drm_connector_cleanup(connector); kfree((void const *)connector); return; } } static int intel_crt_get_modes(struct drm_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crt *crt ; struct intel_crt *tmp ; struct intel_encoder *intel_encoder ; enum intel_display_power_domain power_domain ; int ret ; struct i2c_adapter *i2c ; struct drm_i915_private *__p ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_attached_crt(connector); crt = tmp; intel_encoder = & crt->base; power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); i2c = intel_gmbus_get_adapter(dev_priv, (unsigned int )dev_priv->vbt.crt_ddc_pin); ret = intel_crt_ddc_get_modes(connector, i2c); if (ret != 0) { goto out; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) == 0U) { goto out; } else { } } i2c = intel_gmbus_get_adapter(dev_priv, 5U); ret = intel_crt_ddc_get_modes(connector, i2c); out: intel_display_power_put(dev_priv, power_domain); return (ret); } } static int 
intel_crt_set_property(struct drm_connector *connector , struct drm_property *property , uint64_t value ) { { return (0); } } static void intel_crt_reset(struct drm_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crt *crt ; struct intel_crt *tmp ; u32 adpa ; long tmp___0 ; struct drm_i915_private *__p ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_attached_crt(connector); crt = tmp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { adpa = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 1); adpa = adpa & 4227923967U; adpa = adpa | 15990784U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )crt->adpa_reg, adpa, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )crt->adpa_reg, 0); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_crt_reset", "crt adpa set to 0x%x\n", adpa); } else { } crt->force_hotplug_required = 1; } else { } return; } } static struct drm_connector_funcs const intel_crt_connector_funcs = {& intel_crt_dpms, 0, 0, & intel_crt_reset, & intel_crt_detect, & drm_helper_probe_single_connector_modes, & intel_crt_set_property, & intel_crt_destroy, 0, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property}; static struct drm_connector_helper_funcs const intel_crt_connector_helper_funcs = {& intel_crt_get_modes, & intel_crt_mode_valid, & intel_best_encoder}; static struct drm_encoder_funcs const intel_crt_enc_funcs = {0, & intel_encoder_destroy}; static int intel_no_crt_dmi_callback(struct dmi_system_id const *id ) { { printk("\016[drm] Skipping CRT initialization for %s\n", id->ident); return (1); } } static struct dmi_system_id const intel_no_crt[3U] = { {& intel_no_crt_dmi_callback, "ACER ZGB", {{4U, (unsigned char)0, {'A', 'C', 
'E', 'R', '\000'}}, {5U, (unsigned char)0, {'Z', 'G', 'B', '\000'}}}, 0}, {& intel_no_crt_dmi_callback, "DELL XPS 8700", {{4U, (unsigned char)0, {'D', 'e', 'l', 'l', ' ', 'I', 'n', 'c', '.', '\000'}}, {5U, (unsigned char)0, {'X', 'P', 'S', ' ', '8', '7', '0', '0', '\000'}}}, 0}}; void intel_crt_init(struct drm_device *dev ) { struct drm_connector *connector ; struct intel_crt *crt ; struct intel_connector *intel_connector ; struct drm_i915_private *dev_priv ; int tmp ; void *tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; u32 fdi_config ; uint32_t tmp___1 ; struct drm_i915_private *__p___8 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = dmi_check_system((struct dmi_system_id const *)(& intel_no_crt)); if (tmp != 0) { return; } else { } tmp___0 = kzalloc(232UL, 208U); crt = (struct intel_crt *)tmp___0; if ((unsigned long )crt == (unsigned long )((struct intel_crt *)0)) { return; } else { } intel_connector = intel_connector_alloc(); if ((unsigned long )intel_connector == (unsigned long )((struct intel_connector *)0)) { kfree((void const *)crt); return; } else { } connector = & intel_connector->base; crt->connector = intel_connector; drm_connector_init(dev, & intel_connector->base, & intel_crt_connector_funcs, 1); drm_encoder_init(dev, & crt->base.base, & intel_crt_enc_funcs, 1); intel_connector_attach_encoder(intel_connector, & crt->base); crt->base.type = 1; crt->base.cloneable = 68U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 13687U) { crt->base.crtc_mask = 1; } else { crt->base.crtc_mask = 7; } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 2U) { 
connector->interlace_allowed = 0; } else { connector->interlace_allowed = 1; } connector->doublescan_allowed = 0; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type != 0U) { crt->adpa_reg = 921856U; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { crt->adpa_reg = 1970432U; } else { crt->adpa_reg = 397568U; } } crt->base.compute_config = & intel_crt_compute_config; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 46UL) == 0U) { crt->base.disable = & pch_disable_crt; crt->base.post_disable = & pch_post_disable_crt; } else { crt->base.disable = & intel_disable_crt; } } else { crt->base.disable = & intel_disable_crt; } crt->base.enable = & intel_enable_crt; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 46UL) != 0U) { crt->base.hpd_pin = 1; } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 46UL) != 0U) { crt->base.get_config = & hsw_crt_get_config; crt->base.get_hw_state = & intel_ddi_get_hw_state; crt->base.pre_enable = & hsw_crt_pre_enable; crt->base.post_disable = & hsw_crt_post_disable; } else { crt->base.get_config = & intel_crt_get_config; crt->base.get_hw_state = & intel_crt_get_hw_state; } intel_connector->get_hw_state = & intel_connector_get_hw_state; intel_connector->unregister = & intel_connector_unregister; drm_connector_helper_add(connector, & intel_crt_connector_helper_funcs); drm_connector_register(connector); __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 46UL) == 0U) { intel_connector->polled = 2U; } else { } crt->force_hotplug_required = 0; __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___8->pch_type == 3U) { 
fdi_config = 98304U; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 1); dev_priv->fdi_rx_config = tmp___1 & fdi_config; } else { } intel_crt_reset(connector); return; } } extern int ldv_probe_57(void) ; extern int ldv_probe_55(void) ; void ldv_initialize_drm_connector_helper_funcs_56(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_crt_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_initialize_drm_connector_funcs_57(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(104UL); intel_crt_connector_funcs_group0 = (struct drm_property *)tmp; tmp___0 = ldv_init_zalloc(936UL); intel_crt_connector_funcs_group1 = (struct drm_connector *)tmp___0; return; } } void ldv_main_exported_57(void) { struct drm_connector_state *ldvarg47 ; void *tmp ; uint32_t ldvarg44 ; int ldvarg48 ; bool ldvarg46 ; struct drm_connector_state *ldvarg42 ; void *tmp___0 ; uint32_t ldvarg43 ; uint64_t *ldvarg41 ; void *tmp___1 ; uint64_t ldvarg45 ; int tmp___2 ; { tmp = ldv_init_zalloc(32UL); ldvarg47 = (struct drm_connector_state *)tmp; tmp___0 = ldv_init_zalloc(32UL); ldvarg42 = (struct drm_connector_state *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg41 = (uint64_t *)tmp___1; ldv_memset((void *)(& ldvarg44), 0, 4UL); ldv_memset((void *)(& ldvarg48), 0, 4UL); ldv_memset((void *)(& ldvarg46), 0, 1UL); ldv_memset((void *)(& ldvarg43), 0, 4UL); ldv_memset((void *)(& ldvarg45), 0, 8UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_57 == 1) { intel_crt_dpms(intel_crt_connector_funcs_group1, ldvarg48); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { intel_crt_dpms(intel_crt_connector_funcs_group1, ldvarg48); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 1: ; if (ldv_state_variable_57 == 1) { drm_atomic_helper_connector_destroy_state(intel_crt_connector_funcs_group1, ldvarg47); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { 
drm_atomic_helper_connector_destroy_state(intel_crt_connector_funcs_group1, ldvarg47); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 2: ; if (ldv_state_variable_57 == 1) { drm_atomic_helper_connector_duplicate_state(intel_crt_connector_funcs_group1); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { drm_atomic_helper_connector_duplicate_state(intel_crt_connector_funcs_group1); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 3: ; if (ldv_state_variable_57 == 1) { intel_crt_detect(intel_crt_connector_funcs_group1, (int )ldvarg46); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { intel_crt_detect(intel_crt_connector_funcs_group1, (int )ldvarg46); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 4: ; if (ldv_state_variable_57 == 1) { intel_crt_set_property(intel_crt_connector_funcs_group1, intel_crt_connector_funcs_group0, ldvarg45); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { intel_crt_set_property(intel_crt_connector_funcs_group1, intel_crt_connector_funcs_group0, ldvarg45); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 5: ; if (ldv_state_variable_57 == 2) { intel_crt_destroy(intel_crt_connector_funcs_group1); ldv_state_variable_57 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48882; case 6: ; if (ldv_state_variable_57 == 1) { intel_crt_reset(intel_crt_connector_funcs_group1); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { intel_crt_reset(intel_crt_connector_funcs_group1); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 7: ; if (ldv_state_variable_57 == 1) { drm_helper_probe_single_connector_modes(intel_crt_connector_funcs_group1, ldvarg44, ldvarg43); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { drm_helper_probe_single_connector_modes(intel_crt_connector_funcs_group1, ldvarg44, ldvarg43); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 8: ; if (ldv_state_variable_57 == 1) { 
/* NOTE(review): CIL/LDV machine-generated harness code — one physical source
 * line holds many definitions; only comments are added here, code is untouched.
 * The fragment below is the tail of ldv_main_exported_57() (its definition
 * starts before this chunk): remaining nondeterministic switch cases that
 * drive the connector-funcs state machine via ldv_state_variable_57. */
intel_connector_atomic_get_property(intel_crt_connector_funcs_group1, (struct drm_connector_state const *)ldvarg42, intel_crt_connector_funcs_group0, ldvarg41); ldv_state_variable_57 = 1; } else { } if (ldv_state_variable_57 == 2) { intel_connector_atomic_get_property(intel_crt_connector_funcs_group1, (struct drm_connector_state const *)ldvarg42, intel_crt_connector_funcs_group0, ldvarg41); ldv_state_variable_57 = 2; } else { } goto ldv_48882; case 9: ; if (ldv_state_variable_57 == 1) { ldv_probe_57(); ldv_state_variable_57 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48882; default: ldv_stop(); } ldv_48882: ; return; } }
/* LDV environment model for the CRT connector-helper funcs: nondeterministically
 * invokes intel_crt_get_modes / intel_crt_mode_valid / intel_best_encoder on a
 * zero-initialized drm_display_mode (208 bytes — presumably sizeof(struct
 * drm_display_mode) at generation time; confirm against the kernel headers),
 * guarded by ldv_state_variable_56. */
void ldv_main_exported_56(void) { struct drm_display_mode *ldvarg388 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg388 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_56 == 1) { intel_crt_get_modes(intel_crt_connector_helper_funcs_group0); ldv_state_variable_56 = 1; } else { } goto ldv_48898; case 1: ; if (ldv_state_variable_56 == 1) { intel_crt_mode_valid(intel_crt_connector_helper_funcs_group0, ldvarg388); ldv_state_variable_56 = 1; } else { } goto ldv_48898; case 2: ; if (ldv_state_variable_56 == 1) { intel_best_encoder(intel_crt_connector_helper_funcs_group0); ldv_state_variable_56 = 1; } else { } goto ldv_48898; default: ldv_stop(); } ldv_48898: ; return; } }
/* LDV environment model for the encoder funcs: models probe (state 1 -> 2,
 * ref_cnt++) and intel_encoder_destroy (state 2 -> 1, ref_cnt--) so the
 * verifier can check pairing of acquire/release on the encoder object. */
void ldv_main_exported_55(void) { struct drm_encoder *ldvarg25 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg25 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_55 == 2) { intel_encoder_destroy(ldvarg25); ldv_state_variable_55 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48907; case 1: ; if (ldv_state_variable_55 == 1) { ldv_probe_55(); ldv_state_variable_55 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48907; default: ldv_stop(); } ldv_48907: ; return; } }
/* Instrumented wrapper around queue_work_on(): forwards the call unchanged and
 * records the work item as activated (state 2) in the LDV work-queue model. */
bool ldv_queue_work_on_839(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* Instrumented wrapper around queue_delayed_work_on(): same pattern, but the
 * embedded work_struct of the delayed_work is what gets marked active. */
bool ldv_queue_delayed_work_on_840(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Duplicate instrumented queue_work_on() wrapper (LDV emits one numbered
 * wrapper per call site in the original driver). */
bool ldv_queue_work_on_841(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* Instrumented flush_workqueue(): after the real flush, the model runs and
 * disables all pending work items (call_and_disable_all_18). */
void ldv_flush_workqueue_842(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* Second numbered queue_delayed_work_on() wrapper — identical model behavior. */
bool ldv_queue_delayed_work_on_843(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
__inline static long ldv__builtin_expect(long exp , long c ) ;
/* CIL-expanded copy of the x86 paravirt save_fl path (arch_local_save_flags):
 * BUGs if pv_irq_ops.save_fl.func is NULL, otherwise performs the patched
 * indirect call via inline asm.  Definition continues past this edit — body
 * below is an unmodified fragment. */
__inline static unsigned long arch_local_save_flags___20(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 
0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static u64 div64_u64(u64 dividend , u64 divisor ) { { return (dividend / divisor); } } bool ldv_queue_work_on_853(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_855(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_854(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_857(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_856(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool drm_can_sleep___11(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; case 8UL: 
__asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39965; default: __bad_percpu_size(); } ldv_39965: pscr_ret__ = pfo_ret__; goto ldv_39971; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39975; default: __bad_percpu_size(); } ldv_39975: pscr_ret__ = pfo_ret_____0; goto ldv_39971; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39984; default: __bad_percpu_size(); } ldv_39984: pscr_ret__ = pfo_ret_____1; goto ldv_39971; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39993; default: __bad_percpu_size(); } ldv_39993: pscr_ret__ = pfo_ret_____2; goto ldv_39971; default: __bad_size_call_parameter(); goto ldv_39971; } ldv_39971: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___20(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } __inline static bool 
drm_dp_enhanced_frame_cap(u8 const *dpcd ) { { return ((bool )((unsigned int )((unsigned char )*dpcd) > 16U && (int )((signed char )*(dpcd + 2UL)) < 0)); } } void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder ) ; bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector ) ; void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder ) ; void intel_ddi_clock_get(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) ; void bxt_ddi_vswing_sequence(struct drm_device *dev , u32 level , enum port port , int type ) ; bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port , struct intel_connector *intel_connector ) ; void intel_dp_start_link_train(struct intel_dp *intel_dp ) ; void intel_dp_complete_link_train(struct intel_dp *intel_dp ) ; void intel_dp_stop_link_train(struct intel_dp *intel_dp ) ; void intel_dp_sink_dpms(struct intel_dp *intel_dp , int mode ) ; void intel_dp_encoder_destroy(struct drm_encoder *encoder ) ; bool intel_dp_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) ; enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port , bool long_hpd ) ; void intel_edp_backlight_on(struct intel_dp *intel_dp ) ; void intel_edp_backlight_off(struct intel_dp *intel_dp ) ; void intel_edp_panel_vdd_on(struct intel_dp *intel_dp ) ; void intel_edp_panel_on(struct intel_dp *intel_dp ) ; void intel_edp_panel_off(struct intel_dp *intel_dp ) ; void intel_dp_hot_plug(struct intel_encoder *intel_encoder ) ; void intel_edp_drrs_enable(struct intel_dp *intel_dp ) ; void intel_edp_drrs_disable(struct intel_dp *intel_dp ) ; void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port , struct intel_connector *intel_connector ) ; bool intel_hdmi_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) ; static struct ddi_buf_trans const hsw_ddi_translations_dp[9U] = { {16777215U, 393230U}, {14114815U, 327690U}, 
{12783615U, 262150U}, {2158669823U, 720896U}, {16777215U, 327690U}, {14114815U, 786436U}, {2160267263U, 720896U}, {16777215U, 262150U}, {2161598463U, 720896U}}; static struct ddi_buf_trans const hsw_ddi_translations_fdi[9U] = { {16777215U, 458766U}, {14114815U, 983050U}, {12783615U, 393222U}, {11186175U, 1966080U}, {16777215U, 983050U}, {14114815U, 1441796U}, {12783615U, 1966080U}, {16777215U, 393222U}, {14114815U, 1966080U}}; static struct ddi_buf_trans const hsw_ddi_translations_hdmi[12U] = { {16777215U, 393230U}, {15179775U, 917516U}, {14114815U, 327690U}, {16777215U, 327690U}, {15179775U, 1900551U}, {14114815U, 786436U}, {16777215U, 262150U}, {2162663423U, 196610U}, {16777215U, 1310725U}, {16777215U, 786436U}, {16777215U, 1835011U}, {2164260863U, 196610U}}; static struct ddi_buf_trans const bdw_ddi_translations_edp[9U] = { {16777215U, 18U}, {15446015U, 131089U}, {13049855U, 393231U}, {11186175U, 917514U}, {16777215U, 131089U}, {14381055U, 327695U}, {12513279U, 655372U}, {16777215U, 327695U}, {14381055U, 655372U}}; static struct ddi_buf_trans const bdw_ddi_translations_dp[9U] = { {16777215U, 458766U}, {14114815U, 917514U}, {12517375U, 1310726U}, {2159202303U, 1769474U}, {16777215U, 917514U}, {14381055U, 1441797U}, {2160533503U, 1703938U}, {16244735U, 1572868U}, {2161598463U, 1769474U}}; static struct ddi_buf_trans const bdw_ddi_translations_fdi[9U] = { {16777215U, 65550U}, {14114815U, 262154U}, {12783615U, 458758U}, {11186175U, 786432U}, {16777215U, 262154U}, {14114815U, 589828U}, {12783615U, 786432U}, {16777215U, 458758U}, {14114815U, 786432U}}; static struct ddi_buf_trans const bdw_ddi_translations_hdmi[10U] = { {16777215U, 458766U}, {14114815U, 917514U}, {12517375U, 1310726U}, {16777215U, 589837U}, {16777215U, 917514U}, {14155775U, 1310726U}, {2160799743U, 1769474U}, {16777215U, 1310726U}, {2162663423U, 1769474U}, {2164260863U, 1769474U}}; static struct ddi_buf_trans const skl_ddi_translations_dp[9U] = { {24U, 162U}, {16404U, 155U}, {24594U, 136U}, {32784U, 
135U}, {24U, 155U}, {16404U, 136U}, {24594U, 135U}, {24U, 136U}, {16404U, 135U}}; static struct ddi_buf_trans const skl_ddi_translations_edp[10U] = { {24U, 168U}, {8214U, 171U}, {24594U, 162U}, {32784U, 136U}, {24U, 171U}, {16404U, 162U}, {24594U, 166U}, {24U, 162U}, {20499U, 156U}, {24U, 136U}}; static struct ddi_buf_trans const skl_ddi_translations_hdmi[11U] = { {24U, 172U}, {20498U, 157U}, {28689U, 136U}, {24U, 161U}, {24U, 152U}, {16403U, 136U}, {24594U, 135U}, {24U, 223U}, {12309U, 135U}, {12309U, 199U}, {24U, 199U}}; static struct bxt_ddi_buf_trans const bxt_ddi_translations_dp[10U] = { {52U, 0U, 0U, 128U, 1}, {78U, 0U, 0U, 85U, 0}, {104U, 0U, 0U, 64U, 0}, {154U, 0U, 0U, 43U, 0}, {77U, 0U, 0U, 128U, 0}, {116U, 0U, 0U, 85U, 0}, {154U, 0U, 0U, 64U, 0}, {102U, 0U, 0U, 128U, 0}, {154U, 0U, 0U, 85U, 0}, {154U, 154U, 1U, 128U, 0}}; static struct bxt_ddi_buf_trans const bxt_ddi_translations_hdmi[10U] = { {52U, 0U, 0U, 128U, 0}, {52U, 0U, 0U, 85U, 0}, {52U, 0U, 0U, 64U, 0}, {42U, 0U, 0U, 43U, 0}, {77U, 0U, 0U, 128U, 0}, {77U, 0U, 0U, 85U, 0}, {77U, 0U, 0U, 64U, 0}, {102U, 0U, 0U, 128U, 0}, {102U, 0U, 0U, 85U, 0}, {154U, 154U, 1U, 128U, 1}}; static void ddi_get_encoder_port(struct intel_encoder *intel_encoder , struct intel_digital_port **dig_port , enum port *port ) { struct drm_encoder *encoder ; int type ; struct intel_dp_mst_encoder *tmp ; { encoder = & intel_encoder->base; type = (int )intel_encoder->type; if (type == 11) { tmp = enc_to_mst(encoder); *dig_port = tmp->primary; *port = (*dig_port)->port; } else if (((type == 7 || type == 8) || type == 6) || type == 10) { *dig_port = enc_to_dig_port(encoder); *port = (*dig_port)->port; } else if (type == 1) { *dig_port = (struct intel_digital_port *)0; *port = 4; } else { drm_err("Invalid DDI encoder type %d\n", type); __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c"), "i" (232), "i" (12UL)); ldv_48005: ; goto ldv_48005; } return; } } enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder ) { struct intel_digital_port *dig_port ; enum port port ; { ddi_get_encoder_port(intel_encoder, & dig_port, & port); return (port); } } static bool intel_dig_port_supports_hdmi(struct intel_digital_port const *intel_dig_port ) { { return ((unsigned int )intel_dig_port->hdmi.hdmi_reg != 0U); } } static void intel_prepare_ddi_buffers(struct drm_device *dev , enum port port , bool supports_hdmi ) { struct drm_i915_private *dev_priv ; u32 reg ; int i ; int n_hdmi_entries ; int n_dp_entries ; int n_edp_entries ; int hdmi_default_entry ; int size ; int hdmi_level ; struct ddi_buf_trans const *ddi_translations_fdi ; struct ddi_buf_trans const *ddi_translations_dp ; struct ddi_buf_trans const *ddi_translations_edp ; struct ddi_buf_trans const *ddi_translations_hdmi ; struct ddi_buf_trans const *ddi_translations ; int __ret_warn_on ; long tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; bool tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; hdmi_level = (int )dev_priv->vbt.ddi_port_info[(unsigned int )port].hdmi_level_shift; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 9U) { if (! 
supports_hdmi) { return; } else { } bxt_ddi_vswing_sequence(dev, (u32 )hdmi_level, port, 6); return; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { ddi_translations_fdi = (struct ddi_buf_trans const *)0; ddi_translations_dp = (struct ddi_buf_trans const *)(& skl_ddi_translations_dp); n_dp_entries = 9; if ((int )dev_priv->edp_low_vswing) { ddi_translations_edp = (struct ddi_buf_trans const *)(& skl_ddi_translations_edp); n_edp_entries = 10; } else { ddi_translations_edp = (struct ddi_buf_trans const *)(& skl_ddi_translations_dp); n_edp_entries = 9; } ddi_translations_hdmi = (struct ddi_buf_trans const *)(& skl_ddi_translations_hdmi); n_hdmi_entries = 11; hdmi_default_entry = 7; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { ddi_translations_fdi = (struct ddi_buf_trans const *)(& bdw_ddi_translations_fdi); ddi_translations_dp = (struct ddi_buf_trans const *)(& bdw_ddi_translations_dp); ddi_translations_edp = (struct ddi_buf_trans const *)(& bdw_ddi_translations_edp); ddi_translations_hdmi = (struct ddi_buf_trans const *)(& bdw_ddi_translations_hdmi); n_edp_entries = 9; n_dp_entries = 9; n_hdmi_entries = 10; hdmi_default_entry = 7; } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { ddi_translations_fdi = (struct ddi_buf_trans const *)(& hsw_ddi_translations_fdi); ddi_translations_dp = (struct ddi_buf_trans const *)(& hsw_ddi_translations_dp); ddi_translations_edp = (struct ddi_buf_trans const *)(& hsw_ddi_translations_dp); ddi_translations_hdmi = (struct ddi_buf_trans const *)(& hsw_ddi_translations_hdmi); n_edp_entries = 9; n_dp_entries = n_edp_entries; 
n_hdmi_entries = 12; hdmi_default_entry = 6; } else { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 314, "ddi translation table missing\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ddi_translations_edp = (struct ddi_buf_trans const *)(& bdw_ddi_translations_dp); ddi_translations_fdi = (struct ddi_buf_trans const *)(& bdw_ddi_translations_fdi); ddi_translations_dp = (struct ddi_buf_trans const *)(& bdw_ddi_translations_dp); ddi_translations_hdmi = (struct ddi_buf_trans const *)(& bdw_ddi_translations_hdmi); n_edp_entries = 9; n_dp_entries = 9; n_hdmi_entries = 10; hdmi_default_entry = 7; } } } } switch ((unsigned int )port) { case 0U: ddi_translations = ddi_translations_edp; size = n_edp_entries; goto ldv_48096; case 1U: ; case 2U: ddi_translations = ddi_translations_dp; size = n_dp_entries; goto ldv_48096; case 3U: tmp___0 = intel_dp_is_edp(dev, 3); if ((int )tmp___0) { ddi_translations = ddi_translations_edp; size = n_edp_entries; } else { ddi_translations = ddi_translations_dp; size = n_dp_entries; } goto ldv_48096; case 4U: ; if ((unsigned long )ddi_translations_fdi != (unsigned long )((struct ddi_buf_trans const *)0)) { ddi_translations = ddi_translations_fdi; } else { ddi_translations = ddi_translations_dp; } size = n_dp_entries; goto ldv_48096; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c"), "i" (352), "i" (12UL)); ldv_48102: ; goto ldv_48102; } 
ldv_48096: i = 0; reg = (unsigned int )port * 96U + 413184U; goto ldv_48104; ldv_48103: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, (ddi_translations + (unsigned long )i)->trans1, 1); reg = reg + 4U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, (ddi_translations + (unsigned long )i)->trans2, 1); reg = reg + 4U; i = i + 1; ldv_48104: ; if (i < size) { goto ldv_48103; } else { } if (! supports_hdmi) { return; } else { } if (hdmi_level == 255 || hdmi_level >= n_hdmi_entries) { hdmi_level = hdmi_default_entry; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, (ddi_translations_hdmi + (unsigned long )hdmi_level)->trans1, 1); reg = reg + 4U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, (ddi_translations_hdmi + (unsigned long )hdmi_level)->trans2, 1); reg = reg + 4U; return; } } void intel_prepare_ddi(struct drm_device *dev ) { struct intel_encoder *intel_encoder ; bool visited[5U] ; unsigned int tmp ; struct drm_i915_private *__p ; struct list_head const *__mptr ; struct intel_digital_port *intel_dig_port ; enum port port ; bool supports_hdmi ; bool tmp___0 ; int tmp___1 ; struct list_head const *__mptr___0 ; { visited[0] = 0; tmp = 1U; while (1) { if (tmp >= 5U) { break; } else { } visited[tmp] = (_Bool)0; tmp = tmp + 1U; } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return; } else { } __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_48126; ldv_48125: ddi_get_encoder_port(intel_encoder, & intel_dig_port, & port); if ((int )visited[(unsigned int )port]) { goto ldv_48124; } else { } if ((unsigned long )intel_dig_port != (unsigned long )((struct intel_digital_port *)0)) { tmp___0 = intel_dig_port_supports_hdmi((struct intel_digital_port const *)intel_dig_port); if ((int )tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 
= 0; } supports_hdmi = (bool )tmp___1; intel_prepare_ddi_buffers(dev, port, (int )supports_hdmi); visited[(unsigned int )port] = 1; ldv_48124: __mptr___0 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_48126: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_48125; } else { } return; } } static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv , enum port port ) { uint32_t reg ; int i ; uint32_t tmp ; { reg = ((unsigned int )port + 1600U) * 256U; i = 0; goto ldv_48135; ldv_48134: __const_udelay(4295UL); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((tmp & 128U) != 0U) { return; } else { } i = i + 1; ldv_48135: ; if (i <= 15) { goto ldv_48134; } else { } drm_err("Timeout waiting for DDI BUF %c idle bit\n", (unsigned int )port + 65U); return; } } void hsw_fdi_link_train(struct drm_crtc *crtc ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u32 temp ; u32 i ; u32 rx_ctl_val ; int __ret_warn_on ; long tmp ; long tmp___0 ; { dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983056L, 169869456U, 1); rx_ctl_val = (dev_priv->fdi_rx_config | (u32 )(((intel_crtc->config)->fdi_lanes + -1) << 19)) | 8256U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983052L, rx_ctl_val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 0); __const_udelay(944900UL); rx_ctl_val = rx_ctl_val | 16U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983052L, rx_ctl_val, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 286992L, (intel_crtc->config)->ddi_pll_sel, 1); __ret_warn_on = (intel_crtc->config)->ddi_pll_sel != 1610612736U; tmp = 
ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 461, "WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); i = 0U; goto ldv_48154; ldv_48153: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 410688L, 2147778560U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 410624L, ((u32 )(((intel_crtc->config)->fdi_lanes + -1) << 1) | (i / 2U << 24)) | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 410624L, 0); __const_udelay(2577000UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983088L, 2113929216U, 1); rx_ctl_val = rx_ctl_val | 2147484672U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983052L, rx_ctl_val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 0); __const_udelay(128850UL); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983056L, 1); temp = temp & 4043309055U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983056L, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983056L, 0); __const_udelay(21475UL); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 410692L, 1); if ((temp & 4096U) != 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("hsw_fdi_link_train", "FDI link training done on step %d\n", i); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 410688L, 2147779328U, 1); return; } else { } temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 410624L, 1); temp = temp & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 410624L, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 410624L, 0); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 410688L, 1); temp = temp & 2147481855U; temp = 
/* (continued) tail of hsw_fdi_link_train(): failure path of one training
 * iteration — write back masked DDI_BUF_CTL, wait for the buffer to go idle,
 * disable FDI RX, reprogram FDI_RX_MISC, and loop to the next of up to 18
 * voltage/pre-emphasis settings before reporting failure.  NOTE(review): the
 * raw offsets (410688 / 983052 / 983056) are the CIL-flattened register
 * addresses from intel_ddi.c — confirm against the original driver source. */
temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 410688L, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 410688L, 0); intel_wait_ddi_buf_idle(dev_priv, 4); rx_ctl_val = rx_ctl_val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983052L, rx_ctl_val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 0); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983056L, 1); temp = temp & 4043309055U; temp = temp | 167772160U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983056L, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983056L, 0); i = i + 1U; ldv_48154: ; if (i <= 17U) { goto ldv_48153; } else { } drm_err("FDI link training failed!\n"); return; } }
/* Precompute the DP buffer-control value cached in intel_dp->DP: the port's
 * saved_port_bits OR'd with 0x80000000 (the enable bit) and (lane_count - 1)
 * shifted into bits [2:1].  Only computes state; no register write here. */
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = enc_to_dig_port(& encoder->base); intel_dig_port = tmp___0; intel_dp->DP = intel_dig_port->saved_port_bits | 2147483648U; intel_dp->DP = intel_dp->DP | (uint32_t )(((int )intel_dp->lane_count + -1) << 1); return; } }
/* Walk dev->mode_config.encoder_list and return the single intel_encoder
 * attached to @crtc; WARNs (further down, past this edit) if the count is not
 * exactly one.  The 0xffff...fff8UL additions are CIL's container_of offset
 * arithmetic.  Definition continues past this edit — fragment is unmodified. */
static struct intel_encoder *intel_ddi_get_crtc_encoder(struct drm_crtc *crtc ) { struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *intel_encoder ; struct intel_encoder *ret ; int num_encoders ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int __ret_warn_on ; long tmp ; long tmp___0 ; { dev = crtc->dev; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; ret = (struct intel_encoder *)0; num_encoders = 0; __mptr___0 = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_48176; ldv_48175: ; if ((unsigned long )intel_encoder->base.crtc == (unsigned long )crtc) { ret = 
intel_encoder; num_encoders = num_encoders + 1; } else { } __mptr___1 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___1 + 0xfffffffffffffff8UL; ldv_48176: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_48175; } else { } if (num_encoders != 1) { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 575, "%d encoders on crtc for pipe %c\n", num_encoders, (int )intel_crtc->pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } tmp___0 = ldv__builtin_expect((unsigned long )ret == (unsigned long )((struct intel_encoder *)0), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c"), "i" (577), "i" (12UL)); ldv_48180: ; goto ldv_48180; } else { } return (ret); } } struct intel_encoder *intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *ret ; struct drm_atomic_state *state ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; int num_encoders ; int i ; struct drm_encoder const *__mptr___0 ; int __ret_warn_on ; long tmp ; long tmp___0 ; { __mptr = (struct drm_crtc const *)crtc_state->base.crtc; crtc = (struct intel_crtc *)__mptr; ret = (struct intel_encoder *)0; num_encoders = 0; state = crtc_state->base.state; 
i = 0; goto ldv_48197; ldv_48196: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )crtc_state->base.crtc) { goto ldv_48193; } else { } __mptr___0 = (struct drm_encoder const *)connector_state->best_encoder; ret = (struct intel_encoder *)__mptr___0; num_encoders = num_encoders + 1; } else { } ldv_48193: i = i + 1; ldv_48197: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_48196; } else { } __ret_warn_on = num_encoders != 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 603, "%d encoders on crtc for pipe %c\n", num_encoders, (int )crtc->pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = ldv__builtin_expect((unsigned long )ret == (unsigned long )((struct intel_encoder *)0), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c"), "i" (605), "i" (12UL)); ldv_48201: ; goto ldv_48201; } else { } return (ret); } } static unsigned int wrpll_get_budget_for_freq(int clock ) { unsigned int budget ; { switch (clock) { case 25175000: ; case 25200000: ; case 27000000: ; case 27027000: ; case 37762500: ; case 37800000: ; case 40500000: ; case 40541000: ; case 54000000: ; case 54054000: ; case 59341000: ; case 59400000: ; case 72000000: ; case 74176000: ; case 74250000: ; 
/* (continued) tail of wrpll_get_budget_for_freq(): maps a target clock (case
 * values such as 25175000 suggest Hz — TODO confirm) to an allowed frequency
 * deviation budget for WRPLL divider selection; unknown clocks get the
 * catch-all budget of 1000. */
case 81000000: ; case 81081000: ; case 89012000: ; case 89100000: ; case 108000000: ; case 108108000: ; case 111264000: ; case 111375000: ; case 148352000: ; case 148500000: ; case 162000000: ; case 162162000: ; case 222525000: ; case 222750000: ; case 296703000: ; case 297000000: budget = 0U; goto ldv_48241; case 233500000: ; case 245250000: ; case 247750000: ; case 253250000: ; case 298000000: budget = 1500U; goto ldv_48241; case 169128000: ; case 169500000: ; case 179500000: ; case 202000000: budget = 2000U; goto ldv_48241; case 256250000: ; case 262500000: ; case 270000000: ; case 272500000: ; case 273750000: ; case 280750000: ; case 281250000: ; case 286000000: ; case 291750000: budget = 4000U; goto ldv_48241; case 267250000: ; case 268500000: budget = 5000U; goto ldv_48241; default: budget = 1000U; goto ldv_48241; } ldv_48241: ; return (budget); } }
/* Keep the best WRPLL (r2, n2, p) divider triple in *best.  best->p == 0 marks
 * "no candidate yet" and the first triple is accepted unconditionally.
 * diff = |p * freq2k * r2 - n2 * 5400000| is the frequency error of a
 * candidate (5400000 presumably encodes the 2700 reference in the freq2k
 * fixed-point scale — confirm against the HSW WRPLL equations); a/b vs c/d
 * compare each error against its ppm-style budget, then the in/out-of-budget
 * combinations pick the triple with the smaller relative error (in-budget) or
 * the higher effective n2/r2^2 ratio (both over budget). */
static void wrpll_update_rnp(uint64_t freq2k , unsigned int budget , unsigned int r2 , unsigned int n2 , unsigned int p , struct wrpll_rnp *best ) { uint64_t a ; uint64_t b ; uint64_t c ; uint64_t d ; uint64_t diff ; uint64_t diff_best ; uint64_t __a ; unsigned long long __b ; uint64_t __a___0 ; unsigned long long __b___0 ; { if (best->p == 0U) { best->p = p; best->n2 = n2; best->r2 = r2; return; } else { } a = (((uint64_t )budget * freq2k) * (uint64_t )p) * (uint64_t )r2; b = (((uint64_t )budget * freq2k) * (uint64_t )best->p) * (uint64_t )best->r2; __a = ((uint64_t )p * freq2k) * (uint64_t )r2; __b = (unsigned long long )n2 * 5400000ULL; diff = __a > __b ? __a - __b : __b - __a; __a___0 = ((uint64_t )best->p * freq2k) * (uint64_t )best->r2; __b___0 = (unsigned long long )best->n2 * 5400000ULL; diff_best = __a___0 > __b___0 ? __a___0 - __b___0 : __b___0 - __a___0; c = diff * 1000000ULL; d = diff_best * 1000000ULL; if (a < c && b < d) { if ((uint64_t )(best->p * best->r2) * diff < (uint64_t )(p * r2) * diff_best) { best->p = p; best->n2 = n2; best->r2 = r2; } else { } } else if (a >= c && b < d) { best->p = p; best->n2 = n2; best->r2 = r2; } else if (a >= c && b >= d) { if ((best->r2 * n2) * best->r2 > (best->n2 * r2) * r2) { best->p = p; best->n2 = n2; best->r2 = r2; } else { } } else { } return; } }
/* Read back a WRPLL control register and reconstruct the resulting link clock
 * from its refclk-select field (masked 0x30000000) and the r/p/n divider
 * fields; WARNs and returns 0 on an unexpected refclk selection.  The return
 * unit follows the 2700/135 refclk constants (looks like the driver's 10 kHz
 * link-clock units — confirm against the original intel_ddi.c). */
static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv , int reg ) { int refclk ; int n ; int p ; int r ; u32 wrpll ; int __ret_warn_on ; long tmp ; { refclk = 2700; wrpll = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); switch (wrpll & 805306368U) { case 268435456U: ; case 536870912U: refclk = 135; goto ldv_48294; case 805306368U: refclk = 2700; goto ldv_48294; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 787, "bad wrpll refclk\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (0); } ldv_48294: r = (int )wrpll & 255; p = (int )((wrpll & 16128U) >> 8); n = (int )((wrpll & 16711680U) >> 16); return (((refclk * n) * 100) / (p * r)); } }
/* Decode a SKL DPLL's CFGCR1/CFGCR2 registers (addresses derived from the dpll
 * index by CIL-flattened arithmetic) into the DCO output frequency divided by
 * p0*p1*p2.  Definition continues past this edit — fragment is unmodified. */
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv , uint32_t dpll ) { uint32_t cfgcr1_reg ; uint32_t cfgcr2_reg ; uint32_t cfgcr1_val ; uint32_t cfgcr2_val ; uint32_t p0 ; uint32_t p1 ; uint32_t p2 ; uint32_t dco_freq ; { cfgcr1_reg = (dpll + 55303U) * 8U; cfgcr2_reg = (dpll + 536870911U) * 8U + 442436U; cfgcr1_val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )cfgcr1_reg, 1); cfgcr2_val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )cfgcr2_reg, 1); p0 = cfgcr2_val & 28U; p2 = cfgcr2_val & 
96U; if ((cfgcr2_val & 128U) != 0U) { p1 = (cfgcr2_val & 65280U) >> 8; } else { p1 = 1U; } switch (p0) { case 0U: p0 = 1U; goto ldv_48312; case 4U: p0 = 2U; goto ldv_48312; case 8U: p0 = 3U; goto ldv_48312; case 16U: p0 = 7U; goto ldv_48312; } ldv_48312: ; switch (p2) { case 0U: p2 = 5U; goto ldv_48317; case 32U: p2 = 2U; goto ldv_48317; case 64U: p2 = 3U; goto ldv_48317; case 96U: p2 = 1U; goto ldv_48317; } ldv_48317: dco_freq = (cfgcr1_val & 511U) * 24000U; dco_freq = (((cfgcr1_val & 16776704U) >> 9) * 24000U) / 32768U + dco_freq; return ((int )(dco_freq / (((p0 * p1) * p2) * 5U))); } } static void skl_ddi_clock_get(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_i915_private *dev_priv ; int link_clock ; uint32_t dpll_ctl1 ; uint32_t dpll ; int __ret_warn_on ; long tmp ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; link_clock = 0; dpll = pipe_config->ddi_pll_sel; dpll_ctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 1); if (((uint32_t )(1 << (int )(dpll * 6U + 5U)) & dpll_ctl1) != 0U) { link_clock = skl_calc_wrpll_link(dev_priv, dpll); } else { link_clock = (int )((uint32_t )(7 << (int )(dpll * 6U + 1U)) & dpll_ctl1); link_clock = link_clock >> (int )(dpll * 6U + 1U); switch (link_clock) { case 2: link_clock = 81000; goto ldv_48330; case 4: link_clock = 108000; goto ldv_48330; case 1: link_clock = 135000; goto ldv_48330; case 3: link_clock = 162000; goto ldv_48330; case 5: link_clock = 216000; goto ldv_48330; case 0: link_clock = 270000; goto ldv_48330; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 897, "Unsupported link rate\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); goto ldv_48330; } 
ldv_48330: link_clock = link_clock * 2; } pipe_config->port_clock = link_clock; if ((int )pipe_config->has_dp_encoder) { pipe_config->base.adjusted_mode.crtc_clock = intel_dotclock_calculate(pipe_config->port_clock, (struct intel_link_m_n const *)(& pipe_config->dp_m_n)); } else { pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; } return; } } static void hsw_ddi_clock_get(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_i915_private *dev_priv ; int link_clock ; u32 val ; u32 pll ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; link_clock = 0; val = pipe_config->ddi_pll_sel; switch (val & 3758096384U) { case 1073741824U: link_clock = 81000; goto ldv_48348; case 536870912U: link_clock = 135000; goto ldv_48348; case 0U: link_clock = 270000; goto ldv_48348; case 2147483648U: link_clock = intel_ddi_calc_wrpll_link(dev_priv, 286784); goto ldv_48348; case 2684354560U: link_clock = intel_ddi_calc_wrpll_link(dev_priv, 286816); goto ldv_48348; case 1610612736U: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286752L, 1); pll = tmp & 201326592U; if (pll == 0U) { link_clock = 81000; } else if (pll == 67108864U) { link_clock = 135000; } else if (pll == 134217728U) { link_clock = 270000; } else { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 946, "bad spll freq\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } goto ldv_48348; default: __ret_warn_on___0 = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { 
/* Tail of hsw_ddi_clock_get(): WARN on an unrecognized port-clock select,
 * then double the decoded link clock into port_clock and derive the CRTC
 * dotclock from the FDI or DP M/N values when a PCH/DP encoder is present. */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 951, "bad port clock sel\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); return; } ldv_48348: pipe_config->port_clock = link_clock * 2; if ((int )pipe_config->has_pch_encoder) { pipe_config->base.adjusted_mode.crtc_clock = intel_dotclock_calculate(pipe_config->port_clock, (struct intel_link_m_n const *)(& pipe_config->fdi_m_n)); } else if ((int )pipe_config->has_dp_encoder) { pipe_config->base.adjusted_mode.crtc_clock = intel_dotclock_calculate(pipe_config->port_clock, (struct intel_link_m_n const *)(& pipe_config->dp_m_n)); } else { pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; } return; } }
/* LDV verification stub: the real Broxton PLL link-rate calculation was
 * elided by the model generator; it unconditionally reports 0 kHz. */
static int bxt_calc_pll_link(struct drm_i915_private *dev_priv , enum intel_dpll_id dpll ) { { return (0); } }
/* bxt_ddi_clock_get(): maps the encoder's port number directly onto a DPLL
 * id, asks bxt_calc_pll_link() (stubbed above, so always 0 here) for the
 * port clock, and derives the CRTC dotclock from dp_m_n for DP encoders. */
static void bxt_ddi_clock_get(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_i915_private *dev_priv ; enum port port ; enum port tmp ; uint32_t dpll ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = intel_ddi_get_encoder_port(encoder); port = tmp; dpll = port; pipe_config->port_clock = bxt_calc_pll_link(dev_priv, (enum intel_dpll_id )dpll); if ((int )pipe_config->has_dp_encoder) { pipe_config->base.adjusted_mode.crtc_clock = intel_dotclock_calculate(pipe_config->port_clock, (struct intel_link_m_n const *)(& pipe_config->dp_m_n)); } else { pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; } return; } }
/* intel_ddi_clock_get(): platform dispatcher (continues past this chunk).
 * Routes to hsw_/skl_/bxt_ddi_clock_get based on info.gen and a device-info
 * byte at offset 45 of drm_i915_private (presumably an is-Skylake style
 * flag flattened by CIL — confirm against the original struct layout). */
void intel_ddi_clock_get(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = encoder->base.dev; __p___2 = to_i915((struct
drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 8U) { hsw_ddi_clock_get(encoder, pipe_config); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { skl_ddi_clock_get(encoder, pipe_config); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { bxt_ddi_clock_get(encoder, pipe_config); } else { } } else { } } } return; } } static void hsw_ddi_calculate_wrpll(int clock , unsigned int *r2_out , unsigned int *n2_out , unsigned int *p_out ) { uint64_t freq2k ; unsigned int p ; unsigned int n2 ; unsigned int r2 ; struct wrpll_rnp best ; unsigned int budget ; { best.p = 0U; best.n2 = 0U; best.r2 = 0U; freq2k = (uint64_t )(clock / 100); budget = wrpll_get_budget_for_freq(clock); if (freq2k == 5400000ULL) { *n2_out = 2U; *p_out = 1U; *r2_out = 2U; return; } else { } r2 = 14U; goto ldv_48418; ldv_48417: n2 = (r2 * 2400U) / 2700U + 1U; goto ldv_48415; ldv_48414: p = 2U; goto ldv_48412; ldv_48411: wrpll_update_rnp(freq2k, budget, r2, n2, p, & best); p = p + 2U; ldv_48412: ; if (p <= 64U) { goto ldv_48411; } else { } n2 = n2 + 1U; ldv_48415: ; if ((r2 * 4800U) / 2700U >= n2) { goto ldv_48414; } else { } r2 = r2 + 1U; ldv_48418: ; if (r2 <= 112U) { goto ldv_48417; } else { } *n2_out = best.n2; *p_out = best.p; *r2_out = best.r2; return; } } static bool hsw_ddi_pll_select(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state , struct intel_encoder *intel_encoder , int clock ) { struct intel_shared_dpll *pll ; uint32_t val ; unsigned int p ; unsigned int n2 ; unsigned int r2 ; long tmp ; { if ((unsigned int )intel_encoder->type == 6U) { hsw_ddi_calculate_wrpll(clock * 1000, & r2, & n2, & p); val = (((n2 << 16) | r2) | (p << 8)) | 2952790016U; memset((void *)(& crtc_state->dpll_hw_state), 0, 
68UL); crtc_state->dpll_hw_state.wrpll = val; pll = intel_get_shared_dpll(intel_crtc, crtc_state); if ((unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0)) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_ddi_pll_select", "failed to find PLL for pipe %c\n", (int )intel_crtc->pipe + 65); } else { } return (0); } else { } crtc_state->ddi_pll_sel = (uint32_t )(((int )pll->id + 4) << 29); } else { } return (1); } } static void skl_ddi_calculate_wrpll(int clock , struct skl_wrpll_params *wrpll_params ) { uint64_t afe_clock ; uint64_t dco_central_freq[3U] ; uint32_t min_dco_deviation ; uint32_t min_dco_index ; uint32_t P0[4U] ; uint32_t P2[4U] ; bool found ; uint32_t candidate_p ; uint32_t candidate_p0[3U] ; unsigned int tmp ; uint32_t candidate_p1[3U] ; unsigned int tmp___0 ; uint32_t candidate_p2[3U] ; unsigned int tmp___1 ; uint32_t dco_central_freq_deviation[3U] ; uint32_t i ; uint32_t P1 ; uint32_t k ; uint32_t dco_count ; bool retry_with_odd ; uint64_t dco_freq ; u64 tmp___2 ; uint64_t __a ; uint64_t __b ; u64 tmp___3 ; int __ret_warn_on ; long tmp___4 ; int __ret_warn_on___0 ; long tmp___5 ; int __ret_warn_on___1 ; long tmp___6 ; u64 tmp___7 ; u64 tmp___8 ; u64 tmp___9 ; { afe_clock = (uint64_t )(clock * 5); dco_central_freq[0] = 8400000000ULL; dco_central_freq[1] = 9000000000ULL; dco_central_freq[2] = 9600000000ULL; min_dco_deviation = 400U; min_dco_index = 3U; P0[0] = 1U; P0[1] = 2U; P0[2] = 3U; P0[3] = 7U; P2[0] = 1U; P2[1] = 2U; P2[2] = 3U; P2[3] = 5U; found = 0; candidate_p = 0U; candidate_p0[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } candidate_p0[tmp] = 0U; tmp = tmp + 1U; } candidate_p1[0] = 0U; tmp___0 = 1U; while (1) { if (tmp___0 >= 3U) { break; } else { } candidate_p1[tmp___0] = 0U; tmp___0 = tmp___0 + 1U; } candidate_p2[0] = 0U; tmp___1 = 1U; while (1) { if (tmp___1 >= 3U) { break; } else { } candidate_p2[tmp___1] = 0U; tmp___1 = tmp___1 + 1U; } retry_with_odd = 
0; dco_count = 0U; goto ldv_48478; ldv_48477: found = 0; tmp___2 = div64_u64(dco_central_freq[dco_count], afe_clock); candidate_p = (uint32_t )tmp___2; if (! retry_with_odd) { candidate_p = (candidate_p & 1U) != 0U ? candidate_p + 1U : candidate_p; } else { } P1 = 1U; goto ldv_48472; ldv_48471: i = 0U; goto ldv_48469; ldv_48468: ; if (P0[i] == 1U && P1 != 1U) { goto ldv_48462; } else { } k = 0U; goto ldv_48466; ldv_48465: ; if (P1 != 1U && P2[k] != 2U) { goto ldv_48463; } else { } if ((P0[i] * P1) * P2[k] == candidate_p) { found = 1; candidate_p0[dco_count] = P0[i]; candidate_p1[dco_count] = P1; candidate_p2[dco_count] = P2[k]; goto found; } else { } ldv_48463: k = k + 1U; ldv_48466: ; if (k <= 3U) { goto ldv_48465; } else { } ldv_48462: i = i + 1U; ldv_48469: ; if (i <= 3U) { goto ldv_48468; } else { } P1 = P1 + 1U; ldv_48472: ; if (P1 < candidate_p) { goto ldv_48471; } else { } found: ; if ((int )found) { __a = (uint64_t )candidate_p * afe_clock; __b = dco_central_freq[dco_count]; tmp___3 = div64_u64((__a > __b ? 
__a - __b : __b - __a) * 10000ULL, dco_central_freq[dco_count]); dco_central_freq_deviation[dco_count] = (uint32_t )tmp___3; if (dco_central_freq_deviation[dco_count] < min_dco_deviation) { min_dco_deviation = dco_central_freq_deviation[dco_count]; min_dco_index = dco_count; } else { } } else { } if (min_dco_index > 2U && dco_count == 2U) { retry_with_odd = 1; dco_count = 0U; } else { } dco_count = dco_count + 1U; ldv_48478: ; if (dco_count <= 2U) { goto ldv_48477; } else { } if (min_dco_index > 2U) { __ret_warn_on = 1; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 1193, "No valid values found for the given pixel clock\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { wrpll_params->central_freq = (uint32_t )dco_central_freq[min_dco_index]; switch (dco_central_freq[min_dco_index]) { case 1010065408ULL: wrpll_params->central_freq = 0U; goto ldv_48483; case 410065408ULL: wrpll_params->central_freq = 1U; goto ldv_48483; case 0xfffffffff4add400ULL: wrpll_params->central_freq = 3U; } ldv_48483: ; switch (candidate_p0[min_dco_index]) { case 1U: wrpll_params->pdiv = 0U; goto ldv_48487; case 2U: wrpll_params->pdiv = 1U; goto ldv_48487; case 3U: wrpll_params->pdiv = 2U; goto ldv_48487; case 7U: wrpll_params->pdiv = 4U; goto ldv_48487; default: __ret_warn_on___0 = 1; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 1222, "Incorrect PDiv\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } ldv_48487: ; switch 
(candidate_p2[min_dco_index]) { case 5U: wrpll_params->kdiv = 0U; goto ldv_48495; case 2U: wrpll_params->kdiv = 1U; goto ldv_48495; case 3U: wrpll_params->kdiv = 2U; goto ldv_48495; case 1U: wrpll_params->kdiv = 3U; goto ldv_48495; default: __ret_warn_on___1 = 1; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 1239, "Incorrect KDiv\n"); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } ldv_48495: wrpll_params->qdiv_ratio = candidate_p1[min_dco_index]; wrpll_params->qdiv_mode = wrpll_params->qdiv_ratio != 1U; dco_freq = (uint64_t )((candidate_p0[min_dco_index] * candidate_p1[min_dco_index]) * candidate_p2[min_dco_index]) * afe_clock; tmp___7 = div_u64(dco_freq, 24000000U); wrpll_params->dco_integer = (uint32_t )tmp___7; tmp___8 = div_u64(dco_freq, 24U); tmp___9 = div_u64((tmp___8 - (u64 )(wrpll_params->dco_integer * 1000000U)) * 32768ULL, 1000000U); wrpll_params->dco_fraction = (uint32_t )tmp___9; } return; } } static bool skl_ddi_pll_select(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state , struct intel_encoder *intel_encoder , int clock ) { struct intel_shared_dpll *pll ; uint32_t ctrl1 ; uint32_t cfgcr1 ; uint32_t cfgcr2 ; struct skl_wrpll_params wrpll_params ; struct drm_encoder *encoder ; struct intel_dp *intel_dp ; struct intel_dp *tmp ; long tmp___0 ; { ctrl1 = 1U; if ((unsigned int )intel_encoder->type == 6U) { wrpll_params.dco_fraction = 0U; wrpll_params.dco_integer = 0U; wrpll_params.qdiv_ratio = 0U; wrpll_params.qdiv_mode = 0U; wrpll_params.kdiv = 0U; wrpll_params.pdiv = 0U; wrpll_params.central_freq = 0U; ctrl1 = ctrl1 | 32U; skl_ddi_calculate_wrpll(clock * 1000, & wrpll_params); cfgcr1 = ((wrpll_params.dco_fraction << 9) | wrpll_params.dco_integer) | 2147483648U; 
cfgcr2 = ((((wrpll_params.qdiv_ratio << 8) | (wrpll_params.qdiv_mode << 7)) | (wrpll_params.kdiv << 5)) | (wrpll_params.pdiv << 2)) | wrpll_params.central_freq; } else if ((unsigned int )intel_encoder->type == 7U) { encoder = & intel_encoder->base; tmp = enc_to_intel_dp(encoder); intel_dp = tmp; switch ((int )intel_dp->link_bw) { case 6: ctrl1 = ctrl1 | 4U; goto ldv_48516; case 10: ctrl1 = ctrl1 | 2U; goto ldv_48516; case 20: ctrl1 = ctrl1; goto ldv_48516; } ldv_48516: cfgcr2 = 0U; cfgcr1 = cfgcr2; } else { return (1); } memset((void *)(& crtc_state->dpll_hw_state), 0, 68UL); crtc_state->dpll_hw_state.ctrl1 = ctrl1; crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; pll = intel_get_shared_dpll(intel_crtc, crtc_state); if ((unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("skl_ddi_pll_select", "failed to find PLL for pipe %c\n", (int )intel_crtc->pipe + 65); } else { } return (0); } else { } crtc_state->ddi_pll_sel = (uint32_t )((int )pll->id + 1); return (1); } } static struct bxt_clk_div bxt_dp_clk_val[7U] = { {4U, 2U, 32U, 1677722U, 1, 1U}, {4U, 1U, 27U, 0U, 0, 1U}, {2U, 1U, 27U, 0U, 0, 1U}, {3U, 2U, 32U, 1677722U, 1, 1U}, {4U, 1U, 24U, 1258291U, 1, 1U}, {4U, 1U, 32U, 1677722U, 1, 1U}, {3U, 1U, 32U, 1677722U, 1, 1U}}; static bool bxt_ddi_pll_select(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state , struct intel_encoder *intel_encoder , int clock ) { struct intel_shared_dpll *pll ; struct bxt_clk_div clk_div ; int vco ; uint32_t prop_coef ; uint32_t int_coef ; uint32_t gain_ctl ; uint32_t targ_cnt ; uint32_t dcoampovr_en_h ; uint32_t dco_amp ; uint32_t lanestagger ; intel_clock_t best_clock ; long tmp ; bool tmp___0 ; int tmp___1 ; int __ret_warn_on ; long tmp___2 ; struct drm_encoder *encoder ; struct intel_dp *intel_dp ; struct intel_dp *tmp___3 ; long tmp___4 ; { clk_div.p1 = 0U; 
clk_div.p2 = 0U; clk_div.m2_int = 0U; clk_div.m2_frac = 0U; clk_div.m2_frac_en = (_Bool)0; clk_div.n = 0U; vco = 0; if ((unsigned int )intel_encoder->type == 6U) { tmp___0 = bxt_find_best_dpll(crtc_state, clock, & best_clock); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("bxt_ddi_pll_select", "no PLL dividers found for clock %d pipe %c\n", clock, (int )intel_crtc->pipe + 65); } else { } return (0); } else { } clk_div.p1 = (uint32_t )best_clock.p1; clk_div.p2 = (uint32_t )best_clock.p2; __ret_warn_on = best_clock.m1 != 2; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 1384, "WARN_ON(best_clock.m1 != 2)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); clk_div.n = (uint32_t )best_clock.n; clk_div.m2_int = (uint32_t )(best_clock.m2 >> 22); clk_div.m2_frac = (uint32_t )best_clock.m2 & 4194303U; clk_div.m2_frac_en = clk_div.m2_frac != 0U; vco = best_clock.vco; } else if ((unsigned int )intel_encoder->type == 7U || (unsigned int )intel_encoder->type == 8U) { encoder = & intel_encoder->base; tmp___3 = enc_to_intel_dp(encoder); intel_dp = tmp___3; switch ((int )intel_dp->link_bw) { case 6: clk_div = bxt_dp_clk_val[0]; goto ldv_48551; case 10: clk_div = bxt_dp_clk_val[1]; goto ldv_48551; case 20: clk_div = bxt_dp_clk_val[2]; goto ldv_48551; default: clk_div = bxt_dp_clk_val[0]; drm_err("Unknown link rate\n"); } ldv_48551: vco = (int )(((uint32_t )((clock * 10) / 2) * clk_div.p1) * clk_div.p2); } else { } dco_amp = 15U; dcoampovr_en_h = 0U; if (vco > 6199999 && vco <= 6480000) { prop_coef = 4U; int_coef = 9U; gain_ctl = 3U; targ_cnt = 8U; } else if ((vco > 5400000 && vco <= 6199999) || (vco > 
4799999 && vco <= 5399999)) { prop_coef = 5U; int_coef = 11U; gain_ctl = 3U; targ_cnt = 9U; if (vco > 4799999 && vco <= 5399999) { dcoampovr_en_h = 1U; } else { } } else if (vco == 5400000) { prop_coef = 3U; int_coef = 8U; gain_ctl = 1U; targ_cnt = 9U; } else { drm_err("Invalid VCO\n"); return (0); } memset((void *)(& crtc_state->dpll_hw_state), 0, 68UL); if (clock > 270000) { lanestagger = 24U; } else if (clock > 135000) { lanestagger = 13U; } else if (clock > 67000) { lanestagger = 7U; } else if (clock > 33000) { lanestagger = 4U; } else { lanestagger = 2U; } crtc_state->dpll_hw_state.ebb0 = (clk_div.p1 << 13) | (clk_div.p2 << 8); crtc_state->dpll_hw_state.pll0 = clk_div.m2_int; crtc_state->dpll_hw_state.pll1 = clk_div.n << 8; crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac; if ((int )clk_div.m2_frac_en) { crtc_state->dpll_hw_state.pll3 = 65536U; } else { } crtc_state->dpll_hw_state.pll6 = (int_coef << 8) | prop_coef; crtc_state->dpll_hw_state.pll6 = crtc_state->dpll_hw_state.pll6 | (gain_ctl << 16); crtc_state->dpll_hw_state.pll8 = targ_cnt; if (dcoampovr_en_h != 0U) { crtc_state->dpll_hw_state.pll10 = 134217728U; } else { } crtc_state->dpll_hw_state.pll10 = crtc_state->dpll_hw_state.pll10 | (dco_amp << 10); crtc_state->dpll_hw_state.pcsdw12 = lanestagger | 64U; pll = intel_get_shared_dpll(intel_crtc, crtc_state); if ((unsigned long )pll == (unsigned long )((struct intel_shared_dpll *)0)) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("bxt_ddi_pll_select", "failed to find PLL for pipe %c\n", (int )intel_crtc->pipe + 65); } else { } return (0); } else { } crtc_state->ddi_pll_sel = (uint32_t )pll->id; return (1); } } bool intel_ddi_pll_select(struct intel_crtc *intel_crtc , struct intel_crtc_state *crtc_state ) { struct drm_device *dev ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; int clock ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; struct drm_i915_private *__p ; struct 
drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = intel_crtc->base.dev; tmp = intel_ddi_get_crtc_new_encoder(crtc_state); intel_encoder = tmp; clock = crtc_state->port_clock; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { tmp___0 = skl_ddi_pll_select(intel_crtc, crtc_state, intel_encoder, clock); return (tmp___0); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { tmp___1 = bxt_ddi_pll_select(intel_crtc, crtc_state, intel_encoder, clock); return (tmp___1); } else { tmp___2 = hsw_ddi_pll_select(intel_crtc, crtc_state, intel_encoder, clock); return (tmp___2); } } else { tmp___2 = hsw_ddi_pll_select(intel_crtc, crtc_state, intel_encoder, clock); return (tmp___2); } } } } void intel_ddi_set_pipe_settings(struct drm_crtc *crtc ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; enum transcoder cpu_transcoder ; int type ; uint32_t temp ; { dev_priv = (struct drm_i915_private *)(crtc->dev)->dev_private; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = intel_ddi_get_crtc_encoder(crtc); intel_encoder = tmp; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; type = (int )intel_encoder->type; if ((type == 7 || type == 8) || type == 11) { temp = 1U; switch ((intel_crtc->config)->pipe_bpp) { case 18: temp = temp; goto ldv_48592; case 24: temp = temp | 32U; goto ldv_48592; case 30: temp = temp | 64U; goto ldv_48592; case 36: temp = temp | 96U; goto ldv_48592; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
/* Tail of intel_ddi_set_pipe_settings(): BUG() path (inline ud2 trap) for an
 * unsupported pipe_bpp, then the MSA-misc style write into the per-transcoder
 * register at trans_offset + 394256 (0x60410-relative; confirm against the
 * i915 register map). */
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c"), "i" (1541), "i" (12UL)); ldv_48597: ; goto ldv_48597; } ldv_48592: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394256U), temp, 1); } else { } return; } }
/* intel_ddi_set_vc_payload_alloc(): read-modify-write of bit 8 (0x100) in the
 * per-transcoder register at trans_offset + 394240 — sets the bit when
 * 'state' is true, clears it (mask 4294967039 == ~0x100) otherwise.
 * Presumably the TRANS_DDI_FUNC_CTL VC-payload-alloc bit — verify against
 * the i915 register definitions. */
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc , bool state ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum transcoder cpu_transcoder ; uint32_t temp ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); if ((int )state) { temp = temp | 256U; } else { temp = temp & 4294967039U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), temp, 1); return; } }
/* intel_ddi_enable_transcoder_func(): head only — the body continues in the
 * next chunk, where 'temp' accumulates the transcoder-enable word. */
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc ) { struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; struct drm_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; enum transcoder cpu_transcoder ; enum port port ; enum port tmp___0 ; int type ; uint32_t temp ;
struct drm_i915_private *__p ; struct intel_dp *intel_dp ; struct intel_dp *tmp___1 ; struct intel_dp *intel_dp___0 ; struct intel_dp_mst_encoder *tmp___2 ; int __ret_warn_on ; long tmp___3 ; { __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = intel_ddi_get_crtc_encoder(crtc); intel_encoder = tmp; encoder = & intel_encoder->base; dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = intel_crtc->pipe; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; tmp___0 = intel_ddi_get_encoder_port(intel_encoder); port = tmp___0; type = (int )intel_encoder->type; temp = 2147483648U; temp = ((unsigned int )port << 28) | temp; switch ((intel_crtc->config)->pipe_bpp) { case 18: temp = temp | 2097152U; goto ldv_48625; case 24: temp = temp; goto ldv_48625; case 30: temp = temp | 1048576U; goto ldv_48625; case 36: temp = temp | 3145728U; goto ldv_48625; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c"), "i" (1593), "i" (12UL)); ldv_48630: ; goto ldv_48630; } ldv_48625: ; if (((intel_crtc->config)->base.adjusted_mode.flags & 4U) != 0U) { temp = temp | 131072U; } else { } if ((int )(intel_crtc->config)->base.adjusted_mode.flags & 1) { temp = temp | 65536U; } else { } if ((unsigned int )cpu_transcoder == 3U) { switch ((int )pipe) { case 0: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && ((int )(intel_crtc->config)->pch_pfit.enabled || (int )(intel_crtc->config)->pch_pfit.force_thru)) { temp = temp | 16384U; } else { temp = temp; } goto ldv_48638; case 1: temp = temp | 20480U; goto ldv_48638; case 2: temp = temp | 24576U; goto ldv_48638; default: 
__asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c"), "i" (1622), "i" (12UL)); ldv_48642: ; goto ldv_48642; } ldv_48638: ; } else { } if (type == 6) { if ((int )(intel_crtc->config)->has_hdmi_sink) { temp = temp; } else { temp = temp | 16777216U; } } else if (type == 1) { temp = temp | 67108864U; temp = (uint32_t )(((intel_crtc->config)->fdi_lanes + -1) << 1) | temp; } else if (type == 7 || type == 8) { tmp___1 = enc_to_intel_dp(encoder); intel_dp = tmp___1; if ((int )intel_dp->is_mst) { temp = temp | 50331648U; } else { temp = temp | 33554432U; } temp = (uint32_t )(((int )intel_dp->lane_count + -1) << 1) | temp; } else if (type == 11) { tmp___2 = enc_to_mst(encoder); intel_dp___0 = & (tmp___2->primary)->dp; if ((int )intel_dp___0->is_mst) { temp = temp | 50331648U; } else { temp = temp | 33554432U; } temp = (uint32_t )(((int )intel_dp___0->lane_count + -1) << 1) | temp; } else { __ret_warn_on = 1; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 1658, "Invalid encoder type %d for pipe %c\n", (unsigned int )intel_encoder->type, (int )pipe + 65); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), temp, 1); return; } } void intel_ddi_disable_transcoder_func(struct 
drm_i915_private *dev_priv , enum transcoder cpu_transcoder ) { uint32_t reg ; uint32_t val ; uint32_t tmp ; { reg = ((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; val = val & 268435199U; val = val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); return; } } bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; int type ; enum port port ; enum port tmp ; enum pipe pipe ; enum transcoder cpu_transcoder ; enum intel_display_power_domain power_domain ; uint32_t tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; { dev = intel_connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_encoder = intel_connector->encoder; type = intel_connector->base.connector_type; tmp = intel_ddi_get_encoder_port(intel_encoder); port = tmp; pipe = 0; power_domain = intel_display_port_power_domain(intel_encoder); tmp___1 = intel_display_power_is_enabled(dev_priv, power_domain); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___3 = (*(intel_encoder->get_hw_state))(intel_encoder, & pipe); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (0); } else { } if ((unsigned int )port == 0U) { cpu_transcoder = 3; } else { cpu_transcoder = (enum transcoder )pipe; } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); switch (tmp___0 & 117440512U) { case 0U: ; case 16777216U: ; return (type == 11); case 33554432U: ; if (type == 14) { return 
(1); } else { } return (type == 10); case 50331648U: ; return (0); case 67108864U: ; return (type == 1); default: ; return (0); } } } bool intel_ddi_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; enum port tmp ; enum intel_display_power_domain power_domain ; u32 tmp___0 ; int i ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_ddi_get_encoder_port(encoder); port = tmp; power_domain = intel_display_port_power_domain(encoder); tmp___1 = intel_display_power_is_enabled(dev_priv, power_domain); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), 1); if ((int )tmp___0 >= 0) { return (0); } else { } if ((unsigned int )port == 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[3] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); switch (tmp___0 & 28672U) { case 0U: ; case 16384U: *pipe = 0; goto ldv_48683; case 20480U: *pipe = 1; goto ldv_48683; case 24576U: *pipe = 2; goto ldv_48683; } ldv_48683: ; return (1); } else { i = 0; goto ldv_48687; ldv_48686: tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[i] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); if ((tmp___0 & 1879048192U) == (unsigned int )port << 28) { if ((tmp___0 & 117440512U) == 50331648U) { return (0); } else { } *pipe = (enum pipe )i; return (1); } else { } i = i + 1; ldv_48687: ; if (i <= 2) { goto ldv_48686; } else { } } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { 
drm_ut_debug_printk("intel_ddi_get_hw_state", "No pipe for ddi port %c found\n", (unsigned int )port + 65U); } else { } return (0); } } void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc ) { struct drm_crtc *crtc ; struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; enum port port ; enum port tmp___0 ; enum transcoder cpu_transcoder ; { crtc = & intel_crtc->base; dev_priv = (struct drm_i915_private *)(crtc->dev)->dev_private; tmp = intel_ddi_get_crtc_encoder(crtc); intel_encoder = tmp; tmp___0 = intel_ddi_get_encoder_port(intel_encoder); port = tmp___0; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; if ((unsigned int )cpu_transcoder != 3U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )cpu_transcoder + 71760U) * 4U), ((unsigned int )port + 1U) << 29, 1); } else { } return; } } void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc ) { struct drm_i915_private *dev_priv ; enum transcoder cpu_transcoder ; { dev_priv = (struct drm_i915_private *)(intel_crtc->base.dev)->dev_private; cpu_transcoder = (intel_crtc->config)->cpu_transcoder; if ((unsigned int )cpu_transcoder != 3U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )cpu_transcoder + 71760U) * 4U), 0U, 1); } else { } return; } } void bxt_ddi_vswing_sequence(struct drm_device *dev , u32 level , enum port port , int type ) { struct drm_i915_private *dev_priv ; struct bxt_ddi_buf_trans const *ddi_translations ; u32 n_entries ; u32 i ; uint32_t val ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (type == 7 || type == 8) { n_entries = 10U; ddi_translations = (struct bxt_ddi_buf_trans const *)(& bxt_ddi_translations_dp); } else if (type == 6) { n_entries = 10U; ddi_translations = (struct bxt_ddi_buf_trans const *)(& bxt_ddi_translations_hdmi); } else { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { 
drm_ut_debug_printk("bxt_ddi_vswing_sequence", "Vswing programming not done for encoder %d\n", type); } else { } return; } if (level >= n_entries || (type == 6 && level == 255U)) { i = 0U; goto ldv_48721; ldv_48720: ; if ((int )(ddi_translations + (unsigned long )i)->default_index) { level = i; goto ldv_48719; } else { } i = i + 1U; ldv_48721: ; if (i < n_entries) { goto ldv_48720; } else { } ldv_48719: ; } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443432L : 444456L) : 1451048L, 1); val = val & 1073741823U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 445480L : 445992L) : 1453096L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443656L : 444680L) : 1451272L, 1); val = val & 4278190335U; val = ((unsigned int )((ddi_translations + (unsigned long )level)->margin << 16) | (unsigned int )((ddi_translations + (unsigned long )level)->scale << 8)) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 445704L : 446216L) : 1453320L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443660L : 444684L) : 1451276L, 1); val = val & 4160749567U; if ((unsigned int )(ddi_translations + (unsigned long )level)->enable != 0U) { val = val | 134217728U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 445708L : 446220L) : 1453324L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443664L : 444688L) : 1451280L, 1); val = val & 16777215U; val = (uint32_t )((ddi_translations + (unsigned long )level)->deemphasis << 24) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? 
((unsigned int )port == 1U ? 445712L : 446224L) : 1453328L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443432L : 444456L) : 1451048L, 1); val = val | 3221225472U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 445480L : 445992L) : 1453096L, val, 1); return; } } static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder ) { struct drm_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; enum port port ; enum port tmp ; int type ; int hdmi_level ; struct intel_dp *intel_dp ; struct intel_dp *tmp___0 ; uint32_t dpll ; uint32_t val ; int __ret_warn_on ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct intel_dp *intel_dp___0 ; struct intel_dp *tmp___3 ; struct drm_i915_private *__p___1 ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp___4 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { encoder = & intel_encoder->base; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; crtc = (struct intel_crtc *)__mptr; tmp = intel_ddi_get_encoder_port(intel_encoder); port = tmp; type = (int )intel_encoder->type; if (type == 8) { tmp___0 = enc_to_intel_dp(encoder); intel_dp = tmp___0; intel_edp_panel_on(intel_dp); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { dpll = (crtc->config)->ddi_pll_sel; if (type == 8) { __ret_warn_on = dpll != 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 1887, "WARN_ON(dpll != SKL_DPLL0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 1); val = (uint32_t )(~ (((1 << (int )(dpll * 6U + 5U)) | (1 << (int )(dpll * 6U + 4U))) | (7 << (int )(dpll * 6U + 1U)))) & val; val = ((crtc->config)->dpll_hw_state.ctrl1 << (int )(dpll * 6U)) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 442456L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 0); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442460L, 1); val = (uint32_t )(~ ((1 << (int )((unsigned int )port + 15U)) | (3 << (int )((unsigned int )port * 3U + 1U)))) & val; val = ((dpll << (int )((unsigned int )port * 3U + 1U)) | (uint32_t )(1 << (int )((unsigned int )port * 3U))) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 442460L, val, 1); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { __ret_warn_on___0 = (crtc->config)->ddi_pll_sel == 3758096384U; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 1911, "WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 71744U) * 4U), (crtc->config)->ddi_pll_sel, 1); } else { } } if (type == 7 || type == 8) { tmp___3 = enc_to_intel_dp(encoder); intel_dp___0 = tmp___3; intel_ddi_init_dp_buf_reg(intel_encoder); 
intel_dp_sink_dpms(intel_dp___0, 0); intel_dp_start_link_train(intel_dp___0); intel_dp_complete_link_train(intel_dp___0); if ((unsigned int )port != 0U) { intel_dp_stop_link_train(intel_dp___0); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 8U) { intel_dp_stop_link_train(intel_dp___0); } else { } } } else if (type == 6) { tmp___4 = enc_to_intel_hdmi(encoder); intel_hdmi = tmp___4; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { hdmi_level = (int )dev_priv->vbt.ddi_port_info[(unsigned int )port].hdmi_level_shift; bxt_ddi_vswing_sequence(dev, (u32 )hdmi_level, port, 6); } else { } } else { } (*(intel_hdmi->set_infoframes))(encoder, (int )(crtc->config)->has_hdmi_sink, & (crtc->config)->base.adjusted_mode); } else { } return; } } static void intel_ddi_post_disable(struct intel_encoder *intel_encoder ) { struct drm_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; enum port tmp ; int type ; uint32_t val ; bool wait ; struct intel_dp *intel_dp ; struct intel_dp *tmp___0 ; uint32_t tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { encoder = & intel_encoder->base; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_ddi_get_encoder_port(intel_encoder); port = tmp; type = (int )intel_encoder->type; wait = 0; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), 1); if ((int )val < 0) { val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), val, 1); wait = 1; } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), 1); val = val & 2147481855U; val 
= val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), val, 1); if ((int )wait) { intel_wait_ddi_buf_idle(dev_priv, port); } else { } if (type == 7 || type == 8) { tmp___0 = enc_to_intel_dp(encoder); intel_dp = tmp___0; intel_dp_sink_dpms(intel_dp, 3); intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442460L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 442460L, tmp___1 | (uint32_t )(1 << (int )((unsigned int )port + 15U)), 1); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 71744U) * 4U), 3758096384U, 1); } else { } } return; } } static void intel_enable_ddi(struct intel_encoder *intel_encoder ) { struct drm_encoder *encoder ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; enum port tmp ; int type ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; struct intel_dp *intel_dp ; struct intel_dp *tmp___1 ; struct drm_i915_private *__p ; { encoder = & intel_encoder->base; crtc = encoder->crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_ddi_get_encoder_port(intel_encoder); port = tmp; type = (int )intel_encoder->type; if (type == 6) { tmp___0 = enc_to_dig_port(encoder); intel_dig_port = tmp___0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), intel_dig_port->saved_port_bits | 2147483648U, 1); } else if (type == 8) { tmp___1 = 
enc_to_intel_dp(encoder); intel_dp = tmp___1; if ((unsigned int )port == 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { intel_dp_stop_link_train(intel_dp); } else { } } else { } intel_edp_backlight_on(intel_dp); intel_psr_enable(intel_dp); intel_edp_drrs_enable(intel_dp); } else { } if ((int )(intel_crtc->config)->has_audio) { intel_display_power_get(dev_priv, 22); intel_audio_codec_enable(intel_encoder); } else { } return; } } static void intel_disable_ddi(struct intel_encoder *intel_encoder ) { struct drm_encoder *encoder ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int type ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dp *intel_dp ; struct intel_dp *tmp ; { encoder = & intel_encoder->base; crtc = encoder->crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; type = (int )intel_encoder->type; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )(intel_crtc->config)->has_audio) { intel_audio_codec_disable(intel_encoder); intel_display_power_put(dev_priv, 22); } else { } if (type == 8) { tmp = enc_to_intel_dp(encoder); intel_dp = tmp; intel_edp_drrs_disable(intel_dp); intel_psr_disable(intel_dp); intel_edp_backlight_off(intel_dp); } else { } return; } } static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )pll->id == 0 ? 286784L : 286816L, pll->config.hw_state.wrpll, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 286784L : 286816L, 0); __const_udelay(85900UL); return; } } static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { uint32_t val ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 
286784L : 286816L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )pll->id == 0 ? 286784L : 286816L, val & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 286784L : 286816L, 0); return; } } static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll , struct intel_dpll_hw_state *hw_state ) { uint32_t val ; bool tmp ; int tmp___0 ; { tmp = intel_display_power_is_enabled(dev_priv, 23); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (int )pll->id == 0 ? 286784L : 286816L, 1); hw_state->wrpll = val; return ((val & 2147483648U) != 0U); } } static char const * const hsw_ddi_pll_names[2U] = { "WRPLL 1", "WRPLL 2"}; static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv ) { int i ; { dev_priv->num_shared_dpll = 2; i = 0; goto ldv_48849; ldv_48848: dev_priv->shared_dplls[i].id = (enum intel_dpll_id )i; dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; dev_priv->shared_dplls[i].disable = & hsw_ddi_pll_disable; dev_priv->shared_dplls[i].enable = & hsw_ddi_pll_enable; dev_priv->shared_dplls[i].get_hw_state = & hsw_ddi_pll_get_hw_state; i = i + 1; ldv_48849: ; if (dev_priv->num_shared_dpll > i) { goto ldv_48848; } else { } return; } } static char const * const skl_ddi_pll_names[3U] = { "DPLL 1", "DPLL 2", "DPLL 3"}; static struct skl_dpll_regs const skl_dpll_regs[3U] = { {286740U, 442432U, 442436U}, {286784U, 442440U, 442444U}, {286816U, 442448U, 442452U}}; static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { uint32_t val ; unsigned int dpll ; struct skl_dpll_regs const *regs ; uint32_t tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; { regs = (struct skl_dpll_regs const *)(& skl_dpll_regs); dpll = (unsigned int )((int )pll->id + 1); val = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 1); val = (uint32_t )(~ (((1 << (int )(dpll * 6U + 5U)) | (1 << (int )(dpll * 6U + 4U))) | (7 << (int )(dpll * 6U + 1U)))) & val; val = (pll->config.hw_state.ctrl1 << (int )(dpll * 6U)) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 442456L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->cfgcr1, pll->config.hw_state.cfgcr1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->cfgcr2, pll->config.hw_state.cfgcr2, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->cfgcr1, 0); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->cfgcr2, 0); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->ctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->ctl, tmp | 2147483648U, 1); tmp___0 = msecs_to_jiffies(5U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48874; ldv_48873: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442464L, 1); if ((tmp___1 & (uint32_t )(1 << (int )(dpll * 8U))) == 0U) { ret__ = -110; } else { } goto ldv_48872; } else { } tmp___2 = drm_can_sleep___11(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48874: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442464L, 1); if ((tmp___3 & (uint32_t )(1 << (int )(dpll * 8U))) == 0U) { goto ldv_48873; } else { } ldv_48872: ; if (ret__ != 0) { drm_err("DPLL %d not locked\n", dpll); } else { } return; } } static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { struct skl_dpll_regs const *regs ; uint32_t tmp ; { regs = (struct 
skl_dpll_regs const *)(& skl_dpll_regs); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->ctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->ctl, tmp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->ctl, 0); return; } } static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll , struct intel_dpll_hw_state *hw_state ) { uint32_t val ; unsigned int dpll ; struct skl_dpll_regs const *regs ; bool tmp ; int tmp___0 ; { regs = (struct skl_dpll_regs const *)(& skl_dpll_regs); tmp = intel_display_power_is_enabled(dev_priv, 23); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } dpll = (unsigned int )((int )pll->id + 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->ctl, 1); if ((int )val >= 0) { return (0); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442456L, 1); hw_state->ctrl1 = (val >> (int )(dpll * 6U)) & 63U; if (((uint32_t )(1 << (int )(dpll * 6U + 5U)) & val) != 0U) { hw_state->cfgcr1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->cfgcr1, 1); hw_state->cfgcr2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(regs + (unsigned long )pll->id)->cfgcr2, 1); } else { } return (1); } } static void skl_shared_dplls_init(struct drm_i915_private *dev_priv ) { int i ; { dev_priv->num_shared_dpll = 3; i = 0; goto ldv_48894; ldv_48893: dev_priv->shared_dplls[i].id = (enum intel_dpll_id )i; dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i]; dev_priv->shared_dplls[i].disable = & skl_ddi_pll_disable; dev_priv->shared_dplls[i].enable = & skl_ddi_pll_enable; dev_priv->shared_dplls[i].get_hw_state = & skl_ddi_pll_get_hw_state; i = i + 1; ldv_48894: ; if (dev_priv->num_shared_dpll > i) { goto ldv_48893; } else { } 
return; } } static void broxton_phy_init(struct drm_i915_private *dev_priv , enum dpio_phy phy ) { enum port port ; uint32_t val ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; int lane ; uint32_t grc_code ; unsigned long timeout_____0 ; unsigned long tmp___3 ; int ret_____0 ; uint32_t tmp___4 ; bool tmp___5 ; uint32_t tmp___6 ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1278096L, 1); val = (uint32_t )(1 << (int )phy) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278096L, val, 1); tmp = msecs_to_jiffies(10U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48912; ldv_48911: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442368U), 1); if ((tmp___0 & 65536U) == 0U) { ret__ = -110; } else { } goto ldv_48910; } else { } tmp___1 = drm_can_sleep___11(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48912: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442368U), 1); if ((tmp___2 & 65536U) == 0U) { goto ldv_48911; } else { } ldv_48910: ; if (ret__ != 0) { drm_err("timeout during PHY%d power on\n", (unsigned int )phy); } else { } port = (unsigned int )phy == 0U; goto ldv_48919; ldv_48918: lane = 0; goto ldv_48916; ldv_48915: val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443704 : 444728) : 1451320) + ((lane >> 1) * 4 + (lane & 1)) * 128), 1); val = val & 3221225471U; if (lane != 1) { val = val | 1073741824U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port != 0U ? ((unsigned int )port == 1U ? 
443704 : 444728) : 1451320) + ((lane >> 1) * 4 + (lane & 1)) * 128), val, 1); lane = lane + 1; ldv_48916: ; if (lane <= 3) { goto ldv_48915; } else { } port = (enum port )((unsigned int )port + 1U); ldv_48919: ; if (((unsigned int )phy == 0U ? 2U : 0U) >= (unsigned int )port) { goto ldv_48918; } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442404U), 1); val = val & 4294902015U; val = val | 58368U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442404U), val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442408U), 1); val = val & 4294902015U; val = val | 58368U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442408U), val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442480U), 1); val = val | 12582915U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442480U), val, 1); if ((unsigned int )phy == 0U) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 443224L, 1); val = val | 268435456U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 443224L, val, 1); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442488U), 1); val = val & 4294967231U; if ((unsigned int )phy == 1U) { val = val | 64U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )phy * 1007616U + 442488U), val, 1); if ((unsigned int )phy == 0U) { tmp___3 = msecs_to_jiffies(10U); timeout_____0 = (tmp___3 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_48932; ldv_48931: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1450380L, 1); if ((tmp___4 & 4194304U) == 0U) { ret_____0 = -110; } else { } goto ldv_48930; } else { } tmp___5 = 
drm_can_sleep___11(); if ((int )tmp___5) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48932: tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1450380L, 1); if ((tmp___6 & 4194304U) == 0U) { goto ldv_48931; } else { } ldv_48930: ; if (ret_____0 != 0) { drm_err("timeout waiting for PHY1 GRC\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1450392L, 1); val = val >> 23; grc_code = ((val << 16) | (val << 8)) | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 442776L, grc_code, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 442784L, 1); val = val | 32770U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 442784L, val, 1); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 4294967280U + 412816U), 1); val = val | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )phy * 4294967280U + 412816U), val, 1); return; } } void broxton_ddi_phy_init(struct drm_device *dev ) { { broxton_phy_init((struct drm_i915_private *)dev->dev_private, 1); broxton_phy_init((struct drm_i915_private *)dev->dev_private, 0); return; } } static void broxton_phy_uninit(struct drm_i915_private *dev_priv , enum dpio_phy phy ) { uint32_t val ; { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )phy * 4294967280U + 412816U), 1); val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )phy * 4294967280U + 412816U), val, 1); return; } } void broxton_ddi_phy_uninit(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; broxton_phy_uninit(dev_priv, 1); broxton_phy_uninit(dev_priv, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1278096L, 0U, 1); return; } } static char const * const bxt_ddi_pll_names[3U] = { "PORT PLL A", "PORT PLL B", "PORT PLL C"}; static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv , struct 
intel_shared_dpll *pll ) { uint32_t temp ; enum port port ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; uint32_t tmp___1 ; { port = (enum port )pll->id; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 1); temp = temp & 4160749567U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442424L : 443204L) : 1450040L, 1); temp = temp & 4294959103U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442424L : 443204L) : 1450040L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442420L : 443200L) : 1450036L, 1); temp = temp & 4294902015U; temp = pll->config.hw_state.ebb0 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442420L : 443200L) : 1450036L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442624L : 443264L) : 1450240L, 1); temp = temp & 4294967040U; temp = pll->config.hw_state.pll0 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442624L : 443264L) : 1450240L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442628L : 443268L) : 1450244L, 1); temp = temp & 4294963455U; temp = pll->config.hw_state.pll1 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442628L : 443268L) : 1450244L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 
442632L : 443272L) : 1450248L, 1); temp = temp & 4290772992U; temp = pll->config.hw_state.pll2 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442632L : 443272L) : 1450248L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442636L : 443276L) : 1450252L, 1); temp = temp & 4294901759U; temp = pll->config.hw_state.pll3 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442636L : 443276L) : 1450252L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442648L : 443288L) : 1450264L, 1); temp = temp & 4294967280U; temp = temp & 4294959359U; temp = temp & 4294508543U; temp = pll->config.hw_state.pll6 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442648L : 443288L) : 1450264L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442656L : 443296L) : 1450272L, 1); temp = temp & 4294966272U; temp = pll->config.hw_state.pll8 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442656L : 443296L) : 1450272L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442660L : 443300L) : 1450276L, 1); temp = temp & 4294967281U; temp = temp | 10U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442660L : 443300L) : 1450276L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 
442664L : 443304L) : 1450280L, 1); temp = temp & 4160749567U; temp = temp & 4294951935U; temp = pll->config.hw_state.pll10 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442664L : 443304L) : 1450280L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442424L : 443204L) : 1450040L, 1); temp = temp | 16384U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442424L : 443204L) : 1450040L, temp, 1); temp = temp | 8192U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442424L : 443204L) : 1450040L, temp, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 1); temp = temp | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 0); tmp = msecs_to_jiffies(1U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48963; ldv_48962: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 1); if ((tmp___0 & 1073741824U) == 0U) { ret__ = -110; } else { } goto ldv_48961; } else { } cpu_relax(); ldv_48963: tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 1); if ((tmp___1 & 1073741824U) == 0U) { goto ldv_48962; } else { } ldv_48961: ; if (ret__ != 0) { drm_err("PLL %d not locked\n", (unsigned int )port); } else { } temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 
443440L : 444464L) : 1451056L, 1); temp = temp & 4294967264U; temp = temp & 4294967231U; temp = pll->config.hw_state.pcsdw12 | temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 445488L : 446000L) : 1453104L, temp, 1); return; } } static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll ) { enum port port ; uint32_t temp ; { port = (enum port )pll->id; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 1); temp = temp & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 0); return; } } static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv , struct intel_shared_dpll *pll , struct intel_dpll_hw_state *hw_state ) { enum port port ; uint32_t val ; bool tmp ; int tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; { port = (enum port )pll->id; tmp = intel_display_power_is_enabled(dev_priv, 23); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 71709U) * 4U), 1); if ((int )val >= 0) { return (0); } else { } hw_state->ebb0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442420L : 443200L) : 1450036L, 1); hw_state->pll0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442624L : 443264L) : 1450240L, 1); hw_state->pll1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442628L : 443268L) : 1450244L, 1); hw_state->pll2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 
442632L : 443272L) : 1450248L, 1); hw_state->pll3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442636L : 443276L) : 1450252L, 1); hw_state->pll6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442648L : 443288L) : 1450264L, 1); hw_state->pll8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442656L : 443296L) : 1450272L, 1); hw_state->pll10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 442664L : 443304L) : 1450280L, 1); hw_state->pcsdw12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443440L : 444464L) : 1451056L, 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, ((unsigned int )port != 0U ? ((unsigned int )port == 1U ? 443952U : 444976U) : 1451568U) != hw_state->pcsdw12, 1); if (tmp___3 != 0U) { tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 
443952L : 444976L) : 1451568L, 1); drm_ut_debug_printk("bxt_ddi_pll_get_hw_state", "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", hw_state->pcsdw12, tmp___1); } else { } } else { } return (1); } } static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv ) { int i ; { dev_priv->num_shared_dpll = 3; i = 0; goto ldv_48984; ldv_48983: dev_priv->shared_dplls[i].id = (enum intel_dpll_id )i; dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i]; dev_priv->shared_dplls[i].disable = & bxt_ddi_pll_disable; dev_priv->shared_dplls[i].enable = & bxt_ddi_pll_enable; dev_priv->shared_dplls[i].get_hw_state = & bxt_ddi_pll_get_hw_state; i = i + 1; ldv_48984: ; if (dev_priv->num_shared_dpll > i) { goto ldv_48983; } else { } return; } } void intel_ddi_pll_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t val ; uint32_t tmp ; int cdclk_freq ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp___0 ; uint32_t tmp___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1245248L, 1); val = tmp; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { skl_shared_dplls_init(dev_priv); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { bxt_shared_dplls_init(dev_priv); } else { hsw_shared_dplls_init(dev_priv); } } else { hsw_shared_dplls_init(dev_priv); } } cdclk_freq = (*(dev_priv->display.get_display_clock_speed))(dev); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_ddi_pll_init", "CDCLK running at %dKHz\n", 
cdclk_freq); } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { dev_priv->skl_boot_cdclk = (unsigned int )cdclk_freq; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 286736L, 1); if ((int )tmp___1 >= 0) { drm_err("LCPLL1 is disabled\n"); } else { intel_display_power_get(dev_priv, 23); } } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { broxton_init_cdclk(dev); broxton_ddi_phy_init(dev); } else { goto _L; } } else { _L: /* CIL Label */ if ((val & 2097152U) != 0U) { drm_err("CDCLK source is not LCPLL\n"); } else { } if ((int )val < 0) { drm_err("LCPLL is disabled\n"); } else { } } } return; } } void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_dp *intel_dp ; struct drm_i915_private *dev_priv ; enum port port ; uint32_t val ; bool wait ; uint32_t tmp___0 ; bool tmp___1 ; { tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; intel_dp = & intel_dig_port->dp; dev_priv = (struct drm_i915_private *)(encoder->dev)->dev_private; port = intel_dig_port->port; wait = 0; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), 1); if ((int )tmp___0 < 0) { val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), 1); if ((int )val < 0) { val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), val, 1); wait = 1; } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), 1); val = val & 2147481855U; val = val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )port * 
256U + 409664U), val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), 0); if ((int )wait) { intel_wait_ddi_buf_idle(dev_priv, port); } else { } } else { } val = 2147483776U; if ((int )intel_dp->is_mst) { val = val | 134217728U; } else { val = val; tmp___1 = drm_dp_enhanced_frame_cap((u8 const *)(& intel_dp->dpcd)); if ((int )tmp___1) { val = val | 262144U; } else { } } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), 0); intel_dp->DP = intel_dp->DP | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), intel_dp->DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), 0); __const_udelay(2577000UL); return; } } void intel_ddi_fdi_disable(struct drm_crtc *crtc ) { struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; uint32_t val ; { dev_priv = (struct drm_i915_private *)(crtc->dev)->dev_private; tmp = intel_ddi_get_crtc_encoder(crtc); intel_encoder = tmp; intel_ddi_post_disable(intel_encoder); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 1); val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983052L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983056L, 1); val = val & 4043309055U; val = val | 167772160U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983056L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 1); val = val & 4294967279U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983052L, val, 1); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 983052L, 1); val = val & 4294959103U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 983052L, val, 1); return; } } static void intel_ddi_hot_plug(struct intel_encoder 
*intel_encoder ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; int type ; { tmp = enc_to_dig_port(& intel_encoder->base); intel_dig_port = tmp; type = (int )intel_dig_port->base.type; if ((type != 7 && type != 8) && type != 10) { return; } else { } intel_dp_hot_plug(intel_encoder); return; } } void intel_ddi_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum transcoder cpu_transcoder ; struct intel_hdmi *intel_hdmi ; u32 temp ; u32 flags ; bool tmp ; bool tmp___0 ; long tmp___1 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; cpu_transcoder = pipe_config->cpu_transcoder; flags = 0U; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); if ((temp & 65536U) != 0U) { flags = flags | 1U; } else { flags = flags | 2U; } if ((temp & 131072U) != 0U) { flags = flags | 4U; } else { flags = flags | 8U; } pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | flags; switch (temp & 7340032U) { case 2097152U: pipe_config->pipe_bpp = 18; goto ldv_49062; case 0U: pipe_config->pipe_bpp = 24; goto ldv_49062; case 1048576U: pipe_config->pipe_bpp = 30; goto ldv_49062; case 3145728U: pipe_config->pipe_bpp = 36; goto ldv_49062; default: ; goto ldv_49062; } ldv_49062: ; switch (temp & 117440512U) { case 0U: pipe_config->has_hdmi_sink = 1; intel_hdmi = enc_to_intel_hdmi(& encoder->base); tmp = (*(intel_hdmi->infoframe_enabled))(& encoder->base); if ((int )tmp) { pipe_config->has_infoframe = 1; } else { } goto ldv_49068; case 16777216U: ; case 67108864U: ; goto ldv_49068; case 33554432U: 
; case 50331648U: pipe_config->has_dp_encoder = 1; intel_dp_get_m_n(intel_crtc, pipe_config); goto ldv_49068; default: ; goto ldv_49068; } ldv_49068: tmp___0 = intel_display_power_is_enabled(dev_priv, 22); if ((int )tmp___0) { temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 413888L, 1); if (((u32 )(4 << (int )intel_crtc->pipe * 4) & temp) != 0U) { pipe_config->has_audio = 1; } else { } } else { } if (((unsigned int )encoder->type == 8U && dev_priv->vbt.edp_bpp != 0) && pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_ddi_get_config", "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); } else { } dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; } else { } intel_ddi_clock_get(encoder, pipe_config); return; } } static void intel_ddi_destroy(struct drm_encoder *encoder ) { { intel_dp_encoder_destroy(encoder); return; } } static bool intel_ddi_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { int type ; int port ; enum port tmp ; int __ret_warn_on ; long tmp___0 ; bool tmp___1 ; bool tmp___2 ; { type = (int )encoder->type; tmp = intel_ddi_get_encoder_port(encoder); port = (int )tmp; __ret_warn_on = type == 10; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_ddi.c", 2734, "compute_config() on unknown output!\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if (port == 0) { pipe_config->cpu_transcoder = 3; } else { } if (type == 6) { tmp___1 = intel_hdmi_compute_config(encoder, pipe_config); return (tmp___1); } else { tmp___2 = intel_dp_compute_config(encoder, pipe_config); return (tmp___2); } } } 
/* Encoder vtable for DDI encoders: slot 0 (reset) is NULL, destroy is
 * intel_ddi_destroy. */
static struct drm_encoder_funcs const intel_ddi_funcs = {0, & intel_ddi_destroy};

/* intel_ddi_init_dp_connector(): allocate a connector for this digital
 * port's DP output, record the per-port output register offset
 * ((port + 1600) * 256; presumably the DDI buffer control block — verify
 * against the i915 register definitions) and initialize it via
 * intel_dp_init_connector().  Frees the connector and returns NULL if that
 * initialization fails.  Ownership of the returned connector stays with the
 * caller's encoder teardown path. */
static struct intel_connector *intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port )
{
  struct intel_connector *connector ;
  enum port port ;
  bool tmp ;
  int tmp___0 ;

  {
  port = intel_dig_port->port;
  connector = intel_connector_alloc();
  if ((unsigned long )connector == (unsigned long )((struct intel_connector *)0)) {
    return ((struct intel_connector *)0);
  } else {
  }
  intel_dig_port->dp.output_reg = ((unsigned int )port + 1600U) * 256U;
  tmp = intel_dp_init_connector(intel_dig_port, connector);
  if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; }
  if (tmp___0) {
    /* DP connector init failed: release the allocation. */
    kfree((void const *)connector);
    return ((struct intel_connector *)0);
  } else {
  }
  return (connector);
  }
}

/* intel_ddi_init_hdmi_connector(): HDMI counterpart of the function above;
 * uses the same per-port register stride for hdmi_reg.  Unlike the DP path,
 * intel_hdmi_init_connector()'s result is not checked here. */
static struct intel_connector *intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port )
{
  struct intel_connector *connector ;
  enum port port ;

  {
  port = intel_dig_port->port;
  connector = intel_connector_alloc();
  if ((unsigned long )connector == (unsigned long )((struct intel_connector *)0)) {
    return ((struct intel_connector *)0);
  } else {
  }
  intel_dig_port->hdmi.hdmi_reg = ((unsigned int )port + 1600U) * 256U;
  intel_hdmi_init_connector(intel_dig_port, connector);
  return (connector);
  }
}

/* intel_ddi_init(): create a DDI digital-port encoder for @port.
 * Reads the VBT capability flags (DVI/HDMI/DP); if the VBT claims none,
 * assumes both DP and HDMI.  Allocates the intel_digital_port, wires the
 * encoder vtable (compute_config/enable/pre_enable/disable/post_disable/
 * get_hw_state/get_config/hot_plug), snapshots the port's saved bits
 * (mask 65552 == 0x10010) from the per-port register, then creates the DP
 * and/or HDMI connectors.  On connector failure, unwinds via the err label
 * (encoder cleanup + kfree).  Note: intel_encoder->type may be changed to 8
 * (eDP) by intel_ddi_init_dp_connector's callees — that is why the HDMI
 * branch re-tests it. */
void intel_ddi_init(struct drm_device *dev , enum port port )
{
  struct drm_i915_private *dev_priv ;
  struct intel_digital_port *intel_dig_port ;
  struct intel_encoder *intel_encoder ;
  struct drm_encoder *encoder ;
  bool init_hdmi ;
  bool init_dp ;
  long tmp ;
  void *tmp___0 ;
  uint32_t tmp___1 ;
  struct intel_connector *tmp___2 ;
  struct intel_connector *tmp___3 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  init_hdmi = (bool )((unsigned int )dev_priv->vbt.ddi_port_info[(unsigned int )port].supports_dvi != 0U || (unsigned int )dev_priv->vbt.ddi_port_info[(unsigned int )port].supports_hdmi != 0U);
  init_dp = (int )dev_priv->vbt.ddi_port_info[(unsigned int )port].supports_dp != 0;
  if (! init_dp && ! init_hdmi) {
    tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
    if (tmp != 0L) {
      drm_ut_debug_printk("intel_ddi_init", "VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n", (unsigned int )port + 65U);
    } else {
    }
    init_hdmi = 1;
    init_dp = 1;
  } else {
  }
  tmp___0 = kzalloc(4744UL, 208U);
  intel_dig_port = (struct intel_digital_port *)tmp___0;
  if ((unsigned long )intel_dig_port == (unsigned long )((struct intel_digital_port *)0)) {
    return;
  } else {
  }
  intel_encoder = & intel_dig_port->base;
  encoder = & intel_encoder->base;
  drm_encoder_init(dev, encoder, & intel_ddi_funcs, 2);
  intel_encoder->compute_config = & intel_ddi_compute_config;
  intel_encoder->enable = & intel_enable_ddi;
  intel_encoder->pre_enable = & intel_ddi_pre_enable;
  intel_encoder->disable = & intel_disable_ddi;
  intel_encoder->post_disable = & intel_ddi_post_disable;
  intel_encoder->get_hw_state = & intel_ddi_get_hw_state;
  intel_encoder->get_config = & intel_ddi_get_config;
  intel_dig_port->port = port;
  tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )port + 1600U) * 256U), 1);
  intel_dig_port->saved_port_bits = tmp___1 & 65552U;
  intel_encoder->type = 10;
  intel_encoder->crtc_mask = 7;
  intel_encoder->cloneable = 0U;
  intel_encoder->hot_plug = & intel_ddi_hot_plug;
  if ((int )init_dp) {
    tmp___2 = intel_ddi_init_dp_connector(intel_dig_port);
    if ((unsigned long )tmp___2 == (unsigned long )((struct intel_connector *)0)) {
      goto err;
    } else {
    }
    intel_dig_port->hpd_pulse = & intel_dp_hpd_pulse;
    dev_priv->hpd_irq_port[(unsigned int )port] = intel_dig_port;
  } else {
  }
  if ((unsigned int )intel_encoder->type != 8U && (int )init_hdmi) {
    tmp___3 = intel_ddi_init_hdmi_connector(intel_dig_port);
    if ((unsigned long )tmp___3 == (unsigned long )((struct intel_connector *)0)) {
      goto err;
    } else {
    }
  } else {
  }
  return;
  err:
  /* Unwind: tear down the encoder and free the port allocation. */
  drm_encoder_cleanup(encoder);
  kfree((void const *)intel_dig_port);
  return;
  }
}

extern int ldv_probe_54(void) ;

/* LDV harness driver for the encoder-funcs state machine (the body continues
 * on the next source line). */
void ldv_main_exported_54(void) { struct drm_encoder
*ldvarg395 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg395 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_54 == 2) { intel_ddi_destroy(ldvarg395); ldv_state_variable_54 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49116; case 1: ; if (ldv_state_variable_54 == 1) { ldv_probe_54(); ldv_state_variable_54 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49116; default: ldv_stop(); } ldv_49116: ; return; } } bool ldv_queue_work_on_853(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_854(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_855(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_856(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_857(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); 
ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___21(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void *ERR_PTR(long error ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static bool IS_ERR_OR_NULL(void const *ptr ) ; bool ldv_queue_work_on_867(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_869(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_868(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_871(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_870(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_872(struct 
delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_876(struct delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_877(struct delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_873(struct delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_874(struct delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_875(struct delayed_work *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_878(struct delayed_work *ldv_func_arg1 ) ; __inline static bool queue_delayed_work___4(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_868(8192, wq, dwork, delay); return (tmp); } } __inline static bool schedule_delayed_work___2(struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = queue_delayed_work___4(system_wq, dwork, delay); return (tmp); } } extern int sysfs_create_link(struct kobject * , struct kobject * , char const * ) ; extern void sysfs_remove_link(struct kobject * , char const * ) ; extern long schedule_timeout_uninterruptible(long ) ; void invoke_work_19(void) ; void disable_work_19(struct work_struct *work ) ; void call_and_disable_all_19(int state ) ; void invoke_work_20(void) ; void activate_work_19(struct work_struct *work , int state ) ; void call_and_disable_work_19(struct work_struct *work ) ; void call_and_disable_work_20(struct work_struct *work ) ; void disable_work_20(struct work_struct *work ) ; void activate_work_20(struct work_struct *work , int state ) ; void call_and_disable_all_20(int state ) ; extern int register_reboot_notifier(struct notifier_block * ) ; extern int unregister_reboot_notifier(struct notifier_block * ) ; extern void drm_mode_probed_add(struct drm_connector * , struct drm_display_mode * ) ; extern struct drm_display_mode *drm_mode_duplicate(struct drm_device * , struct drm_display_mode const * ) ; extern bool drm_probe_ddc(struct i2c_adapter * ) ; extern struct edid 
*drm_edid_duplicate(struct edid const * ) ; extern int drm_mode_create_scaling_mode_property(struct drm_device * ) ; extern u8 drm_match_cea_mode(struct drm_display_mode const * ) ; extern bool drm_detect_monitor_audio(struct edid * ) ; __inline static bool drm_can_sleep___12(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39718; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39718; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39718; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39718; default: __bad_percpu_size(); } ldv_39718: pscr_ret__ = pfo_ret__; goto ldv_39724; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39728; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39728; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39728; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39728; default: __bad_percpu_size(); } ldv_39728: pscr_ret__ = pfo_ret_____0; goto ldv_39724; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39737; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39737; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39737; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39737; default: __bad_percpu_size(); } ldv_39737: 
pscr_ret__ = pfo_ret_____1; goto ldv_39724; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39746; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39746; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39746; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39746; default: __bad_percpu_size(); } ldv_39746: pscr_ret__ = pfo_ret_____2; goto ldv_39724; default: __bad_size_call_parameter(); goto ldv_39724; } ldv_39724: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___21(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } extern void pm_qos_update_request(struct pm_qos_request * , s32 ) ; __inline static void wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies , int to_wait_ms ) { unsigned long target_jiffies ; unsigned long tmp_jiffies ; unsigned long remaining_jiffies ; unsigned long tmp ; long tmp___0 ; { tmp_jiffies = jiffies; tmp = msecs_to_jiffies_timeout((unsigned int const )to_wait_ms); target_jiffies = tmp + timestamp_jiffies; if ((long )(tmp_jiffies - target_jiffies) < 0L) { remaining_jiffies = target_jiffies - tmp_jiffies; goto ldv_46456; ldv_46455: tmp___0 = schedule_timeout_uninterruptible((long )remaining_jiffies); remaining_jiffies = (unsigned long )tmp___0; ldv_46456: ; if (remaining_jiffies != 0UL) { goto ldv_46455; } else { } } else { } return; } } extern bool drm_dp_channel_eq_ok(u8 const * , int ) ; extern bool drm_dp_clock_recovery_ok(u8 const * , int ) ; extern u8 drm_dp_get_adjust_request_voltage(u8 const * , int ) ; extern u8 drm_dp_get_adjust_request_pre_emphasis(u8 const * , int ) ; extern void drm_dp_link_train_clock_recovery_delay(u8 const * ) ; extern void drm_dp_link_train_channel_eq_delay(u8 const * 
) ; extern u8 drm_dp_link_rate_to_bw_code(int ) ; __inline static u8 drm_dp_max_lane_count(u8 const *dpcd ) { { return ((unsigned int )((u8 )*(dpcd + 2UL)) & 31U); } } __inline static ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux , unsigned int offset , u8 *valuep ) { ssize_t tmp ; { tmp = drm_dp_dpcd_read(aux, offset, (void *)valuep, 1UL); return (tmp); } } extern int drm_dp_aux_register(struct drm_dp_aux * ) ; extern void drm_dp_aux_unregister(struct drm_dp_aux * ) ; extern int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr * , bool ) ; extern int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr * , u8 * , bool * ) ; extern void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr * ) ; extern int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr * ) ; __inline static int vlv_dport_to_channel(struct intel_digital_port *dport ) { { switch ((unsigned int )dport->port) { case 1U: ; case 3U: ; return (0); case 2U: ; return (1); default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/intel_drv.h"), "i" (776), "i" (12UL)); ldv_47513: ; goto ldv_47513; } } } void intel_dp_add_properties(struct intel_dp *intel_dp , struct drm_connector *connector ) ; int intel_dp_max_link_rate(struct intel_dp *intel_dp ) ; int intel_dp_rate_select(struct intel_dp *intel_dp , int rate ) ; int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port , int conn_base_id ) ; void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port ) ; int intel_panel_init(struct intel_panel *panel , struct drm_display_mode *fixed_mode , struct drm_display_mode *downclock_mode ) ; void intel_panel_fini(struct intel_panel *panel ) ; void intel_fixed_panel_mode(struct drm_display_mode const *fixed_mode , struct drm_display_mode 
*adjusted_mode ) ; void intel_pch_panel_fitting(struct intel_crtc *intel_crtc , struct intel_crtc_state *pipe_config , int fitting_mode ) ; void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc , struct intel_crtc_state *pipe_config , int fitting_mode ) ; int intel_panel_setup_backlight(struct drm_connector *connector , enum pipe pipe ) ; void intel_panel_enable_backlight(struct intel_connector *connector ) ; void intel_panel_disable_backlight(struct intel_connector *connector ) ; enum drm_connector_status intel_panel_detect(struct drm_device *dev ) ; struct drm_display_mode *intel_find_panel_downclock(struct drm_device *dev , struct drm_display_mode *fixed_mode , struct drm_connector *connector ) ; static struct dp_link_dpll const gen4_dpll[2U] = { {6, {2, 23, 8, 2, 10, 0, 0, 0, 0}}, {10, {1, 14, 2, 1, 10, 0, 0, 0, 0}}}; static struct dp_link_dpll const pch_dpll[2U] = { {6, {1, 12, 9, 2, 10, 0, 0, 0, 0}}, {10, {2, 14, 8, 1, 10, 0, 0, 0, 0}}}; static struct dp_link_dpll const vlv_dpll[2U] = { {6, {5, 3, 81, 3, 2, 0, 0, 0, 0}}, {10, {1, 2, 27, 2, 2, 0, 0, 0, 0}}}; static struct dp_link_dpll const chv_dpll[3U] = { {6, {1, 2, 135895450, 4, 2, 0, 0, 0, 0}}, {10, {1, 2, 113246208, 4, 1, 0, 0, 0, 0}}, {20, {1, 2, 113246208, 2, 1, 0, 0, 0, 0}}}; static int const skl_rates[6U] = { 162000, 216000, 270000, 324000, 432000, 540000}; static int const chv_rates[11U] = { 162000, 202500, 210000, 216000, 243000, 270000, 324000, 405000, 420000, 432000, 540000}; static int const default_rates[3U] = { 162000, 270000, 540000}; static bool is_edp(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; return ((unsigned int )intel_dig_port->base.type == 8U); } } static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; return 
/* CIL/LDV-generated rendering of intel_dp.c helpers. This span: connector->intel_dp
 * lookup, DPCD-derived link-bandwidth / lane-count queries, bandwidth arithmetic,
 * and the DP mode-validity check. Leading tokens close a function cut off above. */
(intel_dig_port->base.base.dev); } } static struct intel_dp *intel_attached_dp(struct drm_connector *connector ) { struct intel_encoder *tmp ; struct intel_dp *tmp___0 ; { tmp = intel_attached_encoder(connector); tmp___0 = enc_to_intel_dp(& tmp->base); return (tmp___0); } } static void intel_dp_link_down(struct intel_dp *intel_dp ) ; static bool edp_panel_vdd_on(struct intel_dp *intel_dp ) ; static void edp_panel_vdd_off(struct intel_dp *intel_dp , bool sync ) ; static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp ) ; static void vlv_steal_power_sequencer(struct drm_device *dev , enum pipe pipe ) ; /* intel_dp_max_link_bw: reads dpcd[1] (MAX_LINK_RATE); accepts 6/10/20 (1.62/2.7/5.4 Gbps codes — presumably, per DPCD convention; verify), WARNs and falls back to 6 otherwise. */ static int intel_dp_max_link_bw(struct intel_dp *intel_dp ) { int max_link_bw ; int __ret_warn_on ; long tmp ; { max_link_bw = (int )intel_dp->dpcd[1]; switch (max_link_bw) { case 6: ; case 10: ; case 20: ; goto ldv_48383; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 146, "invalid max DP link bw val %x, using 1.62Gbps\n", max_link_bw); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); max_link_bw = 6; goto ldv_48383; } ldv_48383: ; return (max_link_bw); } } /* intel_dp_max_lane_count: min(source lanes, sink lanes). Source default is 4, reduced to 2 on a platform/port combination probed via byte 46 of dev_priv and saved_port_bits bit 4 (exact platform meaning not visible here — CIL collapsed the macro). */ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; u8 source_max ; u8 sink_max ; struct drm_i915_private *__p ; u8 _min1 ; u8 _min2 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; source_max = 4U; __p = to_i915((struct drm_device const *)dev); if (((unsigned int )*((unsigned char *)__p + 46UL) != 0U && (unsigned int )intel_dig_port->port == 0U) && (intel_dig_port->saved_port_bits & 16U) == 0U) { source_max = 2U; } else { } sink_max = drm_dp_max_lane_count((u8
/* sink capability comes from the cached DPCD block */
const *)(& intel_dp->dpcd)); _min1 = source_max; _min2 = sink_max; return ((u8 )((int )_min1 < (int )_min2 ? _min1 : _min2)); } } /* required link rate for a pixel clock at bpp, with +9/10 rounding up (10% overhead) */ static int intel_dp_link_required(int pixel_clock , int bpp ) { { return ((pixel_clock * bpp + 9) / 10); } } /* max payload: link_clock * lanes * 8/10 (8b/10b channel coding) */ static int intel_dp_max_data_rate(int max_link_clock , int max_lanes ) { { return (((max_link_clock * max_lanes) * 8) / 10); } } /* intel_dp_mode_valid: rejects modes larger than an eDP fixed panel mode (29 = MODE_PANEL, presumably), over link bandwidth at 18 bpp (15), below 10 MHz dotclock (16), or doublescan (3); 0 = MODE_OK. */ static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct drm_display_mode *fixed_mode ; int target_clock ; int max_rate ; int mode_rate ; int max_lanes ; int max_link_clock ; bool tmp___0 ; u8 tmp___1 ; { tmp = intel_attached_dp(connector); intel_dp = tmp; __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; fixed_mode = intel_connector->panel.fixed_mode; target_clock = mode->clock; tmp___0 = is_edp(intel_dp); if ((int )tmp___0 && (unsigned long )fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { if (mode->hdisplay > fixed_mode->hdisplay) { return (29); } else { } if (mode->vdisplay > fixed_mode->vdisplay) { return (29); } else { } target_clock = fixed_mode->clock; } else { } max_link_clock = intel_dp_max_link_rate(intel_dp); tmp___1 = intel_dp_max_lane_count(intel_dp); max_lanes = (int )tmp___1; max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); if (mode_rate > max_rate) { return (15); } else { } if (mode->clock <= 9999) { return (16); } else { } if ((mode->flags & 4096U) != 0U) { return (3); } else { } return (0); } } /* intel_dp_pack_aux: big-endian pack of up to 4 bytes into a u32 (byte i -> bits (3-i)*8); continues on the next source line */ uint32_t intel_dp_pack_aux(uint8_t const *src , int src_bytes ) { int i ; uint32_t v ; { v = 0U; if (src_bytes > 4) { src_bytes = 4; } else { } i = 0; goto ldv_48432; ldv_48431: v = ((unsigned int )*(src + (unsigned long )i) << (3 - i) * 8) | v; i = i + 1; ldv_48432: ; if
/* Continuation of intel_dp_pack_aux, then AUX unpack, raw-clock probe, PPS
 * lock/unlock, and the VLV/CHV power-sequencer "kick" routine. */
(i < src_bytes) { goto ldv_48431; } else { } return (v); } } /* intel_dp_unpack_aux: inverse of pack — big-endian unpack of up to 4 bytes from a u32 */ static void intel_dp_unpack_aux(uint32_t src , uint8_t *dst , int dst_bytes ) { int i ; { if (dst_bytes > 4) { dst_bytes = 4; } else { } i = 0; goto ldv_48441; ldv_48440: *(dst + (unsigned long )i) = (uint8_t )(src >> (3 - i) * 8); i = i + 1; ldv_48441: ; if (i < dst_bytes) { goto ldv_48440; } else { } return; } } /* intel_hrawclk: decode CLKCFG (MMIO 68608 = 0x10C00) low 3 bits into a raw clock in MHz; one platform check (byte 45 of dev_priv — platform flag, exact meaning collapsed by CIL) short-circuits to 200. */ static int intel_hrawclk(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; uint32_t clkcfg ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { return (200); } else { } clkcfg = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 68608L, 1); switch (clkcfg & 7U) { case 5U: ; return (100); case 1U: ; return (133); case 3U: ; return (166); case 2U: ; return (200); case 6U: ; return (266); case 7U: ; return (333); case 4U: ; case 0U: ; return (400); default: ; return (133); } } } static void intel_dp_init_panel_power_sequencer(struct drm_device *dev , struct intel_dp *intel_dp ) ; static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev , struct intel_dp *intel_dp ) ; /* pps_lock: grab the port's display power domain, then pps_mutex (power ref taken BEFORE the mutex; pps_unlock releases in reverse) */ static void pps_lock(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain power_domain ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; encoder = & intel_dig_port->base; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; power_domain = intel_display_port_power_domain(encoder); intel_display_power_get(dev_priv, power_domain); mutex_lock_nested(& dev_priv->pps_mutex, 0U); return; } } static void pps_unlock(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_encoder *encoder ;
/* pps_unlock: drop pps_mutex, then put the power-domain reference */
struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain power_domain ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; encoder = & intel_dig_port->base; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; mutex_unlock(& dev_priv->pps_mutex); power_domain = intel_display_port_power_domain(encoder); intel_display_power_put(dev_priv, power_domain); return; } } /* vlv_power_sequencer_kick: pulse the DP port enable bit (bit 31 of output_reg) on pps_pipe so the panel power sequencer latches onto this port. Bails with a WARN if the port is already enabled (output_reg bit 31 set). Temporarily forces the pipe's PLL on (DPLL status read at display_mmio_offset + 0x6014/0x6018/0x6030) if it was off, and releases it afterwards. */ static void vlv_power_sequencer_kick(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; bool pll_enabled ; uint32_t DP ; int __ret_warn_on ; uint32_t tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; uint32_t tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; uint32_t tmp___5 ; struct dpll const *tmp___6 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipe = intel_dp->pps_pipe; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); __ret_warn_on = (int )tmp___0 < 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 342, "skipping pipe %c power seqeuncer kick due to port %c being active\n", (int )pipe + 65, (unsigned int )intel_dig_port->port + 65U); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return; } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("vlv_power_sequencer_kick", "kicking pipe %c power sequencer for 
port %c\n", (int )pipe + 65, (unsigned int )intel_dig_port->port + 65U); } else { } tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); DP = tmp___4 & 4U; DP = DP; DP = DP; DP = DP; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { DP = (uint32_t )((int )pipe << 16) | DP; } else { goto _L; } } else _L: /* CIL Label */ if ((int )pipe == 1) { DP = DP | 1073741824U; } else { } tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); pll_enabled = (tmp___5 & 2147483648U) != 0U; if (! pll_enabled) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { tmp___6 = & chv_dpll[0].dpll; } else { tmp___6 = & vlv_dpll[0].dpll; } } else { tmp___6 = & vlv_dpll[0].dpll; } vlv_force_pll_on(dev, pipe, tmp___6); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, DP | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, DP & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); if (! 
pll_enabled) { vlv_force_pll_off(dev, pipe); } else { } return; } } /* vlv_power_sequencer_pipe: returns (allocating if needed) the pipe whose power sequencer this eDP port owns. Asserts pps_mutex is held and the port is eDP; scans other DP encoders to mask out pipes already claimed (pipes bitmask starts at 3 = pipes A|B), steals one, records it, and kicks the sequencer. */ static enum pipe vlv_power_sequencer_pipe(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_encoder *encoder ; unsigned int pipes ; enum pipe pipe ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int __ret_warn_on___0 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; struct list_head const *__mptr ; struct intel_dp *tmp___6 ; struct list_head const *__mptr___0 ; int tmp___7 ; int __ret_warn_on___1 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; pipes = 3U; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 400, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = is_edp(intel_dp); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } __ret_warn_on___0 = tmp___4; tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 403, "WARN_ON(!is_edp(intel_dp))"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if ((int )intel_dp->pps_pipe != -1) { 
/* Continuation of vlv_power_sequencer_pipe (fast path: pipe already assigned),
 * then the PPS pipe-probe predicates, initial-PPS discovery, reset, and the
 * _pp_ctrl_reg/_pp_stat_reg address helpers. */
return (intel_dp->pps_pipe); } else { } __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_48541; ldv_48540: ; if ((unsigned int )encoder->type != 8U) { goto ldv_48539; } else { } tmp___6 = enc_to_intel_dp(& encoder->base); if ((int )tmp___6->pps_pipe != -1) { pipes = (unsigned int )(~ (1 << (int )tmp___6->pps_pipe)) & pipes; } else { } ldv_48539: __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_48541: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_48540; } else { } __ret_warn_on___1 = pipes == 0U; tmp___8 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___8 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 429, "WARN_ON(pipes == 0)"); } else { } tmp___9 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___9 != 0L) { pipe = 0; } else { tmp___7 = ffs((int )pipes); pipe = (enum pipe )(tmp___7 + -1); } vlv_steal_power_sequencer(dev, pipe); intel_dp->pps_pipe = pipe; tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("vlv_power_sequencer_pipe", "picked pipe %c power sequencer for port %c\n", (int )intel_dp->pps_pipe + 65, (unsigned int )intel_dig_port->port + 65U); } else { } intel_dp_init_panel_power_sequencer(dev, intel_dp); intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); vlv_power_sequencer_kick(intel_dp); return (intel_dp->pps_pipe); } } /* vlv_pipe_has_pp_on: bit 31 (panel power on) of the per-pipe PP_STATUS register, address (pipe+7698)*256 */ static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv , enum pipe pipe ) { uint32_t tmp ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((int )pipe + 7698) * 256), 1); return ((tmp & 
/* bit 31 = panel power on */
2147483648U) != 0U); } } /* vlv_pipe_has_vdd_on: bit 3 (VDD force) of per-pipe PP_CONTROL at pipe*256 + 1970692 */ static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv , enum pipe pipe ) { uint32_t tmp ; { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 256 + 1970692), 1); return ((tmp & 8U) != 0U); } } /* vlv_pipe_any: always-true predicate, used as the last-resort pipe_check */ static bool vlv_pipe_any(struct drm_i915_private *dev_priv , enum pipe pipe ) { { return (1); } } /* vlv_initial_pps_pipe: scan pipes 0..1; return the first whose PP_ON_DELAYS port-select field (top 2 bits of pipe*256 + 1970696) matches this port AND satisfies pipe_check; -1 if none. */ static enum pipe vlv_initial_pps_pipe(struct drm_i915_private *dev_priv , enum port port , bool (*pipe_check)(struct drm_i915_private * , enum pipe ) ) { enum pipe pipe ; u32 port_sel ; uint32_t tmp ; bool tmp___0 ; int tmp___1 ; { pipe = 0; goto ldv_48570; ldv_48569: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe * 256 + 1970696), 1); port_sel = tmp & 3221225472U; if ((unsigned int )port << 30 != port_sel) { goto ldv_48568; } else { } tmp___0 = (*pipe_check)(dev_priv, pipe); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto ldv_48568; } else { } return (pipe); ldv_48568: pipe = (enum pipe )((int )pipe + 1); ldv_48570: ; if ((int )pipe <= 1) { goto ldv_48569; } else { } return (-1); } } /* vlv_initial_power_sequencer_setup: at init, adopt whichever pipe's PPS already drives this port — preferring power-on, then VDD-on, then any match; asserts pps_mutex held. */ static void vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dig_port->port; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 506, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, & vlv_pipe_has_pp_on); if ((int )intel_dp->pps_pipe == -1) { intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, & vlv_pipe_has_vdd_on); } else { } if ((int )intel_dp->pps_pipe == -1) { intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, & vlv_pipe_any); } else { } if ((int )intel_dp->pps_pipe == -1) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("vlv_initial_power_sequencer_setup", "no initial power sequencer for port %c\n", (unsigned int )port + 65U); } else { } return; } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("vlv_initial_power_sequencer_setup", "initial power sequencer for port %c: pipe %c\n", (unsigned int )port + 65U, (int )intel_dp->pps_pipe + 65); } else { } intel_dp_init_panel_power_sequencer(dev, intel_dp); intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); return; } } /* vlv_power_sequencer_reset: WARNs (and bails) unless on the expected platform, then clears pps_pipe (-1) on every DP encoder (type 8) in the mode_config encoder list. */ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv ) { struct drm_device *dev ; struct intel_encoder *encoder ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp ; long tmp___0 ; struct list_head const *__mptr ; struct intel_dp *intel_dp ; struct list_head const *__mptr___0 ; { dev = dev_priv->dev; __p = to_i915((struct drm_device const *)dev); __ret_warn_on = (unsigned int )*((unsigned char *)__p + 45UL) == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 540, "WARN_ON(!IS_VALLEYVIEW(dev))"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return; } else { } __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_48602; ldv_48601: ; if ((unsigned int )encoder->type != 8U) { goto ldv_48600; } else { } intel_dp = enc_to_intel_dp(& encoder->base); intel_dp->pps_pipe = -1; ldv_48600: __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_48602: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_48601; } else { } return; } } /* _pp_ctrl_reg: PCH platforms use fixed 815620 (0xC7204); otherwise per-pipe VLV register via vlv_power_sequencer_pipe */ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; enum pipe tmp___0 ; struct drm_i915_private *__p ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { return (815620U); } else { tmp___0 = vlv_power_sequencer_pipe(intel_dp); return ((u32 )((int )tmp___0 * 256 + 1970692)); } } } /* _pp_stat_reg: same split for the status register (815616 / per-pipe) */ static u32 _pp_stat_reg(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; enum pipe tmp___0 ; struct drm_i915_private *__p ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { return (815616U); } else { tmp___0 = vlv_power_sequencer_pipe(intel_dp); return ((u32 )(((int )tmp___0 + 7698) * 256)); } } } /* edp_notify_handler: reboot-notifier (container_of this); body continues on next lines */ static int edp_notify_handler(struct notifier_block *this , unsigned long code , void *unused ) { struct intel_dp *intel_dp ; struct notifier_block const *__mptr ; struct drm_device *dev ; 
/* Continuation of edp_notify_handler, then the panel-power/VDD status probes,
 * the eDP sanity check used before AUX traffic, and the start of the AUX
 * completion-wait helper. */
struct drm_device *tmp ; struct drm_i915_private *dev_priv ; u32 pp_div ; u32 pp_ctrl_reg ; u32 pp_div_reg ; bool tmp___0 ; int tmp___1 ; enum pipe pipe ; enum pipe tmp___2 ; struct drm_i915_private *__p ; { __mptr = (struct notifier_block const *)this; intel_dp = (struct intel_dp *)__mptr + 0xfffffffffffff638UL; tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = is_edp(intel_dp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1 || code != 1UL) { return (0); } else { } pps_lock(intel_dp); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { tmp___2 = vlv_power_sequencer_pipe(intel_dp); pipe = tmp___2; pp_ctrl_reg = (u32 )((int )pipe * 256 + 1970692); pp_div_reg = (u32 )((int )pipe * 256 + 1970704); pp_div = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_div_reg, 1); pp_div = pp_div & 4294967040U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_div_reg, pp_div | 31U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, 2882338816U, 1); msleep((unsigned int )intel_dp->panel_power_cycle_delay); } else { } pps_unlock(intel_dp); return (0); } } /* edp_have_panel_power: under pps_mutex (asserted), returns PP_STATUS bit 31; short-circuits false when no pps_pipe is assigned on the per-pipe-PPS platform */ static bool edp_have_panel_power(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; struct drm_i915_private *__p ; u32 tmp___3 ; uint32_t tmp___4 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 625, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (int )intel_dp->pps_pipe == -1) { return (0); } else { } tmp___3 = _pp_stat_reg(intel_dp); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___3, 1); return ((int )tmp___4 < 0); } } /* edp_have_panel_vdd: same structure, but tests PP_CONTROL bit 3 (VDD force) */ static bool edp_have_panel_vdd(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; struct drm_i915_private *__p ; u32 tmp___3 ; uint32_t tmp___4 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 639, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U && (int )intel_dp->pps_pipe == -1) { return (0); } else { } tmp___3 = _pp_ctrl_reg(intel_dp); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___3, 1); return ((tmp___4 & 8U) != 0U); } } static void 
/* intel_dp_check_edp: no-op for non-eDP; WARNs (with a status/control register
 * dump at debug level) if AUX is attempted while panel power AND VDD are both off */
intel_dp_check_edp(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; bool tmp___0 ; int tmp___1 ; int __ret_warn_on ; long tmp___2 ; u32 tmp___3 ; uint32_t tmp___4 ; u32 tmp___5 ; uint32_t tmp___6 ; long tmp___7 ; bool tmp___8 ; int tmp___9 ; bool tmp___10 ; int tmp___11 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = is_edp(intel_dp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return; } else { } tmp___8 = edp_have_panel_power(intel_dp); if (tmp___8) { tmp___9 = 0; } else { tmp___9 = 1; } if (tmp___9) { tmp___10 = edp_have_panel_vdd(intel_dp); if (tmp___10) { tmp___11 = 0; } else { tmp___11 = 1; } if (tmp___11) { __ret_warn_on = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 658, "eDP powered off while attempting aux channel communication.\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { tmp___3 = _pp_ctrl_reg(intel_dp); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___3, 1); tmp___5 = _pp_stat_reg(intel_dp); tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___5, 1); drm_ut_debug_printk("intel_dp_check_edp", "Status 0x%08x Control 0x%08x\n", tmp___6, tmp___4); } else { } } else { } } else { } return; } } /* intel_dp_aux_wait_done: wait for AUX-done (ch_ctl bit 31 clearing — tested as (int)status >= 0). IRQ path: 10 ms wait_event on gmbus_wait_queue; polling path: 10 ms jiffies deadline with cpu_relax. Body continues on next lines. */ static uint32_t intel_dp_aux_wait_done(struct intel_dp *intel_dp , bool has_aux_irq ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t ch_ctl ; uint32_t status ; bool done ; long __ret ; unsigned long tmp___0 ; 
wait_queue_t __wait ; long __ret___0 ; unsigned long tmp___1 ; long __int ; long tmp___2 ; bool __cond ; bool __cond___0 ; unsigned long timeout__ ; unsigned long tmp___3 ; int ret__ ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ch_ctl = intel_dp->aux_ch_ctl_reg; if ((int )has_aux_irq) { tmp___0 = msecs_to_jiffies_timeout(10U); __ret = (long )tmp___0; __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 678, 0); status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ch_ctl, 0); __cond___0 = (int )status >= 0; if ((int )__cond___0 && __ret == 0L) { __ret = 1L; } else { } if (((int )__cond___0 || __ret == 0L) == 0) { tmp___1 = msecs_to_jiffies_timeout(10U); __ret___0 = (long )tmp___1; INIT_LIST_HEAD(& __wait.task_list); __wait.flags = 0U; ldv_48698: tmp___2 = prepare_to_wait_event(& dev_priv->gmbus_wait_queue, & __wait, 2); __int = tmp___2; status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ch_ctl, 0); __cond = (int )status >= 0; if ((int )__cond && __ret___0 == 0L) { __ret___0 = 1L; } else { } if (((int )__cond || __ret___0 == 0L) != 0) { goto ldv_48697; } else { } __ret___0 = schedule_timeout(__ret___0); goto ldv_48698; ldv_48697: finish_wait(& dev_priv->gmbus_wait_queue, & __wait); __ret = __ret___0; } else { } done = __ret != 0L; } else { tmp___3 = msecs_to_jiffies(10U); timeout__ = (tmp___3 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48711; ldv_48710: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ch_ctl, 0); if ((int )status < 0) { ret__ = -110; } else { } goto ldv_48709; } else { } cpu_relax(); ldv_48711: status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t 
)ch_ctl, 0); if ((int )status < 0) { goto ldv_48710; } else { } ldv_48709: done = ret__ == 0; } if (! done) { drm_err("dp aux hw did not signal timeout (has irq: %i)!\n", (int )has_aux_irq); } else { } return (status); } } static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp , int index ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; int tmp___0 ; uint32_t tmp___1 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; if (index == 0) { tmp___0 = intel_hrawclk(dev); tmp___1 = (uint32_t )(tmp___0 / 2); } else { tmp___1 = 0U; } return (tmp___1); } } static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp , int index ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int tmp___0 ; int tmp___1 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (index != 0) { return (0U); } else { } if ((unsigned int )intel_dig_port->port == 0U) { tmp___0 = (*(dev_priv->display.get_display_clock_speed))(dev); return ((uint32_t )((tmp___0 + 1999) / 2000)); } else { tmp___1 = intel_pch_rawclk(dev); return ((uint32_t )((tmp___1 + 1) / 2)); } } } static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp , int index ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int __x ; int tmp___0 ; int __d ; int tmp___1 ; uint32_t tmp___2 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned int )intel_dig_port->port == 0U) { if (index != 0) { return (0U); } else { } tmp___0 = (*(dev_priv->display.get_display_clock_speed))(dev); __x = tmp___0; __d = 2000; return ((uint32_t )(__x 
> 0 ? (__d / 2 + __x) / __d : (__x - __d / 2) / __d)); } else if ((unsigned int )dev_priv->pch_id == 35840U) { switch (index) { case 0: ; return (63U); case 1: ; return (72U); default: ; return (0U); } } else { if (index == 0) { tmp___1 = intel_pch_rawclk(dev); tmp___2 = (uint32_t )((tmp___1 + 1) / 2); } else { tmp___2 = 0U; } return (tmp___2); } } } static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp , int index ) { { return (index != 0 ? 0U : 100U); } } static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp , int index ) { { return (index == 0); } } static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp , bool has_aux_irq , int send_bytes , uint32_t aux_clock_divider ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; uint32_t precharge ; uint32_t timeout ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U) { precharge = 3U; } else { precharge = 5U; } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { if (intel_dp->aux_ch_ctl_reg == 409616U) { timeout = 67108864U; } else { timeout = 0U; } } else { timeout = 0U; } } else { timeout = 0U; } return (((((((int )has_aux_irq ? 4026531840U : 3489660928U) | timeout) | (uint32_t )(send_bytes << 20)) | (precharge << 16)) | aux_clock_divider) | 33554432U); } } static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp , bool has_aux_irq , int send_bytes , uint32_t unused ) { { return ((uint32_t )((((int )has_aux_irq ? 
-33554432 : -570425344) | (send_bytes << 20)) | 31)); } }
/* intel_dp_aux_ch: perform one raw DP AUX channel transaction.
 * Waits for the channel to go idle, packs `send` into the AUX data
 * registers, fires the transfer (retrying across every clock divider the
 * platform offers, 5 attempts each), then unpacks the reply into `recv`.
 * Returns the number of bytes received, or a negative errno (-16/-EBUSY,
 * -7/-E2BIG, -5/-EIO, -110/-ETIMEDOUT).
 * NOTE(review): status bit constants below are AUX_CH_CTL fields —
 * 0x40000000 done, 0x10000000 timeout, 0x2000000 receive error,
 * 0x1F00000>>20 message size; presumed from the i915 register layout,
 * not visible in this chunk. CIL has lowered all loops to ldv_* gotos. */
static int intel_dp_aux_ch(struct intel_dp *intel_dp , uint8_t const *send , int send_bytes , uint8_t *recv , int recv_size ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t ch_ctl ; uint32_t ch_data ; uint32_t aux_clock_divider ; int i ; int ret ; int recv_bytes ; uint32_t status ; int try ; int clock ; bool has_aux_irq ; struct drm_i915_private *__p ; bool vdd ; int __ret_warn_on ; uint32_t tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; u32 send_ctl ; uint32_t tmp___4 ; uint32_t tmp___5 ; int tmp___6 ; long tmp___7 ; uint32_t tmp___8 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ch_ctl = intel_dp->aux_ch_ctl_reg; ch_data = ch_ctl + 4U; clock = 0; __p = to_i915((struct drm_device const *)dev); has_aux_irq = (unsigned int )((unsigned char )__p->info.gen) > 4U; pps_lock(intel_dp); vdd = edp_panel_vdd_on(intel_dp); pm_qos_update_request(& dev_priv->pm_qos, 0); intel_dp_check_edp(intel_dp); intel_aux_display_runtime_get(dev_priv); try = 0; goto ldv_48810; /* idle-wait loop: poll ch_ctl up to 3 times (1 ms apart); the sign bit of status is the busy flag, so `(int)status >= 0` means idle */ ldv_48809: status = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ch_ctl, 0); if ((int )status >= 0) { goto ldv_48808; } else { } msleep(1U); try = try + 1; ldv_48810: ; if (try <= 2) { goto ldv_48809; } else { } ldv_48808: ; if (try == 3) { __ret_warn_on = 1; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ch_ctl, 1); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 846, "dp_aux_ch not started status 0x%08x\n", tmp___0); } else { }
ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = -16; goto out; } else { } __ret_warn_on___0 = send_bytes > 20 || recv_size > 20; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 852, "WARN_ON(send_bytes > 20 || recv_size > 20)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { ret = -7; goto out; } else { } /* outer loop (ldv_48826/ldv_48825): iterate aux clock dividers until get_aux_clock_divider returns 0; inner loop (ldv_48823/ldv_48822): up to 5 send attempts per divider; innermost (ldv_48818/ldv_48817): pack the tx payload 4 bytes per data register */ goto ldv_48826; ldv_48825: tmp___4 = (*(intel_dp->get_aux_send_ctl))(intel_dp, (int )has_aux_irq, send_bytes, aux_clock_divider); send_ctl = tmp___4; try = 0; goto ldv_48823; ldv_48822: i = 0; goto ldv_48818; ldv_48817: tmp___5 = intel_dp_pack_aux(send + (unsigned long )i, send_bytes - i); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(ch_data + (uint32_t )i), tmp___5, 1); i = i + 4; ldv_48818: ; if (i < send_bytes) { goto ldv_48817; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ch_ctl, send_ctl, 1); status = intel_dp_aux_wait_done(intel_dp, (int )has_aux_irq); /* 0x52000000: write-1-to-clear done/timeout/receive-error status bits */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ch_ctl, status | 1375731712U, 1); if ((status & 268435456U) != 0U) { goto ldv_48820; } else { } if ((status & 33554432U) != 0U) { usleep_range(400UL, 500UL); goto ldv_48820; } else { } if ((status & 1073741824U) != 0U) { goto done; } else { } ldv_48820: try = try + 1; ldv_48823: ; if (try <= 4) { goto ldv_48822; } else { } ldv_48826: tmp___6 = clock; clock = clock + 1; aux_clock_divider = (*(intel_dp->get_aux_clock_divider))(intel_dp, tmp___6); if (aux_clock_divider != 0U) { goto ldv_48825; } else { } if ((status & 1073741824U) == 0U) { drm_err("dp_aux_ch not done status 0x%08x\n", status); ret = -16; goto out; } else { } done: ; if ((status & 33554432U) != 0U) { drm_err("dp_aux_ch receive error status 
0x%08x\n", status); ret = -5; goto out; } else { } if ((status & 268435456U) != 0U) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_dp_aux_ch", "dp_aux_ch timeout status 0x%08x\n", status); } else { } ret = -110; goto out; } else { } /* extract received byte count from status[24:20], clamp to caller's buffer, then unpack 4 bytes per data register */ recv_bytes = (int )((status & 32505856U) >> 20); if (recv_bytes > recv_size) { recv_bytes = recv_size; } else { } i = 0; goto ldv_48830; ldv_48829: tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(ch_data + (uint32_t )i), 1); intel_dp_unpack_aux(tmp___8, recv + (unsigned long )i, recv_bytes - i); i = i + 4; ldv_48830: ; if (i < recv_bytes) { goto ldv_48829; } else { } ret = recv_bytes; out: pm_qos_update_request(& dev_priv->pm_qos, -1); intel_aux_display_runtime_put(dev_priv); if ((int )vdd) { edp_panel_vdd_off(intel_dp, 0); } else { } pps_unlock(intel_dp); return (ret); } }
/* intel_dp_aux_transfer: drm_dp_aux .transfer hook. Marshals a
 * drm_dp_aux_msg into the 20-byte txbuf header (request/address/length
 * per the DP AUX syntax), dispatches to intel_dp_aux_ch, and translates
 * the raw reply back into msg->reply / msg->buffer. The container_of is
 * lowered by CIL into the 0xffffffffffffffa0UL (-0x60) pointer offset. */
static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux , struct drm_dp_aux_msg *msg ) { struct intel_dp *intel_dp ; struct drm_dp_aux const *__mptr ; uint8_t txbuf[20U] ; uint8_t rxbuf[20U] ; size_t txsize ; size_t rxsize ; int ret ; int __ret_warn_on ; long tmp ; long tmp___0 ; int __min1 ; int __max1 ; int __max2 ; int __min2 ; int __ret_warn_on___0 ; long tmp___1 ; long tmp___2 ; { __mptr = (struct drm_dp_aux const *)aux; intel_dp = (struct intel_dp *)__mptr + 0xffffffffffffffa0UL; txbuf[0] = (unsigned int )((int )msg->request << 4U) | ((unsigned int )((uint8_t )(msg->address >> 16)) & 15U); txbuf[1] = (uint8_t )(msg->address >> 8); txbuf[2] = (uint8_t )msg->address; txbuf[3] = (unsigned int )((uint8_t )msg->size) - 1U; switch ((int )msg->request & -5) { case 8: ; case 0: txsize = msg->size != 0UL ?
msg->size + 4UL : 3UL; rxsize = 2UL; __ret_warn_on = txsize > 20UL; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 969, "WARN_ON(txsize > 20)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-7L); } else { } memcpy((void *)(& txbuf) + 4U, (void const *)msg->buffer, msg->size); ret = intel_dp_aux_ch(intel_dp, (uint8_t const *)(& txbuf), (int )txsize, (uint8_t *)(& rxbuf), (int )rxsize); if (ret > 0) { msg->reply = (u8 )((int )rxbuf[0] >> 4); /* write path: clamp returned count to min(max(rxbuf[1],0), msg->size) — CIL-expanded min/max macros */ if (ret > 1) { __max1 = (int )rxbuf[1]; __max2 = 0; __min1 = __max1 > __max2 ? __max1 : __max2; __min2 = (int )msg->size; ret = __min1 < __min2 ? __min1 : __min2; } else { ret = (int )msg->size; } } else { } goto ldv_48854; case 9: ; case 1: txsize = msg->size != 0UL ?
4UL : 3UL; rxsize = msg->size + 1UL; __ret_warn_on___0 = rxsize > 20UL; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 993, "WARN_ON(rxsize > 20)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { return (-7L); } else { } /* read path: first rx byte is the reply code, payload follows */ ret = intel_dp_aux_ch(intel_dp, (uint8_t const *)(& txbuf), (int )txsize, (uint8_t *)(& rxbuf), (int )rxsize); if (ret > 0) { msg->reply = (u8 )((int )rxbuf[0] >> 4); ret = ret - 1; memcpy(msg->buffer, (void const *)(& rxbuf) + 1U, (size_t )ret); } else { } goto ldv_48854; default: ret = -22; goto ldv_48854; } ldv_48854: ; return ((ssize_t )ret); } }
/* intel_dp_aux_init: pick the AUX_CH_CTL register and DDC bus name for
 * this port, fix the register up for VLV/CHV-style platforms (ctl lives
 * at output_reg + 0x10 there — presumed; the byte-45 flag tested below is
 * a CIL-lowered platform predicate, TODO confirm which), then register
 * the drm_dp_aux and a sysfs link on the connector. The default: branch
 * is a lowered BUG() (ud2 + infinite goto loop). */
static void intel_dp_aux_init(struct intel_dp *intel_dp , struct intel_connector *connector ) { struct drm_device *dev ; struct drm_device *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; enum port port ; char const *name ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; long tmp___1 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; tmp___0 = dp_to_dig_port(intel_dp); intel_dig_port = tmp___0; port = intel_dig_port->port; name = (char const *)0; switch ((unsigned int )port) { case 0U: intel_dp->aux_ch_ctl_reg = 409616U; name = "DPDDC-A"; goto ldv_48870; case 1U: intel_dp->aux_ch_ctl_reg = 934160U; name = "DPDDC-B"; goto ldv_48870; case 2U: intel_dp->aux_ch_ctl_reg = 934416U; name = "DPDDC-C"; goto ldv_48870; case 3U: intel_dp->aux_ch_ctl_reg = 934672U; name = "DPDDC-D"; goto ldv_48870; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c"), "i" (1045), "i" (12UL)); ldv_48875: ; goto ldv_48875; } ldv_48870: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 16U; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) != 8U) { intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 16U; } else { } } } else { } intel_dp->aux.name = name; intel_dp->aux.dev = dev->dev; intel_dp->aux.transfer = & intel_dp_aux_transfer; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_aux_init", "registering %s bus for %s\n", name, (connector->base.kdev)->kobj.name); } else { } ret = drm_dp_aux_register(& intel_dp->aux); if (ret < 0) { drm_err("drm_dp_aux_register() for %s failed (%d)\n", name, ret); return; } else { } ret = sysfs_create_link(& (connector->base.kdev)->kobj, & intel_dp->aux.ddc.dev.kobj, intel_dp->aux.ddc.dev.kobj.name); if (ret < 0) { drm_err("sysfs_create_link() for %s failed (%d)\n", name, ret); drm_dp_aux_unregister(& intel_dp->aux); } else { } return; } }
/* intel_dp_connector_unregister: drop the AUX/DDC sysfs link created by
 * intel_dp_aux_init (only for non-MST connectors), then run the generic
 * connector unregister. */
static void intel_dp_connector_unregister(struct intel_connector *intel_connector ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; { tmp = intel_attached_dp(& intel_connector->base); intel_dp = tmp; if ((unsigned long )intel_connector->mst_port == (unsigned long )((struct intel_dp *)0)) { sysfs_remove_link(& (intel_connector->base.kdev)->kobj, intel_dp->aux.ddc.dev.kobj.name); } else { } intel_connector_unregister(intel_connector); return; } }
/* skl_edp_set_pll_config: program the Skylake eDP DPLL hardware state for
 * the given link clock. switch keys are link_clock/2 in 10 kHz units
 * (81000 = 1.62 GHz link, etc.); the ctrl1 bit patterns select the
 * corresponding DPLL link rate field. */
static void skl_edp_set_pll_config(struct intel_crtc_state 
*pipe_config , int link_clock ) { u32 ctrl1 ; { memset((void *)(& pipe_config->dpll_hw_state), 0, 68UL); pipe_config->ddi_pll_sel = 0U; pipe_config->dpll_hw_state.cfgcr1 = 0U; pipe_config->dpll_hw_state.cfgcr2 = 0U; ctrl1 = 1U; switch (link_clock / 2) { case 81000: ctrl1 = ctrl1 | 4U; goto ldv_48905; case 135000: ctrl1 = ctrl1 | 2U; goto ldv_48905; case 270000: ctrl1 = ctrl1; goto ldv_48905; case 162000: ctrl1 = ctrl1 | 6U; goto ldv_48905; case 108000: ctrl1 = ctrl1 | 8U; goto ldv_48905; case 216000: ctrl1 = ctrl1 | 10U; goto ldv_48905; } ldv_48905: pipe_config->dpll_hw_state.ctrl1 = ctrl1; return; } }
/* hsw_dp_set_ddi_pll_sel: Haswell/Broadwell DDI PLL selection from the
 * DP link bandwidth code (6 = 1.62, 10 = 2.7, 20 = 5.4 GHz). */
static void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config , int link_bw ) { { memset((void *)(& pipe_config->dpll_hw_state), 0, 68UL); switch (link_bw) { case 6: pipe_config->ddi_pll_sel = 1073741824U; goto ldv_48916; case 10: pipe_config->ddi_pll_sel = 536870912U; goto ldv_48916; case 20: pipe_config->ddi_pll_sel = 0U; goto ldv_48916; } ldv_48916: ; return; } }
/* intel_dp_sink_rates: report the sink's supported link rates. Uses the
 * sink's advertised rate table when present; otherwise falls back to
 * default_rates with a count derived from the max link bw code
 * ((bw >> 3) + 1, i.e. 1..3 entries). Returns the entry count. */
static int intel_dp_sink_rates(struct intel_dp *intel_dp , int const **sink_rates ) { int tmp ; { if ((unsigned int )intel_dp->num_sink_rates != 0U) { *sink_rates = (int const *)(& intel_dp->sink_rates); return ((int )intel_dp->num_sink_rates); } else { } *sink_rates = (int const *)(& default_rates); tmp = intel_dp_max_link_bw(intel_dp); return ((tmp >> 3) + 1); } }
/* intel_dp_source_rates: report the source-side supported rate table by
 * platform. The repeated byte-44/45 reads on drm_i915_private are
 * CIL-lowered IS_<platform> bitfield tests (exact platforms presumed —
 * SKL-style gets skl_rates, CHV (gen 8 + flag) gets chv_rates, everything
 * else default_rates trimmed by gen/device-id/revision checks). Returns
 * the number of usable entries. */
static int intel_dp_source_rates(struct drm_device *dev , int const **source_rates ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { *source_rates = (int const *)(& skl_rates); return (6); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned 
int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { *source_rates = (int const *)(& chv_rates); return (11); } else { } } else { } } *source_rates = (int const *)(& default_rates); __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((__p___7->dev)->pdev)->revision <= 1U) { return (2); } else { goto _L; } } else { _L: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 7U) { return (3); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___4->info.device_id) != 2574U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___5->info.device_id) != 2590U) { return (3); } else { return (2); } } else { return (2); } } else { return (2); } } } } }
/* intel_dp_set_clock: choose the per-platform DPLL divisor table
 * (gen4 / PCH / CHV / VLV) and copy the entry matching link_bw into
 * pipe_config->dpll, marking clock_set. No-op if no table matches. */
static void intel_dp_set_clock(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config , int link_bw ) { struct drm_device *dev ; struct dp_link_dpll const *divisor ; int i ; int count ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { dev = encoder->base.dev; divisor = (struct dp_link_dpll const *)0; count = 0; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { divisor = (struct dp_link_dpll const *)(& gen4_dpll); count = 2; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type != 0U) { divisor = (struct dp_link_dpll const *)(& pch_dpll); count = 2; } else { __p___0 = to_i915((struct 
drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { divisor = (struct dp_link_dpll const *)(& chv_dpll); count = 3; } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { divisor = (struct dp_link_dpll const *)(& vlv_dpll); count = 2; } else { } } } } if ((unsigned long )divisor != (unsigned long )((struct dp_link_dpll const *)0) && count != 0) { i = 0; goto ldv_49034; ldv_49033: ; if ((int )(divisor + (unsigned long )i)->link_bw == link_bw) { pipe_config->dpll = (divisor + (unsigned long )i)->dpll; pipe_config->clock_set = 1; goto ldv_49032; } else { } i = i + 1; ldv_49034: ; if (i < count) { goto ldv_49033; } else { } ldv_49032: ; } else { } return; } }
/* intersect_rates: merge-intersect two ascending rate arrays into
 * common_rates (capacity 8 = DP_MAX_SUPPORTED_RATES), returning the
 * number of common entries. WARNs and stops early if the output would
 * overflow. Both inputs are assumed sorted ascending — the two-pointer
 * walk relies on it. */
static int intersect_rates(int const *source_rates , int source_len , int const *sink_rates , int sink_len , int *common_rates ) { int i ; int j ; int k ; int __ret_warn_on ; long tmp ; long tmp___0 ; { i = 0; j = 0; k = 0; goto ldv_49048; ldv_49047: ; if ((int )*(source_rates + (unsigned long )i) == (int )*(sink_rates + (unsigned long )j)) { __ret_warn_on = k > 7; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1236, "WARN_ON(k >= DP_MAX_SUPPORTED_RATES)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (k); } else { } *(common_rates + (unsigned long )k) = *(source_rates + (unsigned long )i); k = k + 1; i = i + 1; j = j + 1; } else if ((int )*(source_rates + (unsigned long )i) < (int )*(sink_rates + (unsigned long )j)) { i = i + 1; } else { j = j + 1; } ldv_49048: ; if (i < 
source_len && j < sink_len) { goto ldv_49047; } else { } return (k); } }
/* intel_dp_common_rates: convenience wrapper — intersect this DP's sink
 * rate table with the platform's source rate table into common_rates;
 * returns the common count. */
static int intel_dp_common_rates(struct intel_dp *intel_dp , int *common_rates ) { struct drm_device *dev ; struct drm_device *tmp ; int const *source_rates ; int const *sink_rates ; int source_len ; int sink_len ; int tmp___0 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; sink_len = intel_dp_sink_rates(intel_dp, & sink_rates); source_len = intel_dp_source_rates(dev, & source_rates); tmp___0 = intersect_rates(source_rates, source_len, sink_rates, sink_len, common_rates); return (tmp___0); } }
/* snprintf_int_array: format nelem ints into str as a comma-separated
 * list, truncating silently when the buffer fills (snprintf's return of
 * the would-be length is used as the stop condition). */
static void snprintf_int_array(char *str , size_t len , int const *array , int nelem ) { int i ; int r ; int tmp ; { *str = 0; i = 0; goto ldv_49068; ldv_49067: tmp = snprintf(str, len, "%s%d", i != 0 ? (char *)", " : (char *)"", *(array + (unsigned long )i)); r = tmp; if ((size_t )r >= len) { return; } else { } str = str + (unsigned long )r; len = len - (size_t )r; i = i + 1; ldv_49068: ; if (i < nelem) { goto ldv_49067; } else { } return; } }
/* intel_dp_print_rates: debug-only (drm_debug & DRM_UT_KMS) dump of the
 * source, sink, and common link-rate tables. */
static void intel_dp_print_rates(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; int const *source_rates ; int const *sink_rates ; int source_len ; int sink_len ; int common_len ; int common_rates[8U] ; char str[128U] ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; if ((drm_debug & 4U) == 0U) { return; } else { } source_len = intel_dp_source_rates(dev, & source_rates); snprintf_int_array((char *)(& str), 128UL, source_rates, source_len); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_print_rates", "source rates: %s\n", (char *)(& str)); } else { } sink_len = intel_dp_sink_rates(intel_dp, & sink_rates); snprintf_int_array((char *)(& str), 128UL, sink_rates, sink_len); tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_print_rates", "sink rates: %s\n", (char *)(& str)); 
} else { } common_len = intel_dp_common_rates(intel_dp, (int *)(& common_rates)); snprintf_int_array((char *)(& str), 128UL, (int const *)(& common_rates), common_len); tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dp_print_rates", "common rates: %s\n", (char *)(& str)); } else { } return; } }
/* rate_to_index: linear search of an 8-entry rate array for `find`;
 * returns its index, or 8 if absent. Callers use rate_to_index(0, ...)
 * to count the populated (non-zero) entries. */
static int rate_to_index(int find , int const *rates ) { int i ; { i = 0; i = 0; goto ldv_49089; ldv_49088: ; if ((int )*(rates + (unsigned long )i) == find) { goto ldv_49087; } else { } i = i + 1; ldv_49089: ; if (i <= 7) { goto ldv_49088; } else { } ldv_49087: ; return (i); } }
/* intel_dp_max_link_rate: highest rate common to source and sink.
 * Falls back to 162000 (1.62 GHz) with a WARN if no common rate exists;
 * otherwise returns the last populated entry (index of first zero - 1). */
int intel_dp_max_link_rate(struct intel_dp *intel_dp ) { int rates[8U] ; int len ; int __ret_warn_on ; long tmp ; long tmp___0 ; int tmp___1 ; { rates[0] = 0; rates[1] = 0; rates[2] = 0; rates[3] = 0; rates[4] = 0; rates[5] = 0; rates[6] = 0; rates[7] = 0; len = intel_dp_common_rates(intel_dp, (int *)(& rates)); __ret_warn_on = len <= 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1324, "WARN_ON(len <= 0)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (162000); } else { } tmp___1 = rate_to_index(0, (int const *)(& rates)); return (rates[tmp___1 + -1]); } }
/* intel_dp_rate_select: index of `rate` inside the sink's own rate
 * table — the value written to the sink's LINK_RATE_SET register. */
int intel_dp_rate_select(struct intel_dp *intel_dp , int rate ) { int tmp ; { tmp = rate_to_index(rate, (int const *)(& intel_dp->sink_rates)); return (tmp); } }
/* intel_dp_compute_config: the DP mode-set computation hook. Builds the
 * common rate table, applies eDP fixed-mode/panel-fitting, then searches
 * (bpp descending from pipe_bpp in steps of 6, clock ascending, lane
 * count doubling) for the first link configuration whose bandwidth
 * covers the mode; on success fills lane count, link bw / rate-select,
 * M/N values, DRRS state and the platform PLL config. Returns true on
 * success, false if no configuration fits or interlace is requested.
 * The byte-44/45/46 drm_i915_private reads are CIL-lowered platform
 * flag tests (exact platform identities presumed — TODO confirm). */
bool intel_dp_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_display_mode *adjusted_mode ; struct intel_dp *intel_dp ; struct intel_dp *tmp ; enum port port ; struct intel_digital_port *tmp___0 ; struct intel_crtc 
*intel_crtc ; struct drm_crtc const *__mptr ; struct intel_connector *intel_connector ; int lane_count ; int clock ; int min_lane_count ; int max_lane_count ; u8 tmp___1 ; int min_clock ; int max_clock ; int bpp ; int mode_rate ; int link_avail ; int link_clock ; int common_rates[8U] ; int common_len ; int __ret_warn_on ; long tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int ret ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; bool tmp___3 ; long tmp___4 ; long tmp___5 ; bool tmp___6 ; u8 tmp___7 ; int tmp___8 ; long tmp___9 ; long tmp___10 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; bool tmp___11 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; adjusted_mode = & pipe_config->base.adjusted_mode; tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); port = tmp___0->port; __mptr = (struct drm_crtc const *)pipe_config->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; intel_connector = intel_dp->attached_connector; min_lane_count = 1; tmp___1 = intel_dp_max_lane_count(intel_dp); max_lane_count = (int )tmp___1; min_clock = 0; common_rates[0] = 0; common_rates[1] = 0; common_rates[2] = 0; common_rates[3] = 0; common_rates[4] = 0; common_rates[5] = 0; common_rates[6] = 0; common_rates[7] = 0; common_len = intel_dp_common_rates(intel_dp, (int *)(& common_rates)); __ret_warn_on = common_len <= 0; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1360, "WARN_ON(common_len <= 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 
0, 0L); max_clock = common_len + -1; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 46UL) == 0U) { if ((unsigned int )port != 0U) { pipe_config->has_pch_encoder = 1; } else { } } else { } } else { } pipe_config->has_dp_encoder = 1; pipe_config->has_drrs = 0; pipe_config->has_audio = (bool )((int )intel_dp->has_audio && (unsigned int )port != 0U); tmp___3 = is_edp(intel_dp); if ((int )tmp___3 && (unsigned long )intel_connector->panel.fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { intel_fixed_panel_mode((struct drm_display_mode const *)intel_connector->panel.fixed_mode, adjusted_mode); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 8U) { ret = skl_update_scaler_users(intel_crtc, pipe_config, (struct intel_plane *)0, (struct intel_plane_state *)0, 0); if (ret != 0) { return (ret != 0); } else { } } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type == 0U) { intel_gmch_panel_fitting(intel_crtc, pipe_config, intel_connector->panel.fitting_mode); } else { intel_pch_panel_fitting(intel_crtc, pipe_config, intel_connector->panel.fitting_mode); } } else { } if ((adjusted_mode->flags & 4096U) != 0U) { return (0); } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_dp_compute_config", "DP link computation with max lane count %i max bw %d pixel clock %iKHz\n", max_lane_count, common_rates[max_clock], adjusted_mode->crtc_clock); } else { } bpp = pipe_config->pipe_bpp; tmp___6 = is_edp(intel_dp); if ((int )tmp___6) { if (dev_priv->vbt.edp_bpp != 0 && dev_priv->vbt.edp_bpp < bpp) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_compute_config", "clamping bpp for eDP panel to BIOS-provided 
%i\n", dev_priv->vbt.edp_bpp); } else { } bpp = dev_priv->vbt.edp_bpp; } else { } min_lane_count = max_lane_count; min_clock = max_clock; } else { } /* triple-nested search, lowered to gotos: outer over bpp (descending), middle over clock index, inner over lane count (1,2,4); first fit wins */ goto ldv_49162; ldv_49161: mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, bpp); clock = min_clock; goto ldv_49159; ldv_49158: lane_count = min_lane_count; goto ldv_49156; ldv_49155: link_clock = common_rates[clock]; link_avail = intel_dp_max_data_rate(link_clock, lane_count); if (mode_rate <= link_avail) { goto found; } else { } lane_count = lane_count << 1; ldv_49156: ; if (lane_count <= max_lane_count) { goto ldv_49155; } else { } clock = clock + 1; ldv_49159: ; if (clock <= max_clock) { goto ldv_49158; } else { } bpp = bpp + -6; ldv_49162: ; if (bpp > 17) { goto ldv_49161; } else { } return (0); found: ; if ((int )intel_dp->color_range_auto) { if (bpp != 18) { tmp___7 = drm_match_cea_mode((struct drm_display_mode const *)adjusted_mode); if ((unsigned int )tmp___7 > 1U) { intel_dp->color_range = 256U; } else { intel_dp->color_range = 0U; } } else { intel_dp->color_range = 0U; } } else { } if (intel_dp->color_range != 0U) { pipe_config->limited_color_range = 1; } else { } intel_dp->lane_count = (uint8_t )lane_count; if ((unsigned int )intel_dp->num_sink_rates != 0U) { intel_dp->link_bw = 0U; tmp___8 = intel_dp_rate_select(intel_dp, common_rates[clock]); intel_dp->rate_select = (uint8_t )tmp___8; } else { intel_dp->link_bw = drm_dp_link_rate_to_bw_code(common_rates[clock]); intel_dp->rate_select = 0U; } pipe_config->pipe_bpp = bpp; pipe_config->port_clock = common_rates[clock]; tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("intel_dp_compute_config", "DP link bw %02x lane count %d clock %d bpp %d\n", (int )intel_dp->link_bw, (int )intel_dp->lane_count, pipe_config->port_clock, bpp); } else { } tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("intel_dp_compute_config", "DP link bw required %i 
available %i\n", mode_rate, link_avail); } else { } intel_link_compute_m_n(bpp, lane_count, adjusted_mode->crtc_clock, pipe_config->port_clock, & pipe_config->dp_m_n); if ((unsigned long )intel_connector->panel.downclock_mode != (unsigned long )((struct drm_display_mode *)0) && (unsigned int )dev_priv->drrs.type == 2U) { pipe_config->has_drrs = 1; intel_link_compute_m_n(bpp, lane_count, (intel_connector->panel.downclock_mode)->clock, pipe_config->port_clock, & pipe_config->dp_m2_n2); } else { } __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { tmp___11 = is_edp(intel_dp); if ((int )tmp___11) { skl_edp_set_pll_config(pipe_config, common_rates[clock]); } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) == 0U) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) == 9U) { } else { goto _L; } } else { _L: /* CIL Label */ __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { hsw_dp_set_ddi_pll_sel(pipe_config, (int )intel_dp->link_bw); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) == 0U) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 8U) { hsw_dp_set_ddi_pll_sel(pipe_config, (int )intel_dp->link_bw); } else { intel_dp_set_clock(encoder, pipe_config, (int )intel_dp->link_bw); } } else { intel_dp_set_clock(encoder, pipe_config, (int )intel_dp->link_bw); } } } } return (1); } }
/* ironlake_set_pll_cpu_edp: enable the Ironlake CPU eDP PLL for the
 * crtc's configured port clock, applying the 160 MHz devA workaround
 * bit (0x10000) for the 162000 rate; the final readl + udelay lets the
 * PLL settle. Register 409600 is the DP_A control register (presumed). */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 dpa_ctl ; long tmp___0 ; 
long tmp___1 ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; __mptr = (struct drm_crtc const *)dig_port->base.base.crtc; crtc = (struct intel_crtc *)__mptr; dev = crtc->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ironlake_set_pll_cpu_edp", "eDP PLL enable for clock %d\n", (crtc->config)->port_clock); } else { } dpa_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 1); dpa_ctl = dpa_ctl & 4294770687U; if ((crtc->config)->port_clock == 162000) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("ironlake_set_pll_cpu_edp", "160MHz cpu eDP clock, might need ilk devA w/a\n"); } else { } dpa_ctl = dpa_ctl | 65536U; intel_dp->DP = intel_dp->DP | 65536U; } else { dpa_ctl = dpa_ctl; intel_dp->DP = intel_dp->DP; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 409600L, dpa_ctl, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 0); __const_udelay(2147500UL); return; } }
/* intel_dp_prepare: compute the cached DP port register value
 * (intel_dp->DP) for the current crtc config — lane count, audio,
 * sync polarity, enhanced framing, pipe select — with three platform
 * branches: gen7 port A (CPU eDP), CPT PCH ports (TRANS_DP_CTL at
 * pipe*0x1000 + 918272), and the gen4/VLV-style layout. Only writes
 * the transcoder register on the CPT path; the port register itself is
 * written later by the enable hooks. Bit values are presumed from the
 * i915 DP register layout — not visible in this chunk. */
static void intel_dp_prepare(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dp *intel_dp ; struct intel_dp *tmp ; enum port port ; struct intel_digital_port *tmp___0 ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *adjusted_mode ; uint32_t tmp___1 ; bool tmp___2 ; u32 trans_dp ; bool tmp___3 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; bool tmp___4 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); port = tmp___0->port; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; adjusted_mode = & 
(crtc->config)->base.adjusted_mode; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); intel_dp->DP = tmp___1 & 4U; intel_dp->DP = intel_dp->DP; intel_dp->DP = intel_dp->DP | (uint32_t )(((int )intel_dp->lane_count + -1) << 19); if ((int )(crtc->config)->has_audio) { intel_dp->DP = intel_dp->DP | 64U; } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 7U && (unsigned int )port == 0U) { if ((int )adjusted_mode->flags & 1) { intel_dp->DP = intel_dp->DP | 8U; } else { } if ((adjusted_mode->flags & 4U) != 0U) { intel_dp->DP = intel_dp->DP | 16U; } else { } intel_dp->DP = intel_dp->DP | 768U; tmp___2 = drm_dp_enhanced_frame_cap((u8 const *)(& intel_dp->dpcd)); if ((int )tmp___2) { intel_dp->DP = intel_dp->DP | 262144U; } else { } intel_dp->DP = intel_dp->DP | (uint32_t )((int )crtc->pipe << 29); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type == 2U && (unsigned int )port != 0U) { intel_dp->DP = intel_dp->DP | 768U; trans_dp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 4096 + 918272), 1); tmp___3 = drm_dp_enhanced_frame_cap((u8 const *)(& intel_dp->dpcd)); if ((int )tmp___3) { trans_dp = trans_dp | 262144U; } else { trans_dp = trans_dp & 4294705151U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )crtc->pipe * 4096 + 918272), trans_dp, 1); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { intel_dp->DP = intel_dp->DP | intel_dp->color_range; } else { } } else { } if ((int )adjusted_mode->flags & 1) { intel_dp->DP = intel_dp->DP | 8U; } else { } if ((adjusted_mode->flags & 4U) != 0U) { intel_dp->DP = intel_dp->DP | 16U; } else { } intel_dp->DP = intel_dp->DP | 805306368U; tmp___4 = 
drm_dp_enhanced_frame_cap((u8 const *)(& intel_dp->dpcd)); if ((int )tmp___4) { intel_dp->DP = intel_dp->DP | 262144U; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { intel_dp->DP = intel_dp->DP | (uint32_t )((int )crtc->pipe << 16); } else { goto _L; } } else _L: /* CIL Label */ if ((int )crtc->pipe == 1) { intel_dp->DP = intel_dp->DP | 1073741824U; } else { } } } return; } }
/* wait_panel_status: poll the eDP panel power status register until
 * (status & mask) == value, up to 5 s, asserting pps_mutex is held.
 * Sleeps 10-20 ms between polls when sleeping is allowed, else spins.
 * Logs (does not return) an error on timeout. The timeout loop below is
 * a CIL-expanded wait_for() macro. */
static void wait_panel_status(struct intel_dp *intel_dp , u32 mask , u32 value ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; u32 pp_stat_reg ; u32 pp_ctrl_reg ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; long tmp___5 ; uint32_t tmp___6 ; uint32_t tmp___7 ; unsigned long timeout__ ; unsigned long tmp___8 ; int ret__ ; uint32_t tmp___9 ; bool tmp___10 ; uint32_t tmp___11 ; long tmp___12 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1634, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { tmp___3 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 1); tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_stat_reg, 1); drm_ut_debug_printk("wait_panel_status", "mask %08x value %08x status %08x control %08x\n", mask, value, tmp___4, tmp___3); } else { } tmp___8 = msecs_to_jiffies(5000U); timeout__ = (tmp___8 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_49281; ldv_49280: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_stat_reg, 1); if ((tmp___9 & mask) != value) { ret__ = -110; } else { } goto ldv_49279; } else { } tmp___10 = drm_can_sleep___12(); if ((int )tmp___10) { usleep_range(10000UL, 20000UL); } else { cpu_relax(); } ldv_49281: tmp___11 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_stat_reg, 1); if ((tmp___11 & mask) != value) { goto ldv_49280; } else { } ldv_49279: ; if (ret__ != 0) { tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 1); tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_stat_reg, 1); drm_err("Panel status timeout: status %08x control %08x\n", tmp___7, tmp___6); } else { } tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("wait_panel_status", "Wait complete\n"); } else { } return; } } static void wait_panel_on(struct intel_dp *intel_dp ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("wait_panel_on", "Wait for panel power on\n"); } else { } wait_panel_status(intel_dp, 2952790031U, 2147483656U); return; } } static void wait_panel_off(struct intel_dp *intel_dp ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("wait_panel_off", "Wait for panel power off time\n"); } else { } wait_panel_status(intel_dp, 2952790016U, 0U); return; } } static void wait_panel_power_cycle(struct intel_dp 
/* NOTE(review): tail of wait_panel_power_cycle(), whose header sits on the previous line: after waiting out the remaining panel_power_cycle_delay ms, it polls until the PP status bits selected by mask 0xB800000F (3087007759U) read 0. */
*intel_dp ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("wait_panel_power_cycle", "Wait for panel power cycle\n"); } else { } wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle, intel_dp->panel_power_cycle_delay); wait_panel_status(intel_dp, 3087007759U, 0U); return; } }
/* wait_backlight_on(): sleep until backlight_on_delay ms have elapsed since last_power_on. */
static void wait_backlight_on(struct intel_dp *intel_dp ) { { wait_remaining_ms_from_jiffies(intel_dp->last_power_on, intel_dp->backlight_on_delay); return; } }
/* edp_wait_backlight_off(): sleep until backlight_off_delay ms have elapsed since last_backlight_off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp ) { { wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, intel_dp->backlight_off_delay); return; } }
/* ironlake_get_pp_control(): read the panel-power control register via mmio_readl, keep only its low 16 bits (control & 0xFFFF), then OR in 0xABCD0000 (2882338816U) -- presumably the PP register write-unlock key; confirm against i915 PP_CONTROL docs. The debug_locks/lock_is_held/warn_slowpath_fmt preamble is the CIL-expanded lockdep WARN that pps_mutex is held. */
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; u32 control ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; u32 tmp___3 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1699, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = _pp_ctrl_reg(intel_dp); control = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )tmp___3, 1); control = control & 65535U; control = control | 2882338816U; return (control); } }
/* edp_panel_vdd_on() begins here; its body continues on the following lines. */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct intel_digital_port *intel_dig_port ; struct 
intel_digital_port *tmp___0 ; struct intel_encoder *intel_encoder ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain power_domain ; u32 pp ; u32 pp_stat_reg ; u32 pp_ctrl_reg ; bool need_to_disable ; int __ret_warn_on ; int tmp___1 ; int tmp___2 ; long tmp___3 ; bool tmp___4 ; int tmp___5 ; bool tmp___6 ; long tmp___7 ; bool tmp___8 ; int tmp___9 ; uint32_t tmp___10 ; uint32_t tmp___11 ; long tmp___12 ; long tmp___13 ; bool tmp___14 ; int tmp___15 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; tmp___0 = dp_to_dig_port(intel_dp); intel_dig_port = tmp___0; intel_encoder = & intel_dig_port->base; dev_priv = (struct drm_i915_private *)dev->dev_private; need_to_disable = (bool )(! ((int )intel_dp->want_panel_vdd != 0)); if (debug_locks != 0) { tmp___1 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___1 == 0) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } __ret_warn_on = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1723, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___4 = is_edp(intel_dp); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 1; } if (tmp___5) { return (0); } else { } ldv_cancel_delayed_work_872(& intel_dp->panel_vdd_work); intel_dp->want_panel_vdd = 1; tmp___6 = edp_have_panel_vdd(intel_dp); if ((int )tmp___6) { return (need_to_disable); } else { } power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("edp_panel_vdd_on", "Turning eDP port %c VDD on\n", (unsigned int )intel_dig_port->port + 65U); } else { } tmp___8 = 
edp_have_panel_power(intel_dp); if (tmp___8) { tmp___9 = 0; } else { tmp___9 = 1; } if (tmp___9) { wait_panel_power_cycle(intel_dp); } else { } pp = ironlake_get_pp_control(intel_dp); pp = pp | 8U; pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 1); tmp___11 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_stat_reg, 1); drm_ut_debug_printk("edp_panel_vdd_on", "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", tmp___11, tmp___10); } else { } tmp___14 = edp_have_panel_power(intel_dp); if (tmp___14) { tmp___15 = 0; } else { tmp___15 = 1; } if (tmp___15) { tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("edp_panel_vdd_on", "eDP port %c panel power wasn\'t enabled\n", (unsigned int )intel_dig_port->port + 65U); } else { } msleep((unsigned int )intel_dp->panel_power_up_delay); } else { } return (need_to_disable); } } void intel_edp_panel_vdd_on(struct intel_dp *intel_dp ) { bool vdd ; bool tmp ; int tmp___0 ; int __ret_warn_on ; int __ret_warn_on___0 ; struct intel_digital_port *tmp___1 ; long tmp___2 ; struct intel_digital_port *tmp___3 ; long tmp___4 ; { tmp = is_edp(intel_dp); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } pps_lock(intel_dp); vdd = edp_panel_vdd_on(intel_dp); pps_unlock(intel_dp); __ret_warn_on = ! 
vdd; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___0 = 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { tmp___1 = dp_to_dig_port(intel_dp); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1784, "eDP port %c VDD already requested on\n", (unsigned int )tmp___1->port + 65U); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); } else { tmp___3 = dp_to_dig_port(intel_dp); drm_err("eDP port %c VDD already requested on\n", (unsigned int )tmp___3->port + 65U); } } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; struct intel_encoder *intel_encoder ; enum intel_display_power_domain power_domain ; u32 pp ; u32 pp_stat_reg ; u32 pp_ctrl_reg ; int __ret_warn_on ; int tmp___1 ; int tmp___2 ; long tmp___3 ; int __ret_warn_on___0 ; long tmp___4 ; bool tmp___5 ; int tmp___6 ; long tmp___7 ; uint32_t tmp___8 ; uint32_t tmp___9 ; long tmp___10 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = dp_to_dig_port(intel_dp); intel_dig_port = tmp___0; intel_encoder = & intel_dig_port->base; if (debug_locks != 0) { tmp___1 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___1 == 0) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } __ret_warn_on = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1798, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )intel_dp->want_panel_vdd; tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1800, "WARN_ON(intel_dp->want_panel_vdd)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); tmp___5 = edp_have_panel_vdd(intel_dp); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { return; } else { } tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("edp_panel_vdd_off_sync", "Turning eDP port %c VDD off\n", (unsigned int )intel_dig_port->port + 65U); } else { } pp = ironlake_get_pp_control(intel_dp); pp = pp & 4294967287U; pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp_stat_reg = _pp_stat_reg(intel_dp); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 1); tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_stat_reg, 1); drm_ut_debug_printk("edp_panel_vdd_off_sync", "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", tmp___9, tmp___8); } else { } if ((pp & 1U) == 0U) { intel_dp->last_power_cycle = jiffies; } else { } power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_put(dev_priv, 
power_domain); return; } } static void edp_panel_vdd_work(struct work_struct *__work ) { struct intel_dp *intel_dp ; struct delayed_work const *__mptr ; struct delayed_work *tmp ; { tmp = to_delayed_work(__work); __mptr = (struct delayed_work const *)tmp; intel_dp = (struct intel_dp *)__mptr + 0xfffffffffffff738UL; pps_lock(intel_dp); if (! intel_dp->want_panel_vdd) { edp_panel_vdd_off_sync(intel_dp); } else { } pps_unlock(intel_dp); return; } } static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp ) { unsigned long delay ; { delay = msecs_to_jiffies((unsigned int const )(intel_dp->panel_power_cycle_delay * 5)); schedule_delayed_work___2(& intel_dp->panel_vdd_work, delay); return; } } static void edp_panel_vdd_off(struct intel_dp *intel_dp , bool sync ) { struct drm_i915_private *dev_priv ; struct drm_device *tmp ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; int __ret_warn_on___0 ; int __ret_warn_on___1 ; struct intel_digital_port *tmp___5 ; long tmp___6 ; struct intel_digital_port *tmp___7 ; long tmp___8 ; { tmp = intel_dp_to_dev(intel_dp); dev_priv = (struct drm_i915_private *)tmp->dev_private; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1862, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = is_edp(intel_dp); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return; } else { } __ret_warn_on___0 = ! 
intel_dp->want_panel_vdd; tmp___8 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___8 != 0L) { if ((int )i915.verbose_state_checks) { __ret_warn_on___1 = 1; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { tmp___5 = dp_to_dig_port(intel_dp); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1868, "eDP port %c VDD not forced on", (unsigned int )tmp___5->port + 65U); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); } else { tmp___7 = dp_to_dig_port(intel_dp); drm_err("eDP port %c VDD not forced on", (unsigned int )tmp___7->port + 65U); } } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); intel_dp->want_panel_vdd = 0; if ((int )sync) { edp_panel_vdd_off_sync(intel_dp); } else { edp_panel_vdd_schedule_off(intel_dp); } return; } } static void edp_panel_on(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; u32 pp ; u32 pp_ctrl_reg ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; struct intel_digital_port *tmp___5 ; long tmp___6 ; int __ret_warn_on___0 ; bool tmp___7 ; struct intel_digital_port *tmp___8 ; long tmp___9 ; long tmp___10 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1885, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = is_edp(intel_dp); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return; } else { } tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { tmp___5 = dp_to_dig_port(intel_dp); drm_ut_debug_printk("edp_panel_on", "Turn eDP port %c panel power on\n", (unsigned int )tmp___5->port + 65U); } else { } tmp___7 = edp_have_panel_power(intel_dp); __ret_warn_on___0 = (int )tmp___7; tmp___9 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___9 != 0L) { tmp___8 = dp_to_dig_port(intel_dp); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1895, "eDP port %c panel power already on\n", (unsigned int )tmp___8->port + 65U); } else { } tmp___10 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___10 != 0L) { return; } else { } wait_panel_power_cycle(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp = ironlake_get_pp_control(intel_dp); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 5U) { pp = pp & 4294967293U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); } else { } pp = pp | 1U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) != 5U) { pp = pp | 2U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); wait_panel_on(intel_dp); intel_dp->last_power_on = jiffies; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 5U) { pp = pp | 2U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); } else { } return; } } void intel_edp_panel_on(struct intel_dp *intel_dp ) { bool tmp ; int tmp___0 ; { tmp = is_edp(intel_dp); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } pps_lock(intel_dp); edp_panel_on(intel_dp); pps_unlock(intel_dp); return; } } static void edp_panel_off(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_encoder *intel_encoder ; struct drm_device *dev ; struct drm_device *tmp___0 ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain power_domain ; u32 pp ; u32 pp_ctrl_reg ; int __ret_warn_on ; int tmp___1 ; int tmp___2 ; long tmp___3 ; bool tmp___4 ; int tmp___5 ; struct intel_digital_port *tmp___6 ; long tmp___7 ; int __ret_warn_on___0 ; struct intel_digital_port *tmp___8 ; long tmp___9 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; intel_encoder = & intel_dig_port->base; tmp___0 = intel_dp_to_dev(intel_dp); dev = tmp___0; dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp___1 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___1 == 0) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } __ret_warn_on = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1947, "WARN_ON(debug_locks && 
!lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___4 = is_edp(intel_dp); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 1; } if (tmp___5) { return; } else { } tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { tmp___6 = dp_to_dig_port(intel_dp); drm_ut_debug_printk("edp_panel_off", "Turn eDP port %c panel power off\n", (unsigned int )tmp___6->port + 65U); } else { } __ret_warn_on___0 = ! intel_dp->want_panel_vdd; tmp___9 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___9 != 0L) { tmp___8 = dp_to_dig_port(intel_dp); warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 1956, "Need eDP port %c VDD to turn off panel\n", (unsigned int )tmp___8->port + 65U); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); pp = ironlake_get_pp_control(intel_dp); pp = pp & 4294967280U; pp_ctrl_reg = _pp_ctrl_reg(intel_dp); intel_dp->want_panel_vdd = 0; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); intel_dp->last_power_cycle = jiffies; wait_panel_off(intel_dp); power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_put(dev_priv, power_domain); return; } } void intel_edp_panel_off(struct intel_dp *intel_dp ) { bool tmp ; int tmp___0 ; { tmp = is_edp(intel_dp); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } pps_lock(intel_dp); edp_panel_off(intel_dp); pps_unlock(intel_dp); return; } } static void _intel_edp_backlight_on(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 pp ; u32 pp_ctrl_reg ; { tmp = dp_to_dig_port(intel_dp); 
intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; wait_backlight_on(intel_dp); pps_lock(intel_dp); pp = ironlake_get_pp_control(intel_dp); pp = pp | 4U; pp_ctrl_reg = _pp_ctrl_reg(intel_dp); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); pps_unlock(intel_dp); return; } } void intel_edp_backlight_on(struct intel_dp *intel_dp ) { bool tmp ; int tmp___0 ; long tmp___1 ; { tmp = is_edp(intel_dp); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_edp_backlight_on", "\n"); } else { } intel_panel_enable_backlight(intel_dp->attached_connector); _intel_edp_backlight_on(intel_dp); return; } } static void _intel_edp_backlight_off(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; u32 pp ; u32 pp_ctrl_reg ; bool tmp___0 ; int tmp___1 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = is_edp(intel_dp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return; } else { } pps_lock(intel_dp); pp = ironlake_get_pp_control(intel_dp); pp = pp & 4294967291U; pp_ctrl_reg = _pp_ctrl_reg(intel_dp); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_ctrl_reg, 0); pps_unlock(intel_dp); intel_dp->last_backlight_off = jiffies; edp_wait_backlight_off(intel_dp); return; } } void intel_edp_backlight_off(struct intel_dp *intel_dp ) { bool tmp ; int tmp___0 ; long tmp___1 ; { tmp = is_edp(intel_dp); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { 
drm_ut_debug_printk("intel_edp_backlight_off", "\n"); } else { } _intel_edp_backlight_off(intel_dp); intel_panel_disable_backlight(intel_dp->attached_connector); return; } } static void intel_edp_backlight_power(struct intel_connector *connector , bool enable ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; bool is_enabled ; u32 tmp___0 ; long tmp___1 ; { tmp = intel_attached_dp(& connector->base); intel_dp = tmp; pps_lock(intel_dp); tmp___0 = ironlake_get_pp_control(intel_dp); is_enabled = (tmp___0 & 4U) != 0U; pps_unlock(intel_dp); if ((int )is_enabled == (int )enable) { return; } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_edp_backlight_power", "panel power control backlight %s\n", (int )enable ? (char *)"enable" : (char *)"disable"); } else { } if ((int )enable) { _intel_edp_backlight_on(intel_dp); } else { _intel_edp_backlight_off(intel_dp); } return; } } static void ironlake_edp_pll_on(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_crtc *crtc ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 dpa_ctl ; struct drm_crtc const *__mptr ; long tmp___0 ; int __ret_warn_on ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; crtc = intel_dig_port->base.base.crtc; dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; assert_pipe(dev_priv, ((struct intel_crtc *)__mptr)->pipe, 0); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("ironlake_edp_pll_on", "\n"); } else { } dpa_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 1); __ret_warn_on = (dpa_ctl & 16384U) != 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2109, "dp pll on, should be off\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )dpa_ctl < 0; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2110, "dp port still on, should be off\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); intel_dp->DP = intel_dp->DP & 2147483583U; intel_dp->DP = intel_dp->DP | 16384U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 409600L, intel_dp->DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 0); __const_udelay(859000UL); return; } } static void ironlake_edp_pll_off(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_crtc *crtc ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 dpa_ctl ; struct drm_crtc const *__mptr ; int __ret_warn_on ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; crtc = intel_dig_port->base.base.crtc; dev = crtc->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)crtc; assert_pipe(dev_priv, ((struct intel_crtc *)__mptr)->pipe, 0); dpa_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 1); __ret_warn_on = (dpa_ctl & 16384U) == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2135, "dp pll off, should be on\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )dpa_ctl < 0; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2136, "dp port still on, should be off\n"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); dpa_ctl = dpa_ctl & 4294950911U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 409600L, dpa_ctl, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 0); __const_udelay(859000UL); return; } } void intel_dp_sink_dpms(struct intel_dp *intel_dp , int mode ) { int ret ; int i ; ssize_t tmp ; ssize_t tmp___0 ; long tmp___1 ; { if ((unsigned int )intel_dp->dpcd[0] <= 16U) { return; } else { } if (mode != 0) { tmp = drm_dp_dpcd_writeb(& intel_dp->aux, 1536U, 2); ret = (int )tmp; } else { i = 0; goto ldv_49487; ldv_49486: tmp___0 = drm_dp_dpcd_writeb(& intel_dp->aux, 1536U, 1); ret = (int )tmp___0; if (ret == 1) { goto ldv_49485; } else { } msleep(1U); i = i + 1; ldv_49487: ; if (i <= 2) { goto ldv_49486; } else { } ldv_49485: ; } if (ret != 1) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_sink_dpms", "failed to %s sink power state\n", mode == 0 ? 
(char *)"enable" : (char *)"disable"); } else { } } else { } return; } } static bool intel_dp_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; enum port port ; struct intel_digital_port *tmp___0 ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain power_domain ; u32 tmp___1 ; bool tmp___2 ; int tmp___3 ; enum pipe p ; u32 trans_dp ; uint32_t tmp___4 ; struct drm_i915_private *__p ; long tmp___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); port = tmp___0->port; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; power_domain = intel_display_port_power_domain(encoder); tmp___2 = intel_display_power_is_enabled(dev_priv, power_domain); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); if ((int )tmp___1 >= 0) { return (0); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 7U && (unsigned int )port == 0U) { *pipe = (enum pipe )((tmp___1 & 1610612736U) >> 29); } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type == 2U && (unsigned int )port != 0U) { p = 0; goto ldv_49520; ldv_49519: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )p * 4096 + 918272), 1); trans_dp = tmp___4; if (((trans_dp & 1610612736U) >> 29) + 1U == (u32 )port) { *pipe = p; return (1); } else { } p = (enum pipe )((int )p + 1); ldv_49520: __p = dev_priv; if ((int )__p->info.num_pipes > (int )p) { goto ldv_49519; } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { 
drm_ut_debug_printk("intel_dp_get_hw_state", "No pipe for dp port 0x%x found\n", intel_dp->output_reg); } else { } } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { *pipe = (enum pipe )((tmp___1 & 196608U) >> 16); } else { *pipe = (enum pipe )((tmp___1 & 1073741824U) >> 30); } } else { *pipe = (enum pipe )((tmp___1 & 1073741824U) >> 30); } } } return (1); } } static void intel_dp_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; u32 tmp___0 ; u32 flags ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; struct intel_digital_port *tmp___1 ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; int dotclock ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p___2 ; long tmp___3 ; bool tmp___4 ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; flags = 0U; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___1 = dp_to_dig_port(intel_dp); port = tmp___1->port; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); pipe_config->has_audio = (bool )((tmp___0 & 64U) != 0U && (unsigned int )port != 0U); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 2U && (unsigned int )port != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )crtc->pipe * 4096 + 918272), 1); if ((tmp___0 & 8U) != 0U) { flags = flags | 1U; } else { flags = flags | 2U; } if ((tmp___0 & 16U) != 0U) { flags = flags | 4U; } else { flags = flags | 8U; } } else { if ((tmp___0 & 8U) != 
0U) { flags = flags | 1U; } else { flags = flags | 2U; } if ((tmp___0 & 16U) != 0U) { flags = flags | 4U; } else { flags = flags | 8U; } } pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | flags; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { if ((tmp___0 & 256U) != 0U) { pipe_config->limited_color_range = 1; } else { } } else { } } else { } pipe_config->has_dp_encoder = 1; intel_dp_get_m_n(crtc, pipe_config); if ((unsigned int )port == 0U) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 409600L, 1); if ((tmp___2 & 196608U) == 65536U) { pipe_config->port_clock = 162000; } else { pipe_config->port_clock = 270000; } } else { } dotclock = intel_dotclock_calculate(pipe_config->port_clock, (struct intel_link_m_n const *)(& pipe_config->dp_m_n)); __p___2 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___2->pch_type != 0U && (unsigned int )port != 0U) { ironlake_check_encoder_dotclock((struct intel_crtc_state const *)pipe_config, dotclock); } else { } pipe_config->base.adjusted_mode.crtc_clock = dotclock; tmp___4 = is_edp(intel_dp); if (((int )tmp___4 && dev_priv->vbt.edp_bpp != 0) && pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_get_config", "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); } else { } dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; } else { } return; } } static void intel_disable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct drm_device *dev ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; 
struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; dev = encoder->base.dev; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; if ((int )(crtc->config)->has_audio) { intel_audio_codec_disable(encoder); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { goto _L; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { goto _L; } else { goto _L___1; } } else { _L___1: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { goto _L; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { goto _L; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) != 0U) { _L: /* CIL Label */ __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 46UL) == 0U) { intel_psr_disable(intel_dp); } else { } } else { } } } } } intel_edp_panel_vdd_on(intel_dp); intel_edp_backlight_off(intel_dp); intel_dp_sink_dpms(intel_dp, 3); intel_edp_panel_off(intel_dp); __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) <= 4U) { intel_dp_link_down(intel_dp); } else { } return; } } static void ilk_post_disable_dp(struct intel_encoder *encoder ) { struct 
/* NOTE(review): CIL-generated (LDV harness) text; tokens kept byte-identical,
 * comments only added.  This physical line begins with the tail of
 * ilk_post_disable_dp(): the leading "struct" keyword for these declarations
 * sits on the previous physical line.  ilk_post_disable_dp drives the DP link
 * down and, when the port is 0U (presumably PORT_A / eDP -- TODO confirm
 * against the enum port definition), also turns the eDP PLL off. */
intel_dp *intel_dp ; struct intel_dp *tmp ; enum port port ; struct intel_digital_port *tmp___0 ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); port = tmp___0->port; intel_dp_link_down(intel_dp); if ((unsigned int )port == 0U) { ironlake_edp_pll_off(intel_dp); } else { } return; } }
/* VLV post-disable hook: only needs to bring the DP link down; no PLL or
 * DPIO teardown is done here. */
static void vlv_post_disable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; intel_dp_link_down(intel_dp); return; } }
/* CHV post-disable hook: brings the link down, then reprograms per-channel
 * PHY (DPIO) registers under sb_lock.  The offsets (ch*9216U + 516U/1028U
 * and + 512U/1024U) are presumably the two per-lane-pair PCS register banks
 * of the selected dpio_channel -- TODO confirm against the VLV/CHV DPIO
 * register definitions.  Visible behavior: OR bit 8388608U (bit 23) into
 * the first pair, and AND-clear mask ~4294901631U (bits 7..16 region) in
 * the second pair.  The statement cut off at the end of this physical line
 * continues on the next one. */
static void chv_post_disable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct intel_digital_port *dport ; struct intel_digital_port *tmp___0 ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel ch ; int tmp___1 ; enum pipe pipe ; u32 val ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); dport = tmp___0; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___1 = vlv_dport_to_channel(dport); ch = (enum dpio_channel )tmp___1; pipe = intel_crtc->pipe; intel_dp_link_down(intel_dp); mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 516U)); val = val | 8388608U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 516U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1028U)); val = val | 8388608U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1028U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 512U)); val = val & 4294901631U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 512U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 
9216U + 1024U)); val = val & 4294901631U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1024U), val); mutex_unlock(& dev_priv->sb_lock); return; } } static void _intel_dp_set_link_train(struct intel_dp *intel_dp , uint32_t *DP , uint8_t dp_train_pat ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; uint32_t temp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dig_port->port; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 46UL) != 0U) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), 1); temp = tmp___0; if (((int )dp_train_pat & 32) != 0) { temp = temp | 128U; } else { temp = temp & 4294967167U; } temp = temp & 4294965503U; switch ((int )dp_train_pat & 3) { case 0: temp = temp | 768U; goto ldv_49675; case 1: temp = temp; goto ldv_49675; case 2: temp = temp | 256U; goto ldv_49675; case 3: temp = temp | 1024U; goto ldv_49675; } ldv_49675: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), temp, 1); } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 7U && (unsigned int )port == 0U) { goto _L; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___4->pch_type == 2U && (unsigned int )port != 0U) { _L: /* CIL Label */ *DP = *DP & 4294965503U; switch ((int )dp_train_pat & 3) { case 0: *DP = *DP | 768U; goto ldv_49692; case 1: 
*DP = *DP; goto ldv_49692; case 2: *DP = *DP | 256U; goto ldv_49692; case 3: drm_err("DP training pattern 3 not supported\n"); *DP = *DP | 256U; goto ldv_49692; } ldv_49692: ; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { *DP = *DP & 3489644543U; } else { *DP = *DP & 3489660927U; } } else { *DP = *DP & 3489660927U; } switch ((int )dp_train_pat & 3) { case 0: *DP = *DP | 805306368U; goto ldv_49709; case 1: *DP = *DP; goto ldv_49709; case 2: *DP = *DP | 268435456U; goto ldv_49709; case 3: __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { *DP = *DP | 16384U; } else { drm_err("DP training pattern 3 not supported\n"); *DP = *DP | 268435456U; } } else { drm_err("DP training pattern 3 not supported\n"); *DP = *DP | 268435456U; } goto ldv_49709; } ldv_49709: ; } } } return; } } static void intel_dp_enable_port(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; _intel_dp_set_link_train(intel_dp, & intel_dp->DP, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, intel_dp->DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); intel_dp->DP = intel_dp->DP | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, intel_dp->DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); return; } } static void intel_enable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct drm_device 
*dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; uint32_t dp_reg ; uint32_t tmp___0 ; unsigned int lane_mask ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; struct drm_i915_private *__p ; struct intel_digital_port *tmp___3 ; struct drm_i915_private *__p___0 ; long tmp___4 ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); dp_reg = tmp___0; lane_mask = 0U; __ret_warn_on = (int )dp_reg < 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2499, "WARN_ON(dp_reg & DP_PORT_EN)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return; } else { } pps_lock(intel_dp); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { vlv_init_panel_power_sequencer(intel_dp); } else { } intel_dp_enable_port(intel_dp); edp_panel_vdd_on(intel_dp); edp_panel_on(intel_dp); edp_panel_vdd_off(intel_dp, 1); pps_unlock(intel_dp); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { tmp___3 = dp_to_dig_port(intel_dp); vlv_wait_port_ready(dev_priv, tmp___3, lane_mask); } else { } intel_dp_sink_dpms(intel_dp, 0); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); if ((int )(crtc->config)->has_audio) { tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { 
/* NOTE(review): CIL-generated (LDV harness) text; tokens kept byte-identical,
 * comments only added.  This physical line opens with the tail of
 * intel_enable_dp(): the debug print and audio-codec enable for crtcs whose
 * config has audio ("+ 65" turns the pipe index into the letter 'A'+pipe). */
drm_ut_debug_printk("intel_enable_dp", "Enabling DP audio on pipe %c\n", (int )crtc->pipe + 65); } else { } intel_audio_codec_enable(encoder); } else { } return; } }
/* G4x enable hook: full port enable (intel_enable_dp) followed by eDP
 * backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; intel_enable_dp(encoder); intel_edp_backlight_on(intel_dp); return; } }
/* VLV enable hook: note it does NOT call intel_enable_dp() here -- on VLV
 * that happens in vlv_pre_enable_dp() (visible later in this file); this
 * stage only turns the backlight on and enables PSR. */
static void vlv_enable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; intel_edp_backlight_on(intel_dp); intel_psr_enable(intel_dp); return; } }
/* G4x pre-enable hook: programs the port (intel_dp_prepare) and, for
 * port 0U (presumably PORT_A / eDP -- TODO confirm), sets up and turns on
 * the eDP PLL before the port is enabled. */
static void g4x_pre_enable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct intel_digital_port *dport ; struct intel_digital_port *tmp___0 ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); dport = tmp___0; intel_dp_prepare(encoder); if ((unsigned int )dport->port == 0U) { ironlake_set_pll_cpu_edp(intel_dp); ironlake_edp_pll_on(intel_dp); } else { } return; } }
/* Detach the panel power sequencer currently bound to this eDP port:
 * sync off any pending VDD, then write 0 to pp_on_reg
 * (pipe * 256 + 1970696 -- presumably the per-pipe VLV PPS ON-control
 * register; TODO confirm against the register headers) which disables the
 * sequencer, and finally mark pps_pipe invalid (the "-1" assignment is
 * completed on the next physical line). */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_i915_private *dev_priv ; enum pipe pipe ; int pp_on_reg ; long tmp___0 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev_priv = (struct drm_i915_private *)(intel_dig_port->base.base.dev)->dev_private; pipe = intel_dp->pps_pipe; pp_on_reg = (int )pipe * 256 + 1970696; edp_panel_vdd_off_sync(intel_dp); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("vlv_detach_power_sequencer", "detaching pipe %c power sequencer from port %c\n", (int )pipe + 65, (unsigned int )intel_dig_port->port + 65U); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_on_reg, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_on_reg, 0); intel_dp->pps_pipe = 
-1; return; } } static void vlv_steal_power_sequencer(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; struct intel_encoder *encoder ; int __ret_warn_on ; int tmp ; int tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; long tmp___3 ; struct list_head const *__mptr ; struct intel_dp *intel_dp ; enum port port ; struct intel_digital_port *tmp___4 ; long tmp___5 ; int __ret_warn_on___1 ; long tmp___6 ; struct list_head const *__mptr___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp == 0) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2593, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = (int )pipe != 0 && (int )pipe != 1; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2595, "WARN_ON(pipe != PIPE_A && pipe != PIPE_B)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { return; } else { } __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_49798; ldv_49797: ; if ((unsigned int )encoder->type != 8U) { goto ldv_49793; } else { } intel_dp = enc_to_intel_dp(& encoder->base); tmp___4 = 
dp_to_dig_port(intel_dp); port = tmp___4->port; if ((int )intel_dp->pps_pipe != (int )pipe) { goto ldv_49793; } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("vlv_steal_power_sequencer", "stealing pipe %c power sequencer from port %c\n", (int )pipe + 65, (unsigned int )port + 65U); } else { } __ret_warn_on___1 = (int )encoder->connectors_active; tmp___6 = ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2617, "stealing pipe %c power sequencer from active eDP port %c\n", (int )pipe + 65, (unsigned int )port + 65U); } else { } ldv__builtin_expect(__ret_warn_on___1 != 0, 0L); vlv_detach_power_sequencer(intel_dp); ldv_49793: __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49798: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_49797; } else { } return; } } static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; encoder = & intel_dig_port->base; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { 
tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 2632, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = is_edp(intel_dp); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return; } else { } if ((int )intel_dp->pps_pipe == (int )crtc->pipe) { return; } else { } if ((int )intel_dp->pps_pipe != -1) { vlv_detach_power_sequencer(intel_dp); } else { } vlv_steal_power_sequencer(dev, crtc->pipe); intel_dp->pps_pipe = crtc->pipe; tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("vlv_init_panel_power_sequencer", "initializing pipe %c power sequencer for port %c\n", (int )intel_dp->pps_pipe + 65, (unsigned int )intel_dig_port->port + 65U); } else { } intel_dp_init_panel_power_sequencer(dev, intel_dp); intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); return; } } static void vlv_pre_enable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct intel_digital_port *dport ; struct intel_digital_port *tmp___0 ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel port ; int tmp___1 ; int pipe ; u32 val ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); dport = tmp___0; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___1 = vlv_dport_to_channel(dport); port = (enum dpio_channel 
)tmp___1; pipe = intel_crtc->pipe; mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 9216U + 544U)); val = 0U; if (pipe != 0) { val = val | 2097152U; } else { val = val & 4292870143U; } val = val | 1048772U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33312U), val); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33336U), 7733272U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33372U), 4196488U); mutex_unlock(& dev_priv->sb_lock); intel_enable_dp(encoder); return; } } static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel port ; int tmp___0 ; int pipe ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = vlv_dport_to_channel(dport); port = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; intel_dp_prepare(encoder); mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )(((unsigned int )port + 65U) * 512U), 65664U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33284U), 6291552U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33328U), 7671552U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33452U), 5376U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33464U), 1077936128U); mutex_unlock(& dev_priv->sb_lock); return; } } static void chv_pre_enable_dp(struct intel_encoder *encoder ) { struct intel_dp *intel_dp ; struct 
intel_dp *tmp ; struct intel_digital_port *dport ; struct intel_digital_port *tmp___0 ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel ch ; int tmp___1 ; int pipe ; int data ; int i ; int stagger ; u32 val ; { tmp = enc_to_intel_dp(& encoder->base); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); dport = tmp___0; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___1 = vlv_dport_to_channel(dport); ch = (enum dpio_channel )tmp___1; pipe = intel_crtc->pipe; mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U)); val = val & 4294967287U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U)); val = val & 4294967287U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 516U)); val = val | 8388608U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 516U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1028U)); val = val | 8388608U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1028U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 512U)); val = val | 65664U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 512U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1024U)); val = val | 65664U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1024U), val); i = 0; goto ldv_49854; ldv_49853: 
data = i != 1; vlv_dpio_write(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 184, (u32 )(data << 30)); i = i + 1; ldv_49854: ; if (i <= 3) { goto ldv_49853; } else { } if ((intel_crtc->config)->port_clock > 270000) { stagger = 24; } else if ((intel_crtc->config)->port_clock > 135000) { stagger = 13; } else if ((intel_crtc->config)->port_clock > 67500) { stagger = 7; } else if ((intel_crtc->config)->port_clock > 33750) { stagger = 4; } else { stagger = 2; } val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U)); val = val | 520093696U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U)); val = val | 520093696U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U), val); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 560U), (u32 )(stagger | 401216)); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1072U), (u32 )(stagger | 5709632)); mutex_unlock(& dev_priv->sb_lock); intel_enable_dp(encoder); return; } } static void chv_dp_pre_pll_enable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel ch ; int tmp___0 ; enum pipe pipe ; u32 val ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = vlv_dport_to_channel(dport); ch = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; intel_dp_prepare(encoder); mutex_lock_nested(& dev_priv->sb_lock, 0U); if ((int )pipe != 1) { val = vlv_dpio_read(dev_priv, pipe, 33044); 
/* NOTE(review): CIL-generated (LDV harness) text; tokens kept byte-identical,
 * comments only added.  This physical line opens with the tail of
 * chv_dp_pre_pll_enable(): per-channel DPIO mask/set sequences under
 * sb_lock.  The odd multiplier 4294967116U is (u32)-180, i.e. CIL's
 * rendering of a negative per-channel offset (ch * -180 + 33100) --
 * presumably intentional; TODO confirm against the CHV DPIO register
 * definitions. */
val = val & 4279238655U; if ((unsigned int )ch == 0U) { val = val | 12582912U; } else { } if ((unsigned int )ch == 1U) { val = val | 3145728U; } else { } vlv_dpio_write(dev_priv, pipe, 33044, val); } else { val = vlv_dpio_read(dev_priv, pipe, 32900); val = val & 4293001215U; if ((unsigned int )ch == 0U) { val = val | 393216U; } else { } if ((unsigned int )ch == 1U) { val = val | 1572864U; } else { } vlv_dpio_write(dev_priv, pipe, 32900, val); } val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 544U)); val = val | 1048576U; if ((int )pipe != 1) { val = val & 4292870143U; } else { val = val | 2097152U; } vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 544U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1056U)); val = val | 1048576U; if ((int )pipe != 1) { val = val & 4292870143U; } else { val = val | 2097152U; } vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1056U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 4294967116U + 33100U)); if ((int )pipe != 1) { val = val & 4294959103U; } else { val = val | 8192U; } vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 4294967116U + 33100U), val); mutex_unlock(& dev_priv->sb_lock); return; } }
/* DPCD read with a sink wake-up: first issues a throwaway 1-byte read of
 * DPCD address 0 (this temporarily clobbers buffer[0]; harmless because
 * the real read below overwrites it -- but note the wake read's result is
 * deliberately ignored).  Then retries the real read up to 3 times
 * (i <= 2) with a 1 ms sleep between attempts; the CIL goto labels
 * ldv_49876/ldv_49877 encode the for-loop.  Returns the byte count on a
 * full-size read, otherwise the last (short or negative) result. */
static ssize_t intel_dp_dpcd_read_wake(struct drm_dp_aux *aux , unsigned int offset , void *buffer , size_t size ) { ssize_t ret ; int i ; { drm_dp_dpcd_read(aux, 0U, buffer, 1UL); i = 0; goto ldv_49877; ldv_49876: ret = drm_dp_dpcd_read(aux, offset, buffer, size); if ((unsigned long )ret == size) { return (ret); } else { } msleep(1U); i = i + 1; ldv_49877: ; if (i <= 2) { goto ldv_49876; } else { } return (ret); } }
/* Fetch the 6-byte link status block starting at DPCD 514U (0x202,
 * presumably DP_LANE0_1_STATUS -- TODO confirm); true only if all 6 bytes
 * were read. */
static bool intel_dp_get_link_status(struct intel_dp *intel_dp , uint8_t *link_status ) { ssize_t tmp ; { tmp = intel_dp_dpcd_read_wake(& intel_dp->aux, 514U, (void *)link_status, 6UL); return (tmp == 6L); } }
/* Head of intel_dp_voltage_max(): declarations continue on the next
 * physical line. */
static uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp ) { struct drm_device *dev ; 
struct drm_device *tmp ; struct drm_i915_private *dev_priv ; enum port port ; struct intel_digital_port *tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = dp_to_dig_port(intel_dp); port = tmp___0->port; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 9U) { return (3U); } else { goto _L; } } else { _L: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 8U) { if ((int )dev_priv->edp_low_vswing && (unsigned int )port == 0U) { return (3U); } else { } return (2U); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { return (3U); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 7U && (unsigned int )port == 0U) { return (2U); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 2U && (unsigned int )port != 0U) { return (3U); } else { return (2U); } } } } } } } static uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp , uint8_t voltage_swing ) { struct drm_device *dev ; struct drm_device *tmp ; enum port port ; struct intel_digital_port *tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; tmp___0 = dp_to_dig_port(intel_dp); port = tmp___0->port; __p___4 = 
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 8U) { switch ((int )voltage_swing & 3) { case 0: ; return (24U); case 1: ; return (16U); case 2: ; return (8U); case 3: ; return (0U); default: ; return (0U); } } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { _L: /* CIL Label */ switch ((int )voltage_swing & 3) { case 0: ; return (24U); case 1: ; return (16U); case 2: ; return (8U); case 3: ; default: ; return (0U); } } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { switch ((int )voltage_swing & 3) { case 0: ; return (24U); case 1: ; return (16U); case 2: ; return (8U); case 3: ; default: ; return (0U); } } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 7U && (unsigned int )port == 0U) { switch ((int )voltage_swing & 3) { case 0: ; return (16U); case 1: ; case 2: ; return (8U); default: ; return (0U); } } else { switch ((int )voltage_swing & 3) { case 0: ; return (16U); case 1: ; return (16U); case 2: ; return (8U); case 3: ; default: ; return (0U); } } } } } } } } static uint32_t vlv_signal_levels(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; struct intel_digital_port *dport ; struct intel_digital_port *tmp___0 ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; unsigned long demph_reg_value ; unsigned long preemph_reg_value ; unsigned long uniqtranscale_reg_value ; uint8_t train_set ; enum dpio_channel port ; int tmp___1 ; int pipe ; { tmp 
= intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = dp_to_dig_port(intel_dp); dport = tmp___0; __mptr = (struct drm_crtc const *)dport->base.base.crtc; intel_crtc = (struct intel_crtc *)__mptr; train_set = intel_dp->train_set[0]; tmp___1 = vlv_dport_to_channel(dport); port = (enum dpio_channel )tmp___1; pipe = intel_crtc->pipe; switch ((int )train_set & 24) { case 0: preemph_reg_value = 16384UL; switch ((int )train_set & 3) { case 0: demph_reg_value = 725636437UL; uniqtranscale_reg_value = 1428863034UL; goto ldv_50008; case 1: demph_reg_value = 725631040UL; uniqtranscale_reg_value = 1430829114UL; goto ldv_50008; case 2: demph_reg_value = 723801429UL; uniqtranscale_reg_value = 1432401978UL; goto ldv_50008; case 3: demph_reg_value = 725636437UL; uniqtranscale_reg_value = 1436080698UL; goto ldv_50008; default: ; return (0U); } ldv_50008: ; goto ldv_50013; case 8: preemph_reg_value = 8192UL; switch ((int )train_set & 3) { case 0: demph_reg_value = 725631040UL; uniqtranscale_reg_value = 1431484474UL; goto ldv_50016; case 1: demph_reg_value = 725633096UL; uniqtranscale_reg_value = 1434499130UL; goto ldv_50016; case 2: demph_reg_value = 725631040UL; uniqtranscale_reg_value = 1437456954UL; goto ldv_50016; default: ; return (0U); } ldv_50016: ; goto ldv_50013; case 16: preemph_reg_value = 0UL; switch ((int )train_set & 3) { case 0: demph_reg_value = 724587861UL; uniqtranscale_reg_value = 1433450554UL; goto ldv_50022; case 1: demph_reg_value = 724254784UL; uniqtranscale_reg_value = 1437456954UL; goto ldv_50022; default: ; return (0U); } ldv_50022: ; goto ldv_50013; case 24: preemph_reg_value = 24576UL; switch ((int )train_set & 3) { case 0: demph_reg_value = 457200981UL; uniqtranscale_reg_value = 1437456954UL; goto ldv_50027; default: ; return (0U); } ldv_50027: ; goto ldv_50013; default: ; return (0U); } ldv_50013: mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int 
)port * 512U + 33428U), 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33424U), (u32 )demph_reg_value); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33416U), (u32 )uniqtranscale_reg_value); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33420U), 209199168U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33324U), 196608U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33316U), (u32 )preemph_reg_value); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33428U), 2147483648U); mutex_unlock(& dev_priv->sb_lock); return (0U); } } static uint32_t chv_signal_levels(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; struct intel_digital_port *dport ; struct intel_digital_port *tmp___0 ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u32 deemph_reg_value ; u32 margin_reg_value ; u32 val ; uint8_t train_set ; enum dpio_channel ch ; int tmp___1 ; enum pipe pipe ; int i ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = dp_to_dig_port(intel_dp); dport = tmp___0; __mptr = (struct drm_crtc const *)dport->base.base.crtc; intel_crtc = (struct intel_crtc *)__mptr; train_set = intel_dp->train_set[0]; tmp___1 = vlv_dport_to_channel(dport); ch = (enum dpio_channel )tmp___1; pipe = intel_crtc->pipe; switch ((int )train_set & 24) { case 0: ; switch ((int )train_set & 3) { case 0: deemph_reg_value = 128U; margin_reg_value = 52U; goto ldv_50048; case 1: deemph_reg_value = 128U; margin_reg_value = 77U; goto ldv_50048; case 2: deemph_reg_value = 128U; margin_reg_value = 102U; goto ldv_50048; case 3: deemph_reg_value = 128U; margin_reg_value = 154U; goto ldv_50048; default: ; return (0U); } ldv_50048: ; goto ldv_50053; case 8: ; switch ((int )train_set & 
3) { case 0: deemph_reg_value = 85U; margin_reg_value = 78U; goto ldv_50056; case 1: deemph_reg_value = 85U; margin_reg_value = 116U; goto ldv_50056; case 2: deemph_reg_value = 85U; margin_reg_value = 154U; goto ldv_50056; default: ; return (0U); } ldv_50056: ; goto ldv_50053; case 16: ; switch ((int )train_set & 3) { case 0: deemph_reg_value = 64U; margin_reg_value = 104U; goto ldv_50062; case 1: deemph_reg_value = 64U; margin_reg_value = 154U; goto ldv_50062; default: ; return (0U); } ldv_50062: ; goto ldv_50053; case 24: ; switch ((int )train_set & 3) { case 0: deemph_reg_value = 43U; margin_reg_value = 154U; goto ldv_50067; default: ; return (0U); } ldv_50067: ; goto ldv_50053; default: ; return (0U); } ldv_50053: mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 552U)); val = val & 1073741823U; val = val & 4042326015U; val = val; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 552U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1064U)); val = val & 1073741823U; val = val & 4042326015U; val = val; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1064U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 548U)); val = val & 4294902783U; val = val; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 548U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1060U)); val = val & 4294902783U; val = val; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1060U), val); i = 0; goto ldv_50071; ldv_50070: val = vlv_dpio_read(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 144); val = val & 16777215U; val = (deemph_reg_value << 24) | val; vlv_dpio_write(dev_priv, pipe, (((unsigned int )ch != 0U ? 
9216 : 0) + i * 512) + 144, val); i = i + 1; ldv_50071: ; if (i <= 3) { goto ldv_50070; } else { } i = 0; goto ldv_50074; ldv_50073: val = vlv_dpio_read(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 136); val = val & 4278255615U; val = (margin_reg_value << 16) | val; vlv_dpio_write(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 136, val); i = i + 1; ldv_50074: ; if (i <= 3) { goto ldv_50073; } else { } i = 0; goto ldv_50077; ldv_50076: val = vlv_dpio_read(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 140); val = val & 4160749567U; vlv_dpio_write(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 140, val); i = i + 1; ldv_50077: ; if (i <= 3) { goto ldv_50076; } else { } if (((int )train_set & 24) == 0 && ((int )train_set & 3) == 3) { i = 0; goto ldv_50080; ldv_50079: val = vlv_dpio_read(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 140); val = val | 134217728U; vlv_dpio_write(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 140, val); i = i + 1; ldv_50080: ; if (i <= 3) { goto ldv_50079; } else { } i = 0; goto ldv_50083; ldv_50082: val = vlv_dpio_read(dev_priv, pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 136); val = val & 4294902015U; val = val | 39424U; vlv_dpio_write(dev_priv, pipe, (((unsigned int )ch != 0U ? 
9216 : 0) + i * 512) + 136, val); i = i + 1; ldv_50083: ; if (i <= 3) { goto ldv_50082; } else { } } else { } val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 552U)); val = val | 3221225472U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 552U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1064U)); val = val | 3221225472U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1064U), val); val = vlv_dpio_read(dev_priv, pipe, 33144); val = val | 8U; vlv_dpio_write(dev_priv, pipe, 33144, val); mutex_unlock(& dev_priv->sb_lock); return (0U); } } static void intel_get_adjust_train(struct intel_dp *intel_dp , uint8_t const *link_status ) { uint8_t v ; uint8_t p ; int lane ; uint8_t voltage_max ; uint8_t preemph_max ; uint8_t this_v ; u8 tmp ; uint8_t this_p ; u8 tmp___0 ; { v = 0U; p = 0U; lane = 0; goto ldv_50097; ldv_50096: tmp = drm_dp_get_adjust_request_voltage(link_status, lane); this_v = tmp; tmp___0 = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); this_p = tmp___0; if ((int )this_v > (int )v) { v = this_v; } else { } if ((int )this_p > (int )p) { p = this_p; } else { } lane = lane + 1; ldv_50097: ; if ((int )intel_dp->lane_count > lane) { goto ldv_50096; } else { } voltage_max = intel_dp_voltage_max(intel_dp); if ((int )v >= (int )voltage_max) { v = (uint8_t )((unsigned int )voltage_max | 4U); } else { } preemph_max = intel_dp_pre_emphasis_max(intel_dp, (int )v); if ((int )p >= (int )preemph_max) { p = (uint8_t )((unsigned int )preemph_max | 32U); } else { } lane = 0; goto ldv_50100; ldv_50099: intel_dp->train_set[lane] = (uint8_t )((int )v | (int )p); lane = lane + 1; ldv_50100: ; if (lane <= 3) { goto ldv_50099; } else { } return; } } static uint32_t gen4_signal_levels(uint8_t train_set ) { uint32_t signal_levels ; { signal_levels = 0U; switch ((int )train_set & 3) { case 0: ; default: signal_levels = signal_levels; goto ldv_50108; case 1: signal_levels = 
/* Tail of gen4_signal_levels(): fold the voltage-swing request (low
 * bits of train_set) and then the pre-emphasis request (train_set &
 * 0x18) into the DP port register value. */
signal_levels | 33554432U; goto ldv_50108;
case 2: signal_levels = signal_levels | 67108864U; goto ldv_50108;
case 3: signal_levels = signal_levels | 100663296U; goto ldv_50108;
}
ldv_50108: ;
switch ((int )train_set & 24) {
case 0: ;
default: signal_levels = signal_levels; goto ldv_50114;
case 8: signal_levels = signal_levels | 4194304U; goto ldv_50114;
case 16: signal_levels = signal_levels | 8388608U; goto ldv_50114;
case 24: signal_levels = signal_levels | 12582912U; goto ldv_50114;
}
ldv_50114: ; return (signal_levels); } }
/* gen6_edp_signal_levels() - map the requested vswing/pre-emphasis
 * combination (train_set masked to bits 0,1,3,4 via & 27) to the SNB eDP
 * signal-level register field. Unsupported combinations emit a debug
 * message (when drm_debug has the DRIVER bit set) and fall back to 0. */
static uint32_t gen6_edp_signal_levels(uint8_t train_set ) { int signal_levels ; long tmp ; {
signal_levels = (int )train_set & 27;
switch (signal_levels) {
case 0: ; case 1: ; return (0U);
case 8: ; return (4194304U);
case 16: ; case 17: ; return (243269632U);
case 9: ; case 10: ; return (239075328U);
case 2: ; case 3: ; return (234881024U);
default:
tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
if (tmp != 0L) { drm_ut_debug_printk("gen6_edp_signal_levels", "Unsupported voltage swing/pre-emphasis level:0x%x\n", signal_levels); } else { }
return (0U);
} } }
/* gen7_edp_signal_levels() - IVB eDP variant of the same mapping; one
 * register word per supported vswing/pre-emphasis pair. */
static uint32_t gen7_edp_signal_levels(uint8_t train_set ) { int signal_levels ; long tmp ; {
signal_levels = (int )train_set & 27;
switch (signal_levels) {
case 0: ; return (150994944U);
case 8: ; return (176160768U);
case 16: ; return (197132288U);
case 1: ; return (201326592U);
case 9: ; return (226492416U);
case 2: ; return (234881024U);
case 10: ; return (260046848U);
default:
tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
if (tmp != 0L) { drm_ut_debug_printk("gen7_edp_signal_levels", "Unsupported voltage swing/pre-emphasis level:0x%x\n", signal_levels); } else { }
return (0U);
} } }
/* hsw_signal_levels() - HSW/BDW DDI translation-entry select for the
 * requested level; the switch continues on the next source line. */
static uint32_t hsw_signal_levels(uint8_t train_set ) { int signal_levels ; long tmp ; {
signal_levels = (int )train_set & 27;
switch (signal_levels) {
case 0: ; return (0U);
case 8: ; return (16777216U);
case 16: ; return (33554432U);
case 24: ; return (50331648U);
case 1: ; return
(67108864U); case 9: ; return (83886080U); case 17: ; return (100663296U); case 2: ; return (117440512U); case 10: ; return (134217728U); case 3: ; return (150994944U); default: tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_signal_levels", "Unsupported voltage swing/pre-emphasis level:0x%x\n", signal_levels); } else { } return (0U); } } } static void bxt_signal_levels(struct intel_dp *intel_dp ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; enum port port ; struct drm_device *dev ; struct intel_encoder *encoder ; uint8_t train_set ; uint32_t level ; int signal_levels ; long tmp___0 ; { tmp = dp_to_dig_port(intel_dp); dport = tmp; port = dport->port; dev = dport->base.base.dev; encoder = & dport->base; train_set = intel_dp->train_set[0]; level = 0U; signal_levels = (int )train_set & 27; switch (signal_levels) { default: tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("bxt_signal_levels", "Unsupported voltage swing/pre-emph level\n"); } else { } case 0: level = 0U; goto ldv_50175; case 8: level = 1U; goto ldv_50175; case 16: level = 2U; goto ldv_50175; case 24: level = 3U; goto ldv_50175; case 1: level = 4U; goto ldv_50175; case 9: level = 5U; goto ldv_50175; case 17: level = 6U; goto ldv_50175; case 2: level = 7U; goto ldv_50175; case 10: level = 8U; goto ldv_50175; case 3: level = 9U; goto ldv_50175; } ldv_50175: bxt_ddi_vswing_sequence(dev, level, port, (int )encoder->type); return; } } static void intel_dp_set_signal_levels(struct intel_dp *intel_dp , uint32_t *DP ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; enum port port ; struct drm_device *dev ; uint32_t signal_levels ; uint32_t mask ; uint8_t train_set ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private 
*__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; port = intel_dig_port->port; dev = intel_dig_port->base.base.dev; train_set = intel_dp->train_set[0]; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 9U) { signal_levels = 0U; bxt_signal_levels(intel_dp); mask = 0U; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 46UL) != 0U) { signal_levels = hsw_signal_levels((int )train_set); mask = 251658240U; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) != 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { signal_levels = chv_signal_levels(intel_dp); mask = 0U; } else { goto _L; } } else { _L: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { signal_levels = vlv_signal_levels(intel_dp); mask = 0U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 7U && (unsigned int )port == 0U) { signal_levels = gen7_edp_signal_levels((int )train_set); mask = 264241152U; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 6U && (unsigned int )port == 0U) { signal_levels = gen6_edp_signal_levels((int )train_set); mask = 264241152U; } else { signal_levels = gen4_signal_levels((int )train_set); mask = 264241152U; } } } } } } if (mask != 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { 
drm_ut_debug_printk("intel_dp_set_signal_levels", "Using signal levels %08x\n", signal_levels); } else { } } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_set_signal_levels", "Using vswing level %d\n", (int )train_set & 3); } else { } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dp_set_signal_levels", "Using pre-emphasis level %d\n", ((int )train_set & 24) >> 3); } else { } *DP = (*DP & ~ mask) | signal_levels; return; } } static bool intel_dp_set_link_train(struct intel_dp *intel_dp , uint32_t *DP , uint8_t dp_train_pat ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint8_t buf[5U] ; int ret ; int len ; ssize_t tmp___0 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; _intel_dp_set_link_train(intel_dp, DP, (int )dp_train_pat); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, *DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); buf[0] = dp_train_pat; if (((int )dp_train_pat & 3) == 0) { len = 1; } else { memcpy((void *)(& buf) + 1U, (void const *)(& intel_dp->train_set), (size_t )intel_dp->lane_count); len = (int )intel_dp->lane_count + 1; } tmp___0 = drm_dp_dpcd_write(& intel_dp->aux, 258U, (void *)(& buf), (size_t )len); ret = (int )tmp___0; return (ret == len); } } static bool intel_dp_reset_link_train(struct intel_dp *intel_dp , uint32_t *DP , uint8_t dp_train_pat ) { bool tmp ; { if (! 
intel_dp->train_set_valid) { memset((void *)(& intel_dp->train_set), 0, 4UL); } else { } intel_dp_set_signal_levels(intel_dp, DP); tmp = intel_dp_set_link_train(intel_dp, DP, (int )dp_train_pat); return (tmp); } } static bool intel_dp_update_link_train(struct intel_dp *intel_dp , uint32_t *DP , uint8_t const *link_status ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int ret ; ssize_t tmp___0 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_get_adjust_train(intel_dp, link_status); intel_dp_set_signal_levels(intel_dp, DP); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, *DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); tmp___0 = drm_dp_dpcd_write(& intel_dp->aux, 259U, (void *)(& intel_dp->train_set), (size_t )intel_dp->lane_count); ret = (int )tmp___0; return ((int )intel_dp->lane_count == ret); } } static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; uint32_t val ; struct drm_i915_private *__p ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dig_port->port; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return; } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409664U), 1); val = val & 4294965503U; val = val | 512U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned 
int )port * 256U + 409664U), val, 1); if ((unsigned int )port == 0U) { return; } else { } tmp___0 = msecs_to_jiffies(1U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_50293; ldv_50292: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409668U), 1); if ((tmp___1 & 33554432U) == 0U) { ret__ = -110; } else { } goto ldv_50291; } else { } tmp___2 = drm_can_sleep___12(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_50293: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409668U), 1); if ((tmp___3 & 33554432U) == 0U) { goto ldv_50292; } else { } ldv_50291: ; if (ret__ != 0) { drm_err("Timed out waiting for DP idle patterns\n"); } else { } return; } } void intel_dp_start_link_train(struct intel_dp *intel_dp ) { struct drm_encoder *encoder ; struct intel_digital_port *tmp ; struct drm_device *dev ; int i ; uint8_t voltage ; int voltage_tries ; int loop_tries ; uint32_t DP ; uint8_t link_config[2U] ; struct drm_i915_private *__p ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; uint8_t link_status[6U] ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; bool tmp___6 ; long tmp___7 ; bool tmp___8 ; int tmp___9 ; bool tmp___10 ; int tmp___11 ; { tmp = dp_to_dig_port(intel_dp); encoder = & tmp->base.base; dev = encoder->dev; DP = intel_dp->DP; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) != 0U) { intel_ddi_prepare_link_retrain(encoder); } else { } link_config[0] = intel_dp->link_bw; link_config[1] = intel_dp->lane_count; tmp___0 = drm_dp_enhanced_frame_cap((u8 const *)(& intel_dp->dpcd)); if ((int )tmp___0) { link_config[1] = (uint8_t )((unsigned int )link_config[1] | 128U); } else { } drm_dp_dpcd_write(& intel_dp->aux, 256U, (void *)(& link_config), 2UL); if ((unsigned int )intel_dp->num_sink_rates != 0U) { 
drm_dp_dpcd_write(& intel_dp->aux, 277U, (void *)(& intel_dp->rate_select), 1UL); } else { } link_config[0] = 0U; link_config[1] = 1U; drm_dp_dpcd_write(& intel_dp->aux, 263U, (void *)(& link_config), 2UL); DP = DP | 2147483648U; tmp___1 = intel_dp_reset_link_train(intel_dp, & DP, 33); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { drm_err("failed to enable link training\n"); return; } else { } voltage = 255U; voltage_tries = 0; loop_tries = 0; ldv_50319: drm_dp_link_train_clock_recovery_delay((u8 const *)(& intel_dp->dpcd)); tmp___3 = intel_dp_get_link_status(intel_dp, (uint8_t *)(& link_status)); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { drm_err("failed to get link status\n"); goto ldv_50313; } else { } tmp___6 = drm_dp_clock_recovery_ok((u8 const *)(& link_status), (int )intel_dp->lane_count); if ((int )tmp___6) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_start_link_train", "clock recovery OK\n"); } else { } goto ldv_50313; } else { } if ((int )intel_dp->train_set_valid) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_dp_start_link_train", "clock recovery not ok, reset"); } else { } intel_dp->train_set_valid = 0; tmp___8 = intel_dp_reset_link_train(intel_dp, & DP, 33); if (tmp___8) { tmp___9 = 0; } else { tmp___9 = 1; } if (tmp___9) { drm_err("failed to enable link training\n"); return; } else { } goto ldv_50315; } else { } i = 0; goto ldv_50318; ldv_50317: ; if (((int )intel_dp->train_set[i] & 4) == 0) { goto ldv_50316; } else { } i = i + 1; ldv_50318: ; if ((int )intel_dp->lane_count > i) { goto ldv_50317; } else { } ldv_50316: ; if ((int )intel_dp->lane_count == i) { loop_tries = loop_tries + 1; if (loop_tries == 5) { drm_err("too many full retries, give up\n"); goto ldv_50313; } else { } intel_dp_reset_link_train(intel_dp, & DP, 33); voltage_tries = 0; goto ldv_50315; } else { } if 
(((int )intel_dp->train_set[0] & 3) == (int )voltage) { voltage_tries = voltage_tries + 1; if (voltage_tries == 5) { drm_err("too many voltage retries, give up\n"); goto ldv_50313; } else { } } else { voltage_tries = 0; } voltage = (unsigned int )intel_dp->train_set[0] & 3U; tmp___10 = intel_dp_update_link_train(intel_dp, & DP, (uint8_t const *)(& link_status)); if (tmp___10) { tmp___11 = 0; } else { tmp___11 = 1; } if (tmp___11) { drm_err("failed to update link training\n"); goto ldv_50313; } else { } ldv_50315: ; goto ldv_50319; ldv_50313: intel_dp->DP = DP; return; } } void intel_dp_complete_link_train(struct intel_dp *intel_dp ) { bool channel_eq ; int tries ; int cr_tries ; uint32_t DP ; uint32_t training_pattern ; bool tmp ; int tmp___0 ; uint8_t link_status[6U] ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; bool tmp___5 ; bool tmp___6 ; int tmp___7 ; long tmp___8 ; { channel_eq = 0; DP = intel_dp->DP; training_pattern = 2U; if ((unsigned int )intel_dp->link_bw == 20U || (int )intel_dp->use_tps3) { training_pattern = 3U; } else { } tmp = intel_dp_set_link_train(intel_dp, & DP, (int )((unsigned int )((uint8_t )training_pattern) | 32U)); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { drm_err("failed to start channel equalization\n"); return; } else { } tries = 0; cr_tries = 0; channel_eq = 0; ldv_50331: ; if (cr_tries > 5) { drm_err("failed to train DP, aborting\n"); goto ldv_50329; } else { } drm_dp_link_train_channel_eq_delay((u8 const *)(& intel_dp->dpcd)); tmp___1 = intel_dp_get_link_status(intel_dp, (uint8_t *)(& link_status)); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { drm_err("failed to get link status\n"); goto ldv_50329; } else { } tmp___3 = drm_dp_clock_recovery_ok((u8 const *)(& link_status), (int )intel_dp->lane_count); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { intel_dp->train_set_valid = 0; intel_dp_start_link_train(intel_dp); intel_dp_set_link_train(intel_dp, & DP, (int 
)((unsigned int )((uint8_t )training_pattern) | 32U)); cr_tries = cr_tries + 1; goto ldv_50330; } else { } tmp___5 = drm_dp_channel_eq_ok((u8 const *)(& link_status), (int )intel_dp->lane_count); if ((int )tmp___5) { channel_eq = 1; goto ldv_50329; } else { } if (tries > 5) { intel_dp->train_set_valid = 0; intel_dp_start_link_train(intel_dp); intel_dp_set_link_train(intel_dp, & DP, (int )((unsigned int )((uint8_t )training_pattern) | 32U)); tries = 0; cr_tries = cr_tries + 1; goto ldv_50330; } else { } tmp___6 = intel_dp_update_link_train(intel_dp, & DP, (uint8_t const *)(& link_status)); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { drm_err("failed to update link training\n"); goto ldv_50329; } else { } tries = tries + 1; ldv_50330: ; goto ldv_50331; ldv_50329: intel_dp_set_idle_link_train(intel_dp); intel_dp->DP = DP; if ((int )channel_eq) { intel_dp->train_set_valid = 1; tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_dp_complete_link_train", "Channel EQ done. 
DP Training successful\n"); } else { } } else { } return; } } void intel_dp_stop_link_train(struct intel_dp *intel_dp ) { { intel_dp_set_link_train(intel_dp, & intel_dp->DP, 0); return; } } static void intel_dp_link_down(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; enum port port ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t DP ; int __ret_warn_on ; struct drm_i915_private *__p ; long tmp___0 ; long tmp___1 ; int __ret_warn_on___0 ; uint32_t tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; __mptr = (struct drm_crtc const *)intel_dig_port->base.base.crtc; crtc = (struct intel_crtc *)__mptr; port = intel_dig_port->port; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; DP = intel_dp->DP; __p = to_i915((struct drm_device const *)dev); __ret_warn_on = (unsigned int )*((unsigned char *)__p + 46UL) != 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 3854, "WARN_ON(HAS_DDI(dev))"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); __ret_warn_on___0 = (int )tmp___2 >= 0; tmp___3 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___3 != 0L) { 
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 3857, "WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)"); } else { } tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { return; } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_link_down", "\n"); } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 7U && (unsigned int )port == 0U) { DP = DP & 4294965503U; DP = DP | 512U; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type == 2U && (unsigned int )port != 0U) { DP = DP & 4294965503U; DP = DP | 512U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { DP = DP & 3489644543U; } else { DP = DP & 3489660927U; } } else { DP = DP & 3489660927U; } DP = DP | 536870912U; } } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); DP = DP & 2147483583U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); __p___4 = to_i915((struct drm_device const *)dev); if (((unsigned int )__p___4->pch_type == 1U && (int )crtc->pipe == 1) && (unsigned int )port != 0U) { DP = DP & 2415919103U; DP = DP | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); 
DP = DP & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_dp->output_reg, DP, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 0); } else { } msleep((unsigned int )intel_dp->panel_power_down_delay); return; } } static bool intel_dp_get_dpcd(struct intel_dp *intel_dp ) { struct intel_digital_port *dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint8_t rev ; ssize_t tmp___0 ; long tmp___1 ; long tmp___2 ; uint8_t frame_sync_cap ; long tmp___3 ; struct drm_i915_private *__p ; bool tmp___4 ; long tmp___5 ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; __le16 sink_rates[8U] ; int i ; int val ; bool tmp___6 ; ssize_t tmp___7 ; ssize_t tmp___8 ; { tmp = dp_to_dig_port(intel_dp); dig_port = tmp; dev = dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = intel_dp_dpcd_read_wake(& intel_dp->aux, 0U, (void *)(& intel_dp->dpcd), 15UL); if (tmp___0 < 0L) { return (0); } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_get_dpcd", "DPCD: %*ph\n", 15, (uint8_t *)(& intel_dp->dpcd)); } else { } if ((unsigned int )intel_dp->dpcd[0] == 0U) { return (0); } else { } memset((void *)(& intel_dp->psr_dpcd), 0, 2UL); tmp___4 = is_edp(intel_dp); if ((int )tmp___4) { intel_dp_dpcd_read_wake(& intel_dp->aux, 112U, (void *)(& intel_dp->psr_dpcd), 2UL); if ((int )intel_dp->psr_dpcd[0] & 1) { dev_priv->psr.sink_support = 1; tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dp_get_dpcd", "Detected EDP PSR Panel.\n"); } else { } } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 8U && ((int )intel_dp->psr_dpcd[0] & 2) != 0) { dev_priv->psr.sink_support = 1; intel_dp_dpcd_read_wake(& intel_dp->aux, 47U, (void *)(& frame_sync_cap), 
1UL); dev_priv->psr.aux_frame_sync = (unsigned int )frame_sync_cap != 0U; dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_get_dpcd", "PSR2 %s on sink", (int )dev_priv->psr.psr2_support ? (char *)"supported" : (char *)"not supported"); } else { } } else { } } else { } if ((unsigned int )intel_dp->dpcd[0] > 17U && ((int )intel_dp->dpcd[2] & 64) != 0) { __p___0 = dev_priv; if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { goto _L; } else { __p___1 = dev_priv; if ((unsigned int )((unsigned char )__p___1->info.gen) > 7U) { _L: /* CIL Label */ intel_dp->use_tps3 = 1; tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_get_dpcd", "Displayport TPS3 supported\n"); } else { } } else { intel_dp->use_tps3 = 0; } } } else { intel_dp->use_tps3 = 0; } tmp___6 = is_edp(intel_dp); if ((int )tmp___6 && ((int )intel_dp->dpcd[13] & 8) != 0) { tmp___7 = intel_dp_dpcd_read_wake(& intel_dp->aux, 1792U, (void *)(& rev), 1UL); if (tmp___7 == 1L) { if ((unsigned int )rev > 2U) { intel_dp_dpcd_read_wake(& intel_dp->aux, 16U, (void *)(& sink_rates), 16UL); i = 0; goto ldv_50422; ldv_50421: val = (int )sink_rates[i]; if (val == 0) { goto ldv_50420; } else { } intel_dp->sink_rates[i] = (val * 200) / 10; i = i + 1; ldv_50422: ; if ((unsigned int )i <= 7U) { goto ldv_50421; } else { } ldv_50420: intel_dp->num_sink_rates = (uint8_t )i; } else { } } else { } } else { } intel_dp_print_rates(intel_dp); if (((int )intel_dp->dpcd[5] & 1) == 0) { return (1); } else { } if ((unsigned int )intel_dp->dpcd[0] == 16U) { return (1); } else { } tmp___8 = intel_dp_dpcd_read_wake(& intel_dp->aux, 128U, (void *)(& intel_dp->downstream_ports), 16UL); if (tmp___8 < 0L) { return (0); } else { } return (1); } } static void intel_dp_probe_oui(struct intel_dp *intel_dp ) { u8 buf[3U] ; long tmp ; ssize_t tmp___0 ; long tmp___1 ; 
/* Tail of intel_dp_probe_oui() (header on the previous line): bail
 * unless bit 7 of DPCD byte 7 is set (the signed-char >= 0 test), then
 * read and debug-log the 3-byte sink OUI (DPCD 0x400) and branch OUI
 * (DPCD 0x500). Reads are best-effort; failures are silently ignored. */
ssize_t tmp___2 ; {
if ((int )((signed char )intel_dp->dpcd[7]) >= 0) { return; } else { }
tmp___0 = intel_dp_dpcd_read_wake(& intel_dp->aux, 1024U, (void *)(& buf), 3UL);
if (tmp___0 == 3L) {
tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
if (tmp != 0L) { drm_ut_debug_printk("intel_dp_probe_oui", "Sink OUI: %02hx%02hx%02hx\n", (int )buf[0], (int )buf[1], (int )buf[2]); } else { }
} else { }
tmp___2 = intel_dp_dpcd_read_wake(& intel_dp->aux, 1280U, (void *)(& buf), 3UL);
if (tmp___2 == 3L) {
tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_probe_oui", "Branch OUI: %02hx%02hx%02hx\n", (int )buf[0], (int )buf[1], (int )buf[2]); } else { }
} else { }
return; } }
/* intel_dp_probe_mst() - query DPCD 0x21 (presumably MSTM_CAP -- confirm
 * against the DP spec) and enable/disable the MST topology manager to
 * match. Sinks with DPCD rev <= 0x11 or without can_mst are skipped.
 * Returns the resulting is_mst state. */
static bool intel_dp_probe_mst(struct intel_dp *intel_dp ) { u8 buf[1U] ; long tmp ; long tmp___0 ; ssize_t tmp___1 ; {
if (! intel_dp->can_mst) { return (0); } else { }
if ((unsigned int )intel_dp->dpcd[0] <= 17U) { return (0); } else { }
tmp___1 = intel_dp_dpcd_read_wake(& intel_dp->aux, 33U, (void *)(& buf), 1UL);
if (tmp___1 != 0L) {
if ((int )buf[0] & 1) {
tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
if (tmp != 0L) { drm_ut_debug_printk("intel_dp_probe_mst", "Sink is MST capable\n"); } else { }
intel_dp->is_mst = 1;
} else {
tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_probe_mst", "Sink is not MST capable\n"); } else { }
intel_dp->is_mst = 0;
} } else { }
drm_dp_mst_topology_mgr_set_mst(& intel_dp->mst_mgr, (int )intel_dp->is_mst);
return (intel_dp->is_mst); } }
/* intel_dp_sink_crc() - retrieve the sink-computed frame CRC over AUX
 * (used for display validation). Only the declarations fit on this
 * source line; the body continues on the next one. */
int intel_dp_sink_crc(struct intel_dp *intel_dp , u8 *crc ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u8 buf ; int test_crc_count ; int attempts ; int ret ; ssize_t tmp___0 ; ssize_t tmp___1 ; ssize_t tmp___2 ; ssize_t tmp___3 ; ssize_t tmp___4 ; long tmp___5 ; ssize_t tmp___6 ;
ssize_t tmp___7 ; ssize_t tmp___8 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; __mptr = (struct drm_crtc const *)intel_dig_port->base.base.crtc; intel_crtc = (struct intel_crtc *)__mptr; attempts = 6; ret = 0; hsw_disable_ips(intel_crtc); tmp___0 = drm_dp_dpcd_readb(& intel_dp->aux, 582U, & buf); if (tmp___0 < 0L) { ret = -5; goto out; } else { } if (((int )buf & 32) == 0) { ret = -25; goto out; } else { } tmp___1 = drm_dp_dpcd_readb(& intel_dp->aux, 624U, & buf); if (tmp___1 < 0L) { ret = -5; goto out; } else { } tmp___2 = drm_dp_dpcd_writeb(& intel_dp->aux, 624U, (int )((unsigned int )buf | 1U)); if (tmp___2 < 0L) { ret = -5; goto out; } else { } tmp___3 = drm_dp_dpcd_readb(& intel_dp->aux, 582U, & buf); if (tmp___3 < 0L) { ret = -5; goto out; } else { } test_crc_count = (int )buf & 7; ldv_50447: tmp___4 = drm_dp_dpcd_readb(& intel_dp->aux, 582U, & buf); if (tmp___4 < 0L) { ret = -5; goto out; } else { } intel_wait_for_vblank(dev, (int )intel_crtc->pipe); attempts = attempts - 1; if (attempts != 0 && ((int )buf & 7) == test_crc_count) { goto ldv_50447; } else { } if (attempts == 0) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_sink_crc", "Panel is unable to calculate CRC after 6 vblanks\n"); } else { } ret = -110; goto out; } else { } tmp___6 = drm_dp_dpcd_read(& intel_dp->aux, 576U, (void *)crc, 6UL); if (tmp___6 < 0L) { ret = -5; goto out; } else { } tmp___7 = drm_dp_dpcd_readb(& intel_dp->aux, 624U, & buf); if (tmp___7 < 0L) { ret = -5; goto out; } else { } tmp___8 = drm_dp_dpcd_writeb(& intel_dp->aux, 624U, (int )buf & 254); if (tmp___8 < 0L) { ret = -5; goto out; } else { } out: hsw_enable_ips(intel_crtc); return (ret); } } static bool intel_dp_get_sink_irq(struct intel_dp *intel_dp , u8 *sink_irq_vector ) { ssize_t tmp ; { tmp = intel_dp_dpcd_read_wake(& intel_dp->aux, 513U, (void *)sink_irq_vector, 1UL); return (tmp == 1L); } } static bool 
/*
 * This line: intel_dp_get_sink_irq_esi() -- 14-byte read of the ESI block
 * at DPCD 8194 (0x2002), true only on a full 14-byte read; three compliance
 * autotest stubs (link_training returns 1, video_pattern and phy_pattern
 * return 2 -- presumably ACK/NAK codes, TODO confirm); and
 * intel_dp_autotest_edid() -- on a good EDID it writes the EDID checksum to
 * DPCD 609 (0x261) and returns 5, otherwise records NACK/DEFER counts and
 * sets compliance_test_data accordingly; always flags compliance_test_active.
 */
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp , u8 *sink_irq_vector ) { int ret ; ssize_t tmp ; { tmp = intel_dp_dpcd_read_wake(& intel_dp->aux, 8194U, (void *)sink_irq_vector, 14UL); ret = (int )tmp; if (ret != 14) { return (0); } else { } return (1); } } static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp ) { uint8_t test_result ; { test_result = 1U; return (test_result); } } static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp ) { uint8_t test_result ; { test_result = 2U; return (test_result); } } static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp ) { uint8_t test_result ; struct intel_connector *intel_connector ; struct drm_connector *connector ; long tmp ; long tmp___0 ; ssize_t tmp___1 ; { test_result = 2U; intel_connector = intel_dp->attached_connector; connector = & intel_connector->base; if (((unsigned long )intel_connector->detect_edid == (unsigned long )((struct edid *)0) || (int )connector->edid_corrupt) || intel_dp->aux.i2c_defer_count > 6U) { if (intel_dp->aux.i2c_nack_count != 0U || intel_dp->aux.i2c_defer_count != 0U) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dp_autotest_edid", "EDID read had %d NACKs, %d DEFERs\n", intel_dp->aux.i2c_nack_count, intel_dp->aux.i2c_defer_count); } else { } } else { } intel_dp->compliance_test_data = 3UL; } else { tmp___1 = drm_dp_dpcd_write(& intel_dp->aux, 609U, (void *)(& (intel_connector->detect_edid)->checksum), 1UL); if (tmp___1 == 0L) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_autotest_edid", "Failed to write EDID checksum\n"); } else { } } else { } test_result = 5U; intel_dp->compliance_test_data = 2UL; } intel_dp->compliance_test_active = 1; return (test_result); } } static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp ) { uint8_t test_result ; { test_result = 2U; return (test_result); } } static void 
/*
 * This line: intel_dp_handle_test_request() -- clears the compliance
 * bookkeeping, reads the test-request byte from DPCD 536 (0x218), and
 * dispatches on its value (1 = link training, 2 = video pattern, 4 = EDID,
 * 8 = PHY pattern) to the autotest helpers above.
 */
intel_dp_handle_test_request(struct intel_dp *intel_dp ) { uint8_t response ; uint8_t rxdata ; int status ; ssize_t tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; ssize_t tmp___6 ; long tmp___7 ; { response = 2U; rxdata = 0U; status = 0; intel_dp->compliance_test_active = 0; intel_dp->compliance_test_type = 0UL; intel_dp->compliance_test_data = 0UL; intel_dp->aux.i2c_nack_count = 0U; intel_dp->aux.i2c_defer_count = 0U; tmp = drm_dp_dpcd_read(& intel_dp->aux, 536U, (void *)(& rxdata), 1UL); status = (int )tmp; if (status <= 0) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_handle_test_request", "Could not read test request from sink\n"); } else { } goto update_status; } else { } switch ((int )rxdata) { case 1: tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_handle_test_request", "LINK_TRAINING test requested\n"); } else { } intel_dp->compliance_test_type = 1UL; response = intel_dp_autotest_link_training(intel_dp); goto ldv_50487; case 2: tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dp_handle_test_request", "TEST_PATTERN test requested\n"); } else { } intel_dp->compliance_test_type = 2UL; response = intel_dp_autotest_video_pattern(intel_dp); goto ldv_50487; case 4: tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_handle_test_request", "EDID test requested\n"); } else { } intel_dp->compliance_test_type = 4UL; response = intel_dp_autotest_edid(intel_dp); goto ldv_50487; case 8: tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_dp_handle_test_request", "PHY_PATTERN test requested\n"); } else { } intel_dp->compliance_test_type = 8UL; response = intel_dp_autotest_phy_pattern(intel_dp); goto ldv_50487; default: tmp___5 = 
/*
 * Continuation: default (unknown request) branch, then the test response
 * byte is written back to DPCD 608 (0x260).  Afterwards the declarations
 * and first part of intel_dp_check_mst_status() begin: it zeroes a 16-byte
 * ESI buffer, reads the ESI block, retrains the link if channel EQ fails
 * while MST links are active, and hands the ESI to drm_dp_mst_hpd_irq().
 */
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_handle_test_request", "Invalid test request \'%02x\'\n", (int )rxdata); } else { } goto ldv_50487; } ldv_50487: ; update_status: tmp___6 = drm_dp_dpcd_write(& intel_dp->aux, 608U, (void *)(& response), 1UL); status = (int )tmp___6; if (status <= 0) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_dp_handle_test_request", "Could not write test response to sink\n"); } else { } } else { } return; } } static int intel_dp_check_mst_status(struct intel_dp *intel_dp ) { bool bret ; u8 esi[16U] ; unsigned int tmp ; int ret ; int retry ; bool handled ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; int wret ; ssize_t tmp___4 ; long tmp___5 ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___6 ; long tmp___7 ; { if ((int )intel_dp->is_mst) { esi[0] = 0U; tmp = 1U; while (1) { if (tmp >= 16U) { break; } else { } esi[tmp] = (unsigned char)0; tmp = tmp + 1U; } ret = 0; bret = intel_dp_get_sink_irq_esi(intel_dp, (u8 *)(& esi)); go_again: ; if ((int )bret) { if (intel_dp->active_mst_links != 0) { tmp___1 = drm_dp_channel_eq_ok((u8 const *)(& esi) + 10U, (int )intel_dp->lane_count); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_check_mst_status", "channel EQ not ok, retraining\n"); } else { } intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); } else { } } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_check_mst_status", "got esi %3ph\n", (u8 *)(& esi)); } else { } ret = drm_dp_mst_hpd_irq(& intel_dp->mst_mgr, (u8 *)(& esi), & handled); if ((int )handled) { retry = 0; goto ldv_50505; ldv_50504: tmp___4 = drm_dp_dpcd_write(& 
/*
 * Continuation of intel_dp_check_mst_status(): acks the serviced ESI bits
 * by writing 3 bytes to DPCD 8195 (0x2003), retrying up to 3 times, then
 * re-reads the ESI and loops via go_again while events keep arriving.  If
 * the initial ESI read failed, MST is torn down (is_mst = 0, topology
 * manager updated) and a hotplug event is emitted.  Then the declarations
 * of intel_dp_check_link_status() begin.
 */
intel_dp->aux, 8195U, (void *)(& esi) + 1U, 3UL); wret = (int )tmp___4; if (wret == 3) { goto ldv_50503; } else { } retry = retry + 1; ldv_50505: ; if (retry <= 2) { goto ldv_50504; } else { } ldv_50503: bret = intel_dp_get_sink_irq_esi(intel_dp, (u8 *)(& esi)); if ((int )bret) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_check_mst_status", "got esi2 %3ph\n", (u8 *)(& esi)); } else { } goto go_again; } else { } } else { ret = 0; } return (ret); } else { tmp___6 = dp_to_dig_port(intel_dp); intel_dig_port = tmp___6; tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_dp_check_mst_status", "failed to get ESI - device may have failed\n"); } else { } intel_dp->is_mst = 0; drm_dp_mst_topology_mgr_set_mst(& intel_dp->mst_mgr, (int )intel_dp->is_mst); drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev); } } else { } return (-22); } } static void intel_dp_check_link_status(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct intel_encoder *intel_encoder ; struct intel_digital_port *tmp___0 ; u8 sink_irq_vector ; u8 link_status[6U] ; int __ret_warn_on ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; int __ret_warn_on___0 ; long tmp___4 ; long tmp___5 ; struct drm_crtc const *__mptr ; int tmp___6 ; bool tmp___7 ; int tmp___8 ; bool tmp___9 ; int tmp___10 ; long tmp___11 ; long tmp___12 ; bool tmp___13 ; long tmp___14 ; bool tmp___15 ; int tmp___16 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; tmp___0 = dp_to_dig_port(intel_dp); intel_encoder = & tmp___0->base; tmp___1 = drm_modeset_is_locked(& dev->mode_config.connection_mutex); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } __ret_warn_on = tmp___2; tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { 
/*
 * Continuation of intel_dp_check_link_status(): expanded WARN_ON()s assert
 * that the connection mutex is held and a CRTC is attached (the baked-in
 * absolute paths are CIL expansion artifacts from the LDV build host);
 * bails out unless the encoder is active, the CRTC is active, and both the
 * link status and DPCD can be read.  For DPCD rev > 0x10 it services the
 * sink IRQ vector at DPCD 513 (0x201), merely logging unhandled test
 * requests (bit 1) and CP/sink-specific bits (0x44).
 */
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 4317, "WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if (! intel_encoder->connectors_active) { return; } else { } __ret_warn_on___0 = (unsigned long )intel_encoder->base.crtc == (unsigned long )((struct drm_crtc *)0); tmp___4 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 4322, "WARN_ON(!intel_encoder->base.crtc)"); } else { } tmp___5 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___5 != 0L) { return; } else { } __mptr = (struct drm_crtc const *)intel_encoder->base.crtc; if (((struct intel_crtc *)__mptr)->active) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { return; } else { } tmp___7 = intel_dp_get_link_status(intel_dp, (uint8_t *)(& link_status)); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { return; } else { } tmp___9 = intel_dp_get_dpcd(intel_dp); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { return; } else { } if ((unsigned int )intel_dp->dpcd[0] > 16U) { tmp___13 = intel_dp_get_sink_irq(intel_dp, & sink_irq_vector); if ((int )tmp___13) { drm_dp_dpcd_writeb(& intel_dp->aux, 513U, (int )sink_irq_vector); if (((int )sink_irq_vector & 2) != 0) { tmp___11 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("intel_dp_check_link_status", "Test request in short pulse not handled\n"); } else { } } else { } if (((int )sink_irq_vector & 68) != 0) { tmp___12 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); 
/*
 * Continuation: retrains the link when drm_dp_channel_eq_ok() fails.  Then
 * intel_dp_detect_dpcd() begins: it re-reads the DPCD and returns an enum
 * drm_connector_status value -- the literals 1/2/3 appear to be
 * connected/disconnected/unknown respectively (TODO confirm against
 * drm_connector_status in drm_crtc.h).  For DPCD rev > 0x10 with a
 * hotplug-capable downstream port (bit 3 of downstream_ports[0]) it reads
 * the sink count at DPCD 512 (0x200) instead of probing DDC.
 */
if (tmp___12 != 0L) { drm_ut_debug_printk("intel_dp_check_link_status", "CP or sink specific irq unhandled\n"); } else { } } else { } } else { } } else { } tmp___15 = drm_dp_channel_eq_ok((u8 const *)(& link_status), (int )intel_dp->lane_count); if (tmp___15) { tmp___16 = 0; } else { tmp___16 = 1; } if (tmp___16) { tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("intel_dp_check_link_status", "%s: channel EQ not ok, retraining\n", intel_encoder->base.name); } else { } intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); } else { } return; } } static enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *intel_dp ) { uint8_t *dpcd ; uint8_t type ; bool tmp ; int tmp___0 ; uint8_t reg ; ssize_t tmp___1 ; bool tmp___2 ; long tmp___3 ; { dpcd = (uint8_t *)(& intel_dp->dpcd); tmp = intel_dp_get_dpcd(intel_dp); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (2); } else { } if (((int )*(dpcd + 5UL) & 1) == 0) { return (1); } else { } if ((unsigned int )intel_dp->dpcd[0] > 16U && ((int )intel_dp->downstream_ports[0] & 8) != 0) { tmp___1 = intel_dp_dpcd_read_wake(& intel_dp->aux, 512U, (void *)(& reg), 1UL); if (tmp___1 < 0L) { return (3); } else { } return (((((int )reg & 128) >> 1) | ((int )reg & 63)) != 0 ? 
/*
 * Continuation of intel_dp_detect_dpcd(): falls back to a DDC probe, then
 * inspects the downstream-port type bits, logging "Broken DP branch
 * device" when nothing matches.  Also on this line: edp_detect() (panel
 * detect, mapping "unknown" to connected -- eDP panels are always there);
 * ironlake_dp_detect() (PCH digital-port presence check then DPCD detect);
 * and the start of g4x_digital_port_connected().
 */
1 : 2); } else { } tmp___2 = drm_probe_ddc(& intel_dp->aux.ddc); if ((int )tmp___2) { return (1); } else { } if ((unsigned int )intel_dp->dpcd[0] > 16U) { type = (unsigned int )intel_dp->downstream_ports[0] & 7U; if ((unsigned int )type == 1U || (unsigned int )type == 4U) { return (3); } else { } } else { type = (unsigned int )intel_dp->dpcd[5] & 6U; if ((unsigned int )type == 2U || (unsigned int )type == 6U) { return (3); } else { } } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_detect_dpcd", "Broken DP branch device, ignoring\n"); } else { } return (2); } } static enum drm_connector_status edp_detect(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; enum drm_connector_status status ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; status = intel_panel_detect(dev); if ((unsigned int )status == 3U) { status = 1; } else { } return (status); } } static enum drm_connector_status ironlake_dp_detect(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; bool tmp___1 ; int tmp___2 ; enum drm_connector_status tmp___3 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = dp_to_dig_port(intel_dp); intel_dig_port = tmp___0; tmp___1 = ibx_digital_port_connected(dev_priv, intel_dig_port); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (2); } else { } tmp___3 = intel_dp_detect_dpcd(intel_dp); return (tmp___3); } } static int g4x_digital_port_connected(struct drm_device *dev , struct intel_digital_port *intel_dig_port ) { struct drm_i915_private *dev_priv ; uint32_t bit ; struct drm_i915_private *__p ; uint32_t tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 
/*
 * Continuation of g4x_digital_port_connected(): picks a live-status bit
 * per port (two layouts selected by a platform flag byte in
 * drm_i915_private) and tests it against the MMIO register at
 * display_mmio_offset + 397588 (0x61114); returns 1 if set, 0 if clear,
 * -22 (-EINVAL) for an unknown port.  Also: g4x_dp_detect() (eDP panel
 * path, else the port check followed by DPCD detect) and the start of
 * intel_dp_get_edid() (duplicates a cached EDID override when present).
 */
45UL) != 0U) { switch ((unsigned int )intel_dig_port->port) { case 1U: bit = 536870912U; goto ldv_50552; case 2U: bit = 268435456U; goto ldv_50552; case 3U: bit = 134217728U; goto ldv_50552; default: ; return (-22); } ldv_50552: ; } else { switch ((unsigned int )intel_dig_port->port) { case 1U: bit = 134217728U; goto ldv_50557; case 2U: bit = 268435456U; goto ldv_50557; case 3U: bit = 536870912U; goto ldv_50557; default: ; return (-22); } ldv_50557: ; } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397588U), 1); if ((tmp & bit) == 0U) { return (0); } else { } return (1); } } static enum drm_connector_status g4x_dp_detect(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; int ret ; enum drm_connector_status status ; bool tmp___1 ; enum drm_connector_status tmp___2 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; tmp___0 = dp_to_dig_port(intel_dp); intel_dig_port = tmp___0; tmp___1 = is_edp(intel_dp); if ((int )tmp___1) { status = intel_panel_detect(dev); if ((unsigned int )status == 3U) { status = 1; } else { } return (status); } else { } ret = g4x_digital_port_connected(dev, intel_dig_port); if (ret == -22) { return (3); } else if (ret == 0) { return (2); } else { } tmp___2 = intel_dp_detect_dpcd(intel_dp); return (tmp___2); } } static struct edid *intel_dp_get_edid(struct intel_dp *intel_dp ) { struct intel_connector *intel_connector ; bool tmp ; struct edid *tmp___0 ; struct edid *tmp___1 ; { intel_connector = intel_dp->attached_connector; if ((unsigned long )intel_connector->edid != (unsigned long )((struct edid *)0)) { tmp = IS_ERR((void const *)intel_connector->edid); if ((int )tmp) { return ((struct edid *)0); } else { } tmp___0 = drm_edid_duplicate((struct edid const *)intel_connector->edid); return (tmp___0); } else { tmp___1 = drm_get_edid(& intel_connector->base, & 
/*
 * This line: tail of intel_dp_get_edid() (fresh DDC read when no cached
 * EDID); intel_dp_set_edid() -- caches the EDID on the connector and
 * derives has_audio from force_audio or monitor audio detection;
 * intel_dp_unset_edid() -- frees and clears the cached EDID and audio
 * flag; intel_dp_power_get()/intel_dp_power_put() -- acquire/release the
 * display power domain for the encoder's port; then the declarations of
 * intel_dp_detect() begin.
 */
intel_dp->aux.ddc); return (tmp___1); } } } static void intel_dp_set_edid(struct intel_dp *intel_dp ) { struct intel_connector *intel_connector ; struct edid *edid ; { intel_connector = intel_dp->attached_connector; edid = intel_dp_get_edid(intel_dp); intel_connector->detect_edid = edid; if ((int )intel_dp->force_audio != 0) { intel_dp->has_audio = (int )intel_dp->force_audio == 1; } else { intel_dp->has_audio = drm_detect_monitor_audio(edid); } return; } } static void intel_dp_unset_edid(struct intel_dp *intel_dp ) { struct intel_connector *intel_connector ; { intel_connector = intel_dp->attached_connector; kfree((void const *)intel_connector->detect_edid); intel_connector->detect_edid = (struct edid *)0; intel_dp->has_audio = 0; return; } } static enum intel_display_power_domain intel_dp_power_get(struct intel_dp *dp ) { struct intel_encoder *encoder ; struct intel_digital_port *tmp ; enum intel_display_power_domain power_domain ; struct drm_i915_private *tmp___0 ; { tmp = dp_to_dig_port(dp); encoder = & tmp->base; power_domain = intel_display_port_power_domain(encoder); tmp___0 = to_i915((struct drm_device const *)encoder->base.dev); intel_display_power_get(tmp___0, power_domain); return (power_domain); } } static void intel_dp_power_put(struct intel_dp *dp , enum intel_display_power_domain power_domain ) { struct intel_encoder *encoder ; struct intel_digital_port *tmp ; struct drm_i915_private *tmp___0 ; { tmp = dp_to_dig_port(dp); encoder = & tmp->base; tmp___0 = to_i915((struct drm_device const *)encoder->base.dev); intel_display_power_put(tmp___0, power_domain); return; } } static enum drm_connector_status intel_dp_detect(struct drm_connector *connector , bool force ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; struct intel_encoder *intel_encoder ; struct drm_device *dev ; enum drm_connector_status status ; enum intel_display_power_domain power_domain ; bool ret ; u8 
/*
 * This line: body of intel_dp_detect() (the .detect connector hook) --
 * drops any cached EDID, short-circuits to "disconnected" (2) while MST is
 * active, takes the power domain, picks the platform-specific detect path
 * (eDP / PCH "ironlake" / g4x), then on a connected sink probes OUI and
 * MST, caches the EDID, and services the sink IRQ vector (dispatching test
 * requests to intel_dp_handle_test_request) before releasing power.
 */
sink_irq_vector ; long tmp___1 ; struct drm_i915_private *__p ; bool tmp___2 ; long tmp___3 ; bool tmp___4 ; { tmp = intel_attached_dp(connector); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); intel_dig_port = tmp___0; intel_encoder = & intel_dig_port->base; dev = connector->dev; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_detect", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } intel_dp_unset_edid(intel_dp); if ((int )intel_dp->is_mst) { if ((unsigned int )intel_encoder->type != 8U) { intel_encoder->type = 7; } else { } return (2); } else { } power_domain = intel_dp_power_get(intel_dp); tmp___2 = is_edp(intel_dp); if ((int )tmp___2) { status = edp_detect(intel_dp); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { status = ironlake_dp_detect(intel_dp); } else { status = g4x_dp_detect(intel_dp); } } if ((unsigned int )status != 1U) { goto out; } else { } intel_dp_probe_oui(intel_dp); ret = intel_dp_probe_mst(intel_dp); if ((int )ret) { if ((unsigned int )intel_encoder->type != 8U) { intel_encoder->type = 7; } else { } status = 2; goto out; } else { } intel_dp_set_edid(intel_dp); if ((unsigned int )intel_encoder->type != 8U) { intel_encoder->type = 7; } else { } status = 1; if ((unsigned int )intel_dp->dpcd[0] > 16U) { tmp___4 = intel_dp_get_sink_irq(intel_dp, & sink_irq_vector); if ((int )tmp___4) { drm_dp_dpcd_writeb(& intel_dp->aux, 513U, (int )sink_irq_vector); if (((int )sink_irq_vector & 2) != 0) { intel_dp_handle_test_request(intel_dp); } else { } if (((int )sink_irq_vector & 68) != 0) { tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_detect", "CP or sink specific irq unhandled\n"); } else { } } else { } } else { } } else { } out: intel_dp_power_put(intel_dp, power_domain); return (status); } } static void intel_dp_force(struct drm_connector *connector ) { 
/*
 * This line: body of intel_dp_force() -- re-reads the EDID for an already
 * connected connector under the port power domain; intel_dp_get_modes() --
 * returns modes from the cached EDID, else duplicates the eDP panel's
 * fixed mode; then the declarations of intel_dp_detect_audio() begin.
 */
struct intel_dp *intel_dp ; struct intel_dp *tmp ; struct intel_encoder *intel_encoder ; struct intel_digital_port *tmp___0 ; enum intel_display_power_domain power_domain ; long tmp___1 ; { tmp = intel_attached_dp(connector); intel_dp = tmp; tmp___0 = dp_to_dig_port(intel_dp); intel_encoder = & tmp___0->base; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_force", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } intel_dp_unset_edid(intel_dp); if ((unsigned int )connector->status != 1U) { return; } else { } power_domain = intel_dp_power_get(intel_dp); intel_dp_set_edid(intel_dp); intel_dp_power_put(intel_dp, power_domain); if ((unsigned int )intel_encoder->type != 8U) { intel_encoder->type = 7; } else { } return; } } static int intel_dp_get_modes(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct edid *edid ; int ret ; int tmp ; struct drm_display_mode *mode ; struct intel_dp *tmp___0 ; bool tmp___1 ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; edid = intel_connector->detect_edid; if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { tmp = intel_connector_update_modes(connector, edid); ret = tmp; if (ret != 0) { return (ret); } else { } } else { } tmp___0 = intel_attached_dp(connector); tmp___1 = is_edp(tmp___0); if ((int )tmp___1 && (unsigned long )intel_connector->panel.fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { mode = drm_mode_duplicate(connector->dev, (struct drm_display_mode const *)intel_connector->panel.fixed_mode); if ((unsigned long )mode != (unsigned long )((struct drm_display_mode *)0)) { drm_mode_probed_add(connector, mode); return (1); } else { } } else { } return (0); } } static bool intel_dp_detect_audio(struct drm_connector *connector ) { bool has_audio ; struct edid *edid ; struct drm_connector const 
/*
 * This line: tail of intel_dp_detect_audio() (audio capability from the
 * cached EDID); intel_dp_set_property() -- the .set_property hook handling
 * three properties: force_audio (re-deriving has_audio), broadcast_rgb
 * (0=auto, 1=full/range 0, 2=limited/range 256), and the eDP scaling mode
 * (rejecting "no scaling", value 0).
 */
*__mptr ; { has_audio = 0; __mptr = (struct drm_connector const *)connector; edid = ((struct intel_connector *)__mptr)->detect_edid; if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { has_audio = drm_detect_monitor_audio(edid); } else { } return (has_audio); } } static int intel_dp_set_property(struct drm_connector *connector , struct drm_property *property , uint64_t val ) { struct drm_i915_private *dev_priv ; struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct intel_encoder *intel_encoder ; struct intel_encoder *tmp ; struct intel_dp *intel_dp ; struct intel_dp *tmp___0 ; int ret ; int i ; bool has_audio ; bool old_auto ; uint32_t old_range ; long tmp___1 ; bool tmp___2 ; { dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; tmp = intel_attached_encoder(connector); intel_encoder = tmp; tmp___0 = enc_to_intel_dp(& intel_encoder->base); intel_dp = tmp___0; ret = drm_object_property_set_value(& connector->base, property, val); if (ret != 0) { return (ret); } else { } if ((unsigned long )dev_priv->force_audio_property == (unsigned long )property) { i = (int )val; if ((int )intel_dp->force_audio == i) { return (0); } else { } intel_dp->force_audio = (enum hdmi_force_audio )i; if (i == 0) { has_audio = intel_dp_detect_audio(connector); } else { has_audio = i == 1; } if ((int )intel_dp->has_audio == (int )has_audio) { return (0); } else { } intel_dp->has_audio = has_audio; goto done; } else { } if ((unsigned long )dev_priv->broadcast_rgb_property == (unsigned long )property) { old_auto = intel_dp->color_range_auto; old_range = intel_dp->color_range; switch (val) { case 0ULL: intel_dp->color_range_auto = 1; goto ldv_50652; case 1ULL: intel_dp->color_range_auto = 0; intel_dp->color_range = 0U; goto ldv_50652; case 2ULL: intel_dp->color_range_auto = 0; intel_dp->color_range = 256U; goto ldv_50652; default: ; 
/*
 * Continuation: any accepted property change falls through to "done",
 * which restores the mode on the attached CRTC.  Also:
 * intel_dp_connector_destroy() -- frees cached/override EDIDs, tears down
 * the panel for eDP connectors (connector_type 14), and frees the
 * connector; then intel_dp_encoder_destroy() begins (aux unregister, MST
 * cleanup, and for eDP the cancellation of the deferred VDD-off work).
 */
return (-22); } ldv_50652: ; if ((int )intel_dp->color_range_auto == (int )old_auto && intel_dp->color_range == old_range) { return (0); } else { } goto done; } else { } tmp___2 = is_edp(intel_dp); if ((int )tmp___2 && (unsigned long )(connector->dev)->mode_config.scaling_mode_property == (unsigned long )property) { if (val == 0ULL) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_set_property", "no scaling not supported\n"); } else { } return (-22); } else { } if ((uint64_t )intel_connector->panel.fitting_mode == val) { return (0); } else { } intel_connector->panel.fitting_mode = (int )val; goto done; } else { } return (-22); done: ; if ((unsigned long )intel_encoder->base.crtc != (unsigned long )((struct drm_crtc *)0)) { intel_crtc_restore_mode(intel_encoder->base.crtc); } else { } return (0); } } static void intel_dp_connector_destroy(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; bool tmp ; int tmp___0 ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; kfree((void const *)intel_connector->detect_edid); tmp = IS_ERR_OR_NULL((void const *)intel_connector->edid); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { kfree((void const *)intel_connector->edid); } else { } if (connector->connector_type == 14) { intel_panel_fini(& intel_connector->panel); } else { } drm_connector_cleanup(connector); kfree((void const *)connector); return; } } void intel_dp_encoder_destroy(struct drm_encoder *encoder ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_dp *intel_dp ; bool tmp___0 ; { tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; intel_dp = & intel_dig_port->dp; drm_dp_aux_unregister(& intel_dp->aux); intel_dp_mst_encoder_cleanup(intel_dig_port); tmp___0 = is_edp(intel_dp); if ((int )tmp___0) { ldv_cancel_delayed_work_sync_873(& 
/*
 * This line: tail of intel_dp_encoder_destroy() -- for eDP, cancels the
 * VDD-off work, forces panel VDD off under the pps lock, and unregisters
 * the reboot notifier before freeing the encoder;
 * intel_dp_encoder_suspend() -- same VDD-off-sync sequence, eDP only; then
 * the start of intel_edp_panel_vdd_sanitize() (asserts pps_mutex is held
 * via the expanded lockdep WARN_ON).
 */
intel_dp->panel_vdd_work); pps_lock(intel_dp); edp_panel_vdd_off_sync(intel_dp); pps_unlock(intel_dp); if ((unsigned long )intel_dp->edp_notifier.notifier_call != (unsigned long )((int (*)(struct notifier_block * , unsigned long , void * ))0)) { unregister_reboot_notifier(& intel_dp->edp_notifier); intel_dp->edp_notifier.notifier_call = (int (*)(struct notifier_block * , unsigned long , void * ))0; } else { } } else { } drm_encoder_cleanup(encoder); kfree((void const *)intel_dig_port); return; } } static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder ) { struct intel_dp *intel_dp ; struct intel_dp *tmp ; bool tmp___0 ; int tmp___1 ; { tmp = enc_to_intel_dp(& intel_encoder->base); intel_dp = tmp; tmp___0 = is_edp(intel_dp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return; } else { } ldv_cancel_delayed_work_sync_874(& intel_dp->panel_vdd_work); pps_lock(intel_dp); edp_panel_vdd_off_sync(intel_dp); pps_unlock(intel_dp); return; } } static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp ) { struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain power_domain ; int __ret_warn_on ; int tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; int tmp___4 ; long tmp___5 ; { tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (debug_locks != 0) { tmp___0 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___0 == 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 4861, 
/*
 * Continuation: if BIOS left panel VDD enabled, the power domain is taken
 * to match and a deferred VDD-off is scheduled.  Also:
 * intel_dp_encoder_reset() -- eDP-only (.type == 8) reset hook that
 * re-runs the VLV power-sequencer setup and the VDD sanitize under
 * pps_lock; the static connector/encoder ops tables wiring the hooks
 * defined above; the empty intel_dp_hot_plug(); and the start of
 * intel_dp_hpd_pulse().
 */
"WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___3 = edp_have_panel_vdd(intel_dp); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return; } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_edp_panel_vdd_sanitize", "VDD left on by BIOS, adjusting state tracking\n"); } else { } power_domain = intel_display_port_power_domain(& intel_dig_port->base); intel_display_power_get(dev_priv, power_domain); edp_panel_vdd_schedule_off(intel_dp); return; } } static void intel_dp_encoder_reset(struct drm_encoder *encoder ) { struct intel_dp *intel_dp ; struct drm_encoder const *__mptr ; struct drm_i915_private *__p ; { __mptr = (struct drm_encoder const *)encoder; if ((unsigned int )((struct intel_encoder *)__mptr)->type != 8U) { return; } else { } intel_dp = enc_to_intel_dp(encoder); pps_lock(intel_dp); __p = to_i915((struct drm_device const *)encoder->dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { vlv_initial_power_sequencer_setup(intel_dp); } else { } intel_edp_panel_vdd_sanitize(intel_dp); pps_unlock(intel_dp); return; } } static struct drm_connector_funcs const intel_dp_connector_funcs = {& intel_connector_dpms, 0, 0, 0, & intel_dp_detect, & drm_helper_probe_single_connector_modes, & intel_dp_set_property, & intel_dp_connector_destroy, & intel_dp_force, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property}; static struct drm_connector_helper_funcs const intel_dp_connector_helper_funcs = {& intel_dp_get_modes, & intel_dp_mode_valid, & intel_best_encoder}; static struct drm_encoder_funcs const intel_dp_enc_funcs = {& intel_dp_encoder_reset, & intel_dp_encoder_destroy}; void intel_dp_hot_plug(struct intel_encoder *intel_encoder ) { { return; } } enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port 
/*
 * This line: declarations and first part of intel_dp_hpd_pulse() -- the
 * hot-plug IRQ handler; long HPD on an eDP port (.type == 8) is ignored
 * (returns 1), otherwise the port letter and pulse length are logged.
 */
*intel_dig_port , bool long_hpd ) { struct intel_dp *intel_dp ; struct intel_encoder *intel_encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum intel_display_power_domain power_domain ; enum irqreturn ret ; long tmp ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; struct drm_i915_private *__p ; bool tmp___4 ; int tmp___5 ; bool tmp___6 ; int tmp___7 ; int tmp___8 ; long tmp___9 ; { intel_dp = & intel_dig_port->dp; intel_encoder = & intel_dig_port->base; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; ret = 0; if ((unsigned int )intel_dig_port->base.type != 8U) { intel_dig_port->base.type = 7; } else { } if ((int )long_hpd && (unsigned int )intel_dig_port->base.type == 8U) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dp_hpd_pulse", "ignoring long hpd on eDP port %c\n", (unsigned int )intel_dig_port->port + 65U); } else { } return (1); } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_hpd_pulse", "got hpd irq on port %c - %s\n", (unsigned int )intel_dig_port->port + 65U, (int )long_hpd ? 
/*
 * Continuation: under the port power domain, a long pulse re-checks port
 * presence (PCH vs g4x path), re-reads the DPCD, and re-probes OUI/MST,
 * jumping to mst_fail on any failure; a short pulse services MST events
 * or, in SST mode, runs intel_dp_check_link_status() under the connection
 * mutex.
 */
(char *)"long" : (char *)"short"); } else { } power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); if ((int )long_hpd) { intel_dp->train_set_valid = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { tmp___1 = ibx_digital_port_connected(dev_priv, intel_dig_port); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto mst_fail; } else { } } else { tmp___3 = g4x_digital_port_connected(dev, intel_dig_port); if (tmp___3 != 1) { goto mst_fail; } else { } } tmp___4 = intel_dp_get_dpcd(intel_dp); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 1; } if (tmp___5) { goto mst_fail; } else { } intel_dp_probe_oui(intel_dp); tmp___6 = intel_dp_probe_mst(intel_dp); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { goto mst_fail; } else { } } else { if ((int )intel_dp->is_mst) { tmp___8 = intel_dp_check_mst_status(intel_dp); if (tmp___8 == -22) { goto mst_fail; } else { } } else { } if (! 
/*
 * Continuation: the mst_fail path tears MST down; power is always
 * released at put_power.  Also: intel_trans_dp_port_sel() -- walks the
 * encoder list (container_of expansion via the 0xff...f8 offset) to find
 * the DP/eDP encoder on the given CRTC and returns its output register;
 * then intel_dp_is_edp() begins (VBT child-device scan; the port_mapping
 * table translates port B/C/D to VBT DVO port codes 7/8/9).
 */
intel_dp->is_mst) { drm_modeset_lock(& dev->mode_config.connection_mutex, (struct drm_modeset_acquire_ctx *)0); intel_dp_check_link_status(intel_dp); drm_modeset_unlock(& dev->mode_config.connection_mutex); } else { } } ret = 1; goto put_power; mst_fail: ; if ((int )intel_dp->is_mst) { tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("intel_dp_hpd_pulse", "MST device may have disappeared %d vs %d\n", (int )intel_dp->is_mst, (int )intel_dp->mst_mgr.mst_state); } else { } intel_dp->is_mst = 0; drm_dp_mst_topology_mgr_set_mst(& intel_dp->mst_mgr, (int )intel_dp->is_mst); } else { } put_power: intel_display_power_put(dev_priv, power_domain); return (ret); } } int intel_trans_dp_port_sel(struct drm_crtc *crtc ) { struct drm_device *dev ; struct intel_encoder *intel_encoder ; struct intel_dp *intel_dp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = crtc->dev; __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; intel_encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_50730; ldv_50729: ; if ((unsigned long )intel_encoder->base.crtc == (unsigned long )crtc) { intel_dp = enc_to_intel_dp(& intel_encoder->base); if ((unsigned int )intel_encoder->type == 7U || (unsigned int )intel_encoder->type == 8U) { return ((int )intel_dp->output_reg); } else { } } else { } __mptr___0 = (struct list_head const *)intel_encoder->base.head.next; intel_encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_50730: ; if ((unsigned long )(& intel_encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_50729; } else { } return (-1); } } bool intel_dp_is_edp(struct drm_device *dev , enum port port ) { struct drm_i915_private *dev_priv ; union child_device_config *p_child ; int i ; short port_mapping[4U] ; { dev_priv = (struct drm_i915_private *)dev->dev_private; port_mapping[0] = (short)0; port_mapping[1] = 7; port_mapping[2] = 8; 
/* NOTE(review): CIL-generated verifier input (LDV harness for i915 intel_dp.c).
 * Labels ldv_* and the flattened control flow are toolchain artifacts.
 * This line begins inside intel_dp_is_edp(): below is its tail — port A
 * (port == 0U) is always reported as eDP; otherwise the VBT child-device
 * list is scanned for an entry whose dvo_port matches the port mapping and
 * whose masked device_type equals 6150 (eDP device class per VBT —
 * TODO confirm constant against intel_bios.h). */
port_mapping[3] = 9; if ((unsigned int )port == 0U) { return (1); } else { } if (dev_priv->vbt.child_dev_num == 0) { return (0); } else { } i = 0; goto ldv_50741; ldv_50740: p_child = dev_priv->vbt.child_dev + (unsigned long )i; if ((int )p_child->common.dvo_port == (int )port_mapping[(unsigned int )port] && ((int )p_child->common.device_type & 7999) == 6150) { return (1); } else { } i = i + 1; ldv_50741: ; if (dev_priv->vbt.child_dev_num > i) { goto ldv_50740; } else { } return (0); } }
/* Attach DP connector properties: force-audio and broadcast-RGB on every DP
 * connector; on eDP additionally create/attach the scaling-mode property
 * with initial value 3 and record fitting_mode = 3 (presumably
 * DRM_MODE_SCALE_ASPECT — verify against drm enum). */
void intel_dp_add_properties(struct intel_dp *intel_dp , struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; bool tmp ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_dp->color_range_auto = 1; tmp = is_edp(intel_dp); if ((int )tmp) { drm_mode_create_scaling_mode_property(connector->dev); drm_object_attach_property(& connector->base, (connector->dev)->mode_config.scaling_mode_property, 3ULL); intel_connector->panel.fitting_mode = 3; } else { } return; } }
/* Seed the panel power-sequencing timestamps with the current jiffies so the
 * first power-on/backlight transitions honour the required delays. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp ) { { intel_dp->last_power_cycle = jiffies; intel_dp->last_power_on = jiffies; intel_dp->last_backlight_off = jiffies; return; } }
/* Compute the eDP panel power-sequencer delays (pps_delays) by taking, per
 * field, the max of the values currently programmed in the PP_* registers
 * and the VBT-provided values, falling back to the eDP spec defaults when
 * both are zero. Only the declarations appear on this physical line; the
 * body continues on the following generated lines. */
static void intel_dp_init_panel_power_sequencer(struct drm_device *dev , struct intel_dp *intel_dp ) { struct drm_i915_private *dev_priv ; struct edp_power_seq cur ; struct edp_power_seq vbt ; struct edp_power_seq spec ; struct edp_power_seq *final ; u32 pp_on ; u32 pp_off ; u32 pp_div ; u32 pp ; int pp_ctrl_reg ; int pp_on_reg ; int pp_off_reg ; int pp_div_reg ; int __ret_warn_on ; int tmp ; int tmp___0 ; long tmp___1 ; enum pipe pipe ; enum pipe tmp___2 ; struct drm_i915_private *__p ; long tmp___3 ; long tmp___4 ; u16 _max1___0 ; u16 _max2___0 ; u16 _max1___1 ; u16 _max2___1 ; u16 _max1___3 ; u16 _max2___3 ;
u16 _max1___4 ; u16 _max2___4 ; u16 _max1___6 ; u16 _max2___6 ; u16 _max1___7 ; u16 _max2___7 ; u16 _max1___9 ; u16 _max2___9 ; u16 _max1___10 ; u16 _max2___10 ; u16 _max1___12 ; u16 _max2___12 ; u16 _max1___13 ; u16 _max2___13 ; long tmp___5 ; long tmp___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; final = & intel_dp->pps_delays; if (debug_locks != 0) { tmp = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp == 0) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } __ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 5101, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned int )final->t11_t12 != 0U) { return; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { pp_ctrl_reg = 815620; pp_on_reg = 815624; pp_off_reg = 815628; pp_div_reg = 815632; } else { tmp___2 = vlv_power_sequencer_pipe(intel_dp); pipe = tmp___2; pp_ctrl_reg = (int )pipe * 256 + 1970692; pp_on_reg = (int )pipe * 256 + 1970696; pp_off_reg = (int )pipe * 256 + 1970700; pp_div_reg = (int )pipe * 256 + 1970704; } pp = ironlake_get_pp_control(intel_dp); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_ctrl_reg, pp, 1); pp_on = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_on_reg, 1); pp_off = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_off_reg, 1); pp_div = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_div_reg, 1); cur.t1_t3 = (u16 )((pp_on & 536805376U) >> 16); cur.t8 = (unsigned int )((u16 )pp_on) & 8191U; cur.t9 = (unsigned int )((u16 )pp_off) & 8191U; cur.t10 = (u16 )((pp_off & 536805376U) >> 16); 
cur.t11_t12 = ((unsigned int )((u16 )pp_div) & 31U) * 1000U; tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_init_panel_power_sequencer", "cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", (int )cur.t1_t3, (int )cur.t8, (int )cur.t9, (int )cur.t10, (int )cur.t11_t12); } else { } vbt = dev_priv->vbt.edp_pps; spec.t1_t3 = 2100U; spec.t8 = 500U; spec.t9 = 500U; spec.t10 = 5000U; spec.t11_t12 = 6100U; tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_dp_init_panel_power_sequencer", "vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", (int )vbt.t1_t3, (int )vbt.t8, (int )vbt.t9, (int )vbt.t10, (int )vbt.t11_t12); } else { } _max1___1 = cur.t1_t3; _max2___1 = vbt.t1_t3; if (((int )_max1___1 > (int )_max2___1 ? _max1___1 : _max2___1) == 0) { final->t1_t3 = spec.t1_t3; } else { _max1___0 = cur.t1_t3; _max2___0 = vbt.t1_t3; final->t1_t3 = (u16 )((int )_max1___0 > (int )_max2___0 ? _max1___0 : _max2___0); } _max1___4 = cur.t8; _max2___4 = vbt.t8; if (((int )_max1___4 > (int )_max2___4 ? _max1___4 : _max2___4) == 0) { final->t8 = spec.t8; } else { _max1___3 = cur.t8; _max2___3 = vbt.t8; final->t8 = (u16 )((int )_max1___3 > (int )_max2___3 ? _max1___3 : _max2___3); } _max1___7 = cur.t9; _max2___7 = vbt.t9; if (((int )_max1___7 > (int )_max2___7 ? _max1___7 : _max2___7) == 0) { final->t9 = spec.t9; } else { _max1___6 = cur.t9; _max2___6 = vbt.t9; final->t9 = (u16 )((int )_max1___6 > (int )_max2___6 ? _max1___6 : _max2___6); } _max1___10 = cur.t10; _max2___10 = vbt.t10; if (((int )_max1___10 > (int )_max2___10 ? _max1___10 : _max2___10) == 0) { final->t10 = spec.t10; } else { _max1___9 = cur.t10; _max2___9 = vbt.t10; final->t10 = (u16 )((int )_max1___9 > (int )_max2___9 ? _max1___9 : _max2___9); } _max1___13 = cur.t11_t12; _max2___13 = vbt.t11_t12; if (((int )_max1___13 > (int )_max2___13 ? 
_max1___13 : _max2___13) == 0) { final->t11_t12 = spec.t11_t12; } else { _max1___12 = cur.t11_t12; _max2___12 = vbt.t11_t12; final->t11_t12 = (u16 )((int )_max1___12 > (int )_max2___12 ? _max1___12 : _max2___12); } intel_dp->panel_power_up_delay = ((int )final->t1_t3 + 9) / 10; intel_dp->backlight_on_delay = ((int )final->t8 + 9) / 10; intel_dp->backlight_off_delay = ((int )final->t9 + 9) / 10; intel_dp->panel_power_down_delay = ((int )final->t10 + 9) / 10; intel_dp->panel_power_cycle_delay = ((int )final->t11_t12 + 9) / 10; tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_init_panel_power_sequencer", "panel power up delay %d, power down delay %d, power cycle delay %d\n", intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, intel_dp->panel_power_cycle_delay); } else { } tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_dp_init_panel_power_sequencer", "backlight on delay %d, off delay %d\n", intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); } else { } return; } } static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev , struct intel_dp *intel_dp ) { struct drm_i915_private *dev_priv ; u32 pp_on ; u32 pp_off ; u32 pp_div ; u32 port_sel ; int div ; int tmp ; int tmp___0 ; int tmp___1 ; struct drm_i915_private *__p___0 ; int pp_on_reg ; int pp_off_reg ; int pp_div_reg ; enum port port ; struct intel_digital_port *tmp___2 ; struct edp_power_seq const *seq ; int __ret_warn_on ; int tmp___3 ; int tmp___4 ; long tmp___5 ; enum pipe pipe ; enum pipe tmp___6 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; uint32_t tmp___7 ; uint32_t tmp___8 ; uint32_t tmp___9 ; long tmp___10 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; port_sel = 0U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int 
)__p___0->pch_type != 0U) { tmp = intel_pch_rawclk(dev); tmp___1 = tmp; } else { tmp___0 = intel_hrawclk(dev); tmp___1 = tmp___0; } div = tmp___1; tmp___2 = dp_to_dig_port(intel_dp); port = tmp___2->port; seq = (struct edp_power_seq const *)(& intel_dp->pps_delays); if (debug_locks != 0) { tmp___3 = lock_is_held(& dev_priv->pps_mutex.dep_map); if (tmp___3 == 0) { tmp___4 = 1; } else { tmp___4 = 0; } } else { tmp___4 = 0; } __ret_warn_on = tmp___4; tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 5205, "WARN_ON(debug_locks && !lockdep_is_held(&dev_priv->pps_mutex))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type != 0U) { pp_on_reg = 815624; pp_off_reg = 815628; pp_div_reg = 815632; } else { tmp___6 = vlv_power_sequencer_pipe(intel_dp); pipe = tmp___6; pp_on_reg = (int )pipe * 256 + 1970696; pp_off_reg = (int )pipe * 256 + 1970700; pp_div_reg = (int )pipe * 256 + 1970704; } pp_on = (u32 )(((int )seq->t1_t3 << 16) | 1); pp_off = (u32 )(((int )seq->t10 << 16) | 1); pp_div = (u32 )(((div * 100) / 2 + -1) << 8); pp_div = (u32 )(((int )seq->t11_t12 + 999) / 1000) | pp_div; __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { port_sel = (unsigned int )port << 30; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type == 1U) { goto _L; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type == 2U) { _L: /* CIL Label */ if ((unsigned int )port == 0U) { port_sel = 1073741824U; } else { port_sel = 3221225472U; } } else { } } } pp_on = pp_on | port_sel; 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_on_reg, pp_on, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_off_reg, pp_off, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )pp_div_reg, pp_div, 1); tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_div_reg, 1); tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_off_reg, 1); tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )pp_on_reg, 1); drm_ut_debug_printk("intel_dp_init_panel_power_sequencer_registers", "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", tmp___9, tmp___8, tmp___7); } else { } return; } } static void intel_dp_set_drrs_state(struct drm_device *dev , int refresh_rate ) { struct drm_i915_private *dev_priv ; struct intel_encoder *encoder ; struct intel_digital_port *dig_port ; struct intel_dp *intel_dp ; struct intel_crtc_state *config ; struct intel_crtc *intel_crtc ; u32 reg ; u32 val ; enum drrs_refresh_rate_type index ; long tmp ; long tmp___0 ; struct drm_crtc const *__mptr ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; long tmp___5 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dig_port = (struct intel_digital_port *)0; intel_dp = dev_priv->drrs.dp; config = (struct intel_crtc_state *)0; intel_crtc = (struct intel_crtc *)0; index = 0; if (refresh_rate <= 0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dp_set_drrs_state", "Refresh rate should be positive non-zero.\n"); } else { } return; } else { } if ((unsigned long )intel_dp == (unsigned long )((struct intel_dp *)0)) { tmp___0 = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_set_drrs_state", "DRRS not supported.\n"); } else { } return; } else { } dig_port = dp_to_dig_port(intel_dp); encoder = & dig_port->base; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; if ((unsigned long )intel_crtc == (unsigned long )((struct intel_crtc *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_set_drrs_state", "DRRS: intel_crtc not initialized\n"); } else { } return; } else { } config = intel_crtc->config; if ((unsigned int )dev_priv->drrs.type <= 1U) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dp_set_drrs_state", "Only Seamless DRRS supported.\n"); } else { } return; } else { } if (((intel_dp->attached_connector)->panel.downclock_mode)->vrefresh == refresh_rate) { index = 1; } else { } if ((unsigned int )dev_priv->drrs.refresh_rate_type == (unsigned int )index) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_dp_set_drrs_state", "DRRS requested for previously set RR...ignoring\n"); } else { } return; } else { } if (! intel_crtc->active) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_dp_set_drrs_state", "eDP encoder disabled. 
CRTC not Active\n"); } else { } return; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) > 7U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { goto _L___0; } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) != 8U) { _L___0: /* CIL Label */ switch ((unsigned int )index) { case 0U: intel_dp_set_m_n(intel_crtc, 0); goto ldv_50894; case 1U: intel_dp_set_m_n(intel_crtc, 1); goto ldv_50894; case 2U: ; default: drm_err("Unsupported refreshrate type\n"); } ldv_50894: ; } else { goto _L; } } } else { _L: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 6U) { reg = ((unsigned int )(dev_priv->info.pipe_offsets[(unsigned int )(intel_crtc->config)->cpu_transcoder] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U; val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); if ((unsigned int )index != 0U) { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { val = val | 16384U; } else { val = val | 1048576U; } } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { val = val & 4294950911U; } else { val = val & 4293918719U; } } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); } else { } } dev_priv->drrs.refresh_rate_type = index; tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_dp_set_drrs_state", "eDP Refresh Rate set to : %dHz\n", refresh_rate); } else { } return; } } void intel_edp_drrs_enable(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; struct intel_digital_port *dig_port ; struct 
intel_digital_port *tmp___0 ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; long tmp___1 ; int __ret_warn_on ; long tmp___2 ; long tmp___3 ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = dp_to_dig_port(intel_dp); dig_port = tmp___0; crtc = dig_port->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; if (! (intel_crtc->config)->has_drrs) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_edp_drrs_enable", "Panel doesn\'t support DRRS\n"); } else { } return; } else { } mutex_lock_nested(& dev_priv->drrs.mutex, 0U); __ret_warn_on = (unsigned long )dev_priv->drrs.dp != (unsigned long )((struct intel_dp *)0); tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 5384, "WARN_ON(dev_priv->drrs.dp)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { drm_err("DRRS already enabled\n"); goto unlock; } else { } dev_priv->drrs.busy_frontbuffer_bits = 0U; dev_priv->drrs.dp = intel_dp; unlock: mutex_unlock(& dev_priv->drrs.mutex); return; } } void intel_edp_drrs_disable(struct intel_dp *intel_dp ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; struct intel_digital_port *dig_port ; struct intel_digital_port *tmp___0 ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; { tmp = intel_dp_to_dev(intel_dp); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = dp_to_dig_port(intel_dp); dig_port = tmp___0; crtc = dig_port->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = 
/* Tail of intel_edp_drrs_disable(): under drrs.mutex, if the low refresh
 * rate is currently active (refresh_rate_type == 1U) restore the panel's
 * fixed-mode vrefresh, detach drrs.dp, then cancel the pending downclock
 * work (ldv_cancel_delayed_work_sync_875 is the LDV wrapper for
 * cancel_delayed_work_sync). */
(struct intel_crtc *)__mptr; if (! (intel_crtc->config)->has_drrs) { return; } else { } mutex_lock_nested(& dev_priv->drrs.mutex, 0U); if ((unsigned long )dev_priv->drrs.dp == (unsigned long )((struct intel_dp *)0)) { mutex_unlock(& dev_priv->drrs.mutex); return; } else { } if ((unsigned int )dev_priv->drrs.refresh_rate_type == 1U) { intel_dp_set_drrs_state(dev_priv->dev, ((intel_dp->attached_connector)->panel.fixed_mode)->vrefresh); } else { } dev_priv->drrs.dp = (struct intel_dp *)0; mutex_unlock(& dev_priv->drrs.mutex); ldv_cancel_delayed_work_sync_875(& dev_priv->drrs.work); return; } }
/* Delayed-work handler: once no front-buffer activity is pending
 * (busy_frontbuffer_bits == 0U) and DRRS is still attached, switch the eDP
 * panel to its downclock mode's vrefresh unless the low rate is already
 * selected. The (__mptr + 0xffffffffffff6050UL) arithmetic is presumably
 * CIL's lowering of container_of(work, drm_i915_private, drrs.work.work) —
 * offset not verifiable from this chunk. */
static void intel_edp_drrs_downclock_work(struct work_struct *work ) { struct drm_i915_private *dev_priv ; struct work_struct const *__mptr ; struct intel_dp *intel_dp ; { __mptr = (struct work_struct const *)work; dev_priv = (struct drm_i915_private *)__mptr + 0xffffffffffff6050UL; mutex_lock_nested(& dev_priv->drrs.mutex, 0U); intel_dp = dev_priv->drrs.dp; if ((unsigned long )intel_dp == (unsigned long )((struct intel_dp *)0)) { goto unlock; } else { } if (dev_priv->drrs.busy_frontbuffer_bits != 0U) { goto unlock; } else { } if ((unsigned int )dev_priv->drrs.refresh_rate_type != 1U) { intel_dp_set_drrs_state(dev_priv->dev, ((intel_dp->attached_connector)->panel.downclock_mode)->vrefresh); } else { } unlock: mutex_unlock(& dev_priv->drrs.mutex); return; } }
/* Front-buffer invalidate hook: bail out when DRRS is unsupported
 * (drrs.type == 0U), cancel the pending downclock work, and take drrs.mutex;
 * the remainder (restoring the fixed rate and accumulating
 * busy_frontbuffer_bits) continues on the next generated line. */
void intel_edp_drrs_invalidate(struct drm_device *dev , unsigned int frontbuffer_bits ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; struct intel_digital_port *tmp ; struct drm_crtc const *__mptr ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned int )dev_priv->drrs.type == 0U) { return; } else { } ldv_cancel_delayed_work_876(& dev_priv->drrs.work); mutex_lock_nested(& dev_priv->drrs.mutex, 0U); if ((unsigned long )dev_priv->drrs.dp == (unsigned long )((struct intel_dp *)0)) { mutex_unlock(& dev_priv->drrs.mutex); return; } else { } tmp =
dp_to_dig_port(dev_priv->drrs.dp); crtc = tmp->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; if ((unsigned int )dev_priv->drrs.refresh_rate_type == 1U) { intel_dp_set_drrs_state(dev_priv->dev, (((dev_priv->drrs.dp)->attached_connector)->panel.fixed_mode)->vrefresh); } else { } frontbuffer_bits = (unsigned int )(15 << (int )pipe * 4) & frontbuffer_bits; dev_priv->drrs.busy_frontbuffer_bits = dev_priv->drrs.busy_frontbuffer_bits | frontbuffer_bits; mutex_unlock(& dev_priv->drrs.mutex); return; } } void intel_edp_drrs_flush(struct drm_device *dev , unsigned int frontbuffer_bits ) { struct drm_i915_private *dev_priv ; struct drm_crtc *crtc ; enum pipe pipe ; struct intel_digital_port *tmp ; struct drm_crtc const *__mptr ; unsigned long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned int )dev_priv->drrs.type == 0U) { return; } else { } ldv_cancel_delayed_work_877(& dev_priv->drrs.work); mutex_lock_nested(& dev_priv->drrs.mutex, 0U); if ((unsigned long )dev_priv->drrs.dp == (unsigned long )((struct intel_dp *)0)) { mutex_unlock(& dev_priv->drrs.mutex); return; } else { } tmp = dp_to_dig_port(dev_priv->drrs.dp); crtc = tmp->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; pipe = ((struct intel_crtc *)__mptr)->pipe; dev_priv->drrs.busy_frontbuffer_bits = dev_priv->drrs.busy_frontbuffer_bits & ~ frontbuffer_bits; if ((unsigned int )dev_priv->drrs.refresh_rate_type != 1U && dev_priv->drrs.busy_frontbuffer_bits == 0U) { tmp___0 = msecs_to_jiffies(1000U); schedule_delayed_work___2(& dev_priv->drrs.work, tmp___0); } else { } mutex_unlock(& dev_priv->drrs.mutex); return; } } static struct drm_display_mode *intel_dp_drrs_init(struct intel_connector *intel_connector , struct drm_display_mode *fixed_mode ) { struct drm_connector *connector ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_display_mode *downclock_mode ; struct lock_class_key __key ; atomic_long_t 
__constr_expr_0___0 ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; long tmp ; struct drm_i915_private *__p ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { connector = & intel_connector->base; dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; downclock_mode = (struct drm_display_mode *)0; __init_work(& dev_priv->drrs.work.work, 0); __constr_expr_0___0.counter = 137438953408L; dev_priv->drrs.work.work.data = __constr_expr_0___0; lockdep_init_map(& dev_priv->drrs.work.work.lockdep_map, "(&(&dev_priv->drrs.work)->work)", & __key, 0); INIT_LIST_HEAD(& dev_priv->drrs.work.work.entry); dev_priv->drrs.work.work.func = & intel_edp_drrs_downclock_work; init_timer_key(& dev_priv->drrs.work.timer, 2097152U, "(&(&dev_priv->drrs.work)->timer)", & __key___0); dev_priv->drrs.work.timer.function = & delayed_work_timer_fn; dev_priv->drrs.work.timer.data = (unsigned long )(& dev_priv->drrs.work); __mutex_init(& dev_priv->drrs.mutex, "&dev_priv->drrs.mutex", & __key___1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 6U) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dp_drrs_init", "DRRS supported for Gen7 and above\n"); } else { } return ((struct drm_display_mode *)0); } else { } if ((unsigned int )dev_priv->vbt.drrs_type != 2U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dp_drrs_init", "VBT doesn\'t support DRRS\n"); } else { } return ((struct drm_display_mode *)0); } else { } downclock_mode = intel_find_panel_downclock(dev, fixed_mode, connector); if ((unsigned long )downclock_mode == (unsigned long )((struct drm_display_mode *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dp_drrs_init", "Downclock mode is not found. 
DRRS not supported\n"); } else { } return ((struct drm_display_mode *)0); } else { } dev_priv->drrs.type = dev_priv->vbt.drrs_type; dev_priv->drrs.refresh_rate_type = 0; tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dp_drrs_init", "seamless DRRS supported for eDP panel.\n"); } else { } return (downclock_mode); } } static bool intel_edp_init_connector(struct intel_dp *intel_dp , struct intel_connector *intel_connector ) { struct drm_connector *connector ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_encoder *intel_encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_display_mode *fixed_mode ; struct drm_display_mode *downclock_mode ; bool has_dpcd ; struct drm_display_mode *scan ; struct edid *edid ; enum pipe pipe ; bool tmp___0 ; int tmp___1 ; void *tmp___2 ; int tmp___3 ; void *tmp___4 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp___5 ; struct drm_i915_private *__p___1 ; { connector = & intel_connector->base; tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; intel_encoder = & intel_dig_port->base; dev = intel_encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; fixed_mode = (struct drm_display_mode *)0; downclock_mode = (struct drm_display_mode *)0; pipe = -1; tmp___0 = is_edp(intel_dp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (1); } else { } pps_lock(intel_dp); intel_edp_panel_vdd_sanitize(intel_dp); pps_unlock(intel_dp); has_dpcd = intel_dp_get_dpcd(intel_dp); if ((int )has_dpcd) { if ((unsigned int )intel_dp->dpcd[0] > 16U) { dev_priv->no_aux_handshake = ((int )intel_dp->dpcd[3] & 64) != 0; } else { } } else { printk("\016[drm] failed to retrieve link info, disabling eDP\n"); return (0); } pps_lock(intel_dp); intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 
pps_unlock(intel_dp); mutex_lock_nested(& dev->mode_config.mutex, 0U); edid = drm_get_edid(connector, & intel_dp->aux.ddc); if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { tmp___3 = drm_add_edid_modes(connector, edid); if (tmp___3 != 0) { drm_mode_connector_update_edid_property(connector, (struct edid const *)edid); drm_edid_to_eld(connector, edid); } else { kfree((void const *)edid); tmp___2 = ERR_PTR(-22L); edid = (struct edid *)tmp___2; } } else { tmp___4 = ERR_PTR(-2L); edid = (struct edid *)tmp___4; } intel_connector->edid = edid; __mptr = (struct list_head const *)connector->probed_modes.next; scan = (struct drm_display_mode *)__mptr; goto ldv_51006; ldv_51005: ; if ((scan->type & 8U) != 0U) { fixed_mode = drm_mode_duplicate(dev, (struct drm_display_mode const *)scan); downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); goto ldv_51004; } else { } __mptr___0 = (struct list_head const *)scan->head.next; scan = (struct drm_display_mode *)__mptr___0; ldv_51006: ; if ((unsigned long )(& scan->head) != (unsigned long )(& connector->probed_modes)) { goto ldv_51005; } else { } ldv_51004: ; if ((unsigned long )fixed_mode == (unsigned long )((struct drm_display_mode *)0) && (unsigned long )dev_priv->vbt.lfp_lvds_vbt_mode != (unsigned long )((struct drm_display_mode *)0)) { fixed_mode = drm_mode_duplicate(dev, (struct drm_display_mode const *)dev_priv->vbt.lfp_lvds_vbt_mode); if ((unsigned long )fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { fixed_mode->type = fixed_mode->type | 8U; } else { } } else { } mutex_unlock(& dev->mode_config.mutex); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { intel_dp->edp_notifier.notifier_call = & edp_notify_handler; register_reboot_notifier(& intel_dp->edp_notifier); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); 
if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { pipe = (enum pipe )((intel_dp->DP & 196608U) >> 16); } else { pipe = (enum pipe )((intel_dp->DP & 1073741824U) >> 30); } } else { pipe = (enum pipe )((intel_dp->DP & 1073741824U) >> 30); } if ((int )pipe != 0 && (int )pipe != 1) { pipe = intel_dp->pps_pipe; } else { } if ((int )pipe != 0 && (int )pipe != 1) { pipe = 0; } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_edp_init_connector", "using pipe %c for initial backlight setup\n", (int )pipe + 65); } else { } } else { } intel_panel_init(& intel_connector->panel, fixed_mode, downclock_mode); intel_connector->panel.backlight_power = & intel_edp_backlight_power; intel_panel_setup_backlight(connector, pipe); return (1); } } bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port , struct intel_connector *intel_connector ) { struct drm_connector *connector ; struct intel_dp *intel_dp ; struct intel_encoder *intel_encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; int type ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; bool tmp ; int __ret_warn_on ; struct drm_i915_private *__p___6 ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0___0 ; struct lock_class_key __key___0 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; bool tmp___5 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; struct drm_i915_private *__p___12 ; bool tmp___6 ; bool tmp___7 ; int tmp___8 ; u32 temp ; uint32_t tmp___9 ; struct drm_i915_private *__p___13 ; struct drm_i915_private *__p___14 ; { connector = & 
intel_connector->base; intel_dp = & intel_dig_port->dp; intel_encoder = & intel_dig_port->base; dev = intel_encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dig_port->port; intel_dp->pps_pipe = -1; __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) > 8U) { intel_dp->get_aux_clock_divider = & skl_get_aux_clock_divider; } else { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { intel_dp->get_aux_clock_divider = & vlv_get_aux_clock_divider; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { intel_dp->get_aux_clock_divider = & hsw_get_aux_clock_divider; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { intel_dp->get_aux_clock_divider = & hsw_get_aux_clock_divider; } else { goto _L; } } else { _L: /* CIL Label */ __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { intel_dp->get_aux_clock_divider = & ilk_get_aux_clock_divider; } else { intel_dp->get_aux_clock_divider = & i9xx_get_aux_clock_divider; } } } } } __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) > 8U) { intel_dp->get_aux_send_ctl = & skl_get_aux_send_ctl; } else { intel_dp->get_aux_send_ctl = & i9xx_get_aux_send_ctl; } intel_dp->DP = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dp->output_reg, 1); intel_dp->attached_connector = intel_connector; tmp = intel_dp_is_edp(dev, port); if ((int )tmp) { type = 14; } else { type = 10; } if (type == 14) { intel_encoder->type = 8; } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 45UL) != 0U) 
{ tmp___0 = is_edp(intel_dp); if ((int )tmp___0) { if ((unsigned int )port != 1U) { if ((unsigned int )port != 2U) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } } else { tmp___1 = 0; } } else { tmp___1 = 0; } __ret_warn_on = tmp___1; tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c", 5788, "WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) && port != PORT_B && port != PORT_C)"); } else { } tmp___3 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { return (0); } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_dp_init_connector", "Adding %s connector on port %c\n", type == 14 ? (char *)"eDP" : (char *)"DP", (unsigned int )port + 65U); } else { } drm_connector_init(dev, connector, & intel_dp_connector_funcs, type); drm_connector_helper_add(connector, & intel_dp_connector_helper_funcs); connector->interlace_allowed = 1; connector->doublescan_allowed = 0; __init_work(& intel_dp->panel_vdd_work.work, 0); __constr_expr_0___0.counter = 137438953408L; intel_dp->panel_vdd_work.work.data = __constr_expr_0___0; lockdep_init_map(& intel_dp->panel_vdd_work.work.lockdep_map, "(&(&intel_dp->panel_vdd_work)->work)", & __key, 0); INIT_LIST_HEAD(& intel_dp->panel_vdd_work.work.entry); intel_dp->panel_vdd_work.work.func = & edp_panel_vdd_work; init_timer_key(& intel_dp->panel_vdd_work.timer, 2097152U, "(&(&intel_dp->panel_vdd_work)->timer)", & __key___0); intel_dp->panel_vdd_work.timer.function = & delayed_work_timer_fn; intel_dp->panel_vdd_work.timer.data = (unsigned long )(& intel_dp->panel_vdd_work); intel_connector_attach_encoder(intel_connector, intel_encoder); drm_connector_register(connector); __p___7 = to_i915((struct 
drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 46UL) != 0U) { intel_connector->get_hw_state = & intel_ddi_connector_get_hw_state; } else { intel_connector->get_hw_state = & intel_connector_get_hw_state; } intel_connector->unregister = & intel_dp_connector_unregister; switch ((unsigned int )port) { case 0U: intel_encoder->hpd_pin = 0; goto ldv_51098; case 1U: intel_encoder->hpd_pin = 4; goto ldv_51098; case 2U: intel_encoder->hpd_pin = 5; goto ldv_51098; case 3U: intel_encoder->hpd_pin = 6; goto ldv_51098; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dp.c"), "i" (5828), "i" (12UL)); ldv_51103: ; goto ldv_51103; } ldv_51098: tmp___5 = is_edp(intel_dp); if ((int )tmp___5) { pps_lock(intel_dp); intel_dp_init_panel_power_timestamps(intel_dp); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 45UL) != 0U) { vlv_initial_power_sequencer_setup(intel_dp); } else { intel_dp_init_panel_power_sequencer(dev, intel_dp); } pps_unlock(intel_dp); } else { } intel_dp_aux_init(intel_dp, intel_connector); __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) != 0U) { goto _L___0; } else { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) == 0U) { __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___11->info.gen) == 8U) { goto _L___0; } else { goto _L___1; } } else { _L___1: /* CIL Label */ __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___12->info.gen) > 8U) { _L___0: /* CIL Label */ if (((unsigned int )port 
== 1U || (unsigned int )port == 2U) || (unsigned int )port == 3U) { intel_dp_mst_encoder_init(intel_dig_port, (int )intel_connector->base.base.id); } else { } } else { } } } tmp___7 = intel_edp_init_connector(intel_dp, intel_connector); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { drm_dp_aux_unregister(& intel_dp->aux); tmp___6 = is_edp(intel_dp); if ((int )tmp___6) { ldv_cancel_delayed_work_sync_878(& intel_dp->panel_vdd_work); pps_lock(intel_dp); edp_panel_vdd_off_sync(intel_dp); pps_unlock(intel_dp); } else { } drm_connector_unregister(connector); drm_connector_cleanup(connector); return (0); } else { } intel_dp_add_properties(intel_dp, connector); __p___13 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___13 + 44UL) != 0U) { __p___14 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___14->info.device_id) != 10818U) { tmp___9 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 85352L, 1); temp = tmp___9; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 85352L, (temp & 4294967280U) | 13U, 1); } else { } } else { } i915_debugfs_connector_add(connector); return (1); } } void intel_dp_init(struct drm_device *dev , int output_reg , enum port port ) { struct drm_i915_private *dev_priv ; struct intel_digital_port *intel_dig_port ; struct intel_encoder *intel_encoder ; struct drm_encoder *encoder ; struct intel_connector *intel_connector ; void *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; bool tmp___0 ; int tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = kzalloc(4744UL, 208U); intel_dig_port = (struct intel_digital_port *)tmp; if ((unsigned long )intel_dig_port == (unsigned long )((struct intel_digital_port *)0)) { return; } else { } intel_connector = intel_connector_alloc(); if 
((unsigned long )intel_connector == (unsigned long )((struct intel_connector *)0)) { kfree((void const *)intel_dig_port); return; } else { } intel_encoder = & intel_dig_port->base; encoder = & intel_encoder->base; drm_encoder_init(dev, & intel_encoder->base, & intel_dp_enc_funcs, 2); intel_encoder->compute_config = & intel_dp_compute_config; intel_encoder->disable = & intel_disable_dp; intel_encoder->get_hw_state = & intel_dp_get_hw_state; intel_encoder->get_config = & intel_dp_get_config; intel_encoder->suspend = & intel_dp_encoder_suspend; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { intel_encoder->pre_pll_enable = & chv_dp_pre_pll_enable; intel_encoder->pre_enable = & chv_pre_enable_dp; intel_encoder->enable = & vlv_enable_dp; intel_encoder->post_disable = & chv_post_disable_dp; } else { goto _L; } } else { _L: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { intel_encoder->pre_pll_enable = & vlv_dp_pre_pll_enable; intel_encoder->pre_enable = & vlv_pre_enable_dp; intel_encoder->enable = & vlv_enable_dp; intel_encoder->post_disable = & vlv_post_disable_dp; } else { intel_encoder->pre_enable = & g4x_pre_enable_dp; intel_encoder->enable = & g4x_enable_dp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 4U) { intel_encoder->post_disable = & ilk_post_disable_dp; } else { } } } intel_dig_port->port = port; intel_dig_port->dp.output_reg = (uint32_t )output_reg; intel_encoder->type = 7; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { if ((unsigned int )port == 3U) { 
intel_encoder->crtc_mask = 4; } else { intel_encoder->crtc_mask = 3; } } else { intel_encoder->crtc_mask = 7; } } else { intel_encoder->crtc_mask = 7; } intel_encoder->cloneable = 0U; intel_encoder->hot_plug = & intel_dp_hot_plug; intel_dig_port->hpd_pulse = & intel_dp_hpd_pulse; dev_priv->hpd_irq_port[(unsigned int )port] = intel_dig_port; tmp___0 = intel_dp_init_connector(intel_dig_port, intel_connector); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { drm_encoder_cleanup(encoder); kfree((void const *)intel_dig_port); kfree((void const *)intel_connector); } else { } return; } } void intel_dp_mst_suspend(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; struct intel_digital_port *intel_dig_port ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_51201; ldv_51200: intel_dig_port = dev_priv->hpd_irq_port[i]; if ((unsigned long )intel_dig_port == (unsigned long )((struct intel_digital_port *)0)) { goto ldv_51199; } else { } if ((unsigned int )intel_dig_port->base.type == 7U) { if (! intel_dig_port->dp.can_mst) { goto ldv_51199; } else { } if ((int )intel_dig_port->dp.is_mst) { drm_dp_mst_topology_mgr_suspend(& intel_dig_port->dp.mst_mgr); } else { } } else { } ldv_51199: i = i + 1; ldv_51201: ; if (i <= 4) { goto ldv_51200; } else { } return; } } void intel_dp_mst_resume(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int i ; struct intel_digital_port *intel_dig_port ; int ret ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i = 0; goto ldv_51212; ldv_51211: intel_dig_port = dev_priv->hpd_irq_port[i]; if ((unsigned long )intel_dig_port == (unsigned long )((struct intel_digital_port *)0)) { goto ldv_51209; } else { } if ((unsigned int )intel_dig_port->base.type == 7U) { if (! 
intel_dig_port->dp.can_mst) { goto ldv_51209; } else { } ret = drm_dp_mst_topology_mgr_resume(& intel_dig_port->dp.mst_mgr); if (ret != 0) { intel_dp_check_mst_status(& intel_dig_port->dp); } else { } } else { } ldv_51209: i = i + 1; ldv_51212: ; if (i <= 4) { goto ldv_51211; } else { } return; } } extern int ldv_probe_53(void) ; extern int ldv_probe_51(void) ; void invoke_work_19(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_19_0 == 2 || ldv_work_19_0 == 3) { ldv_work_19_0 = 4; intel_edp_drrs_downclock_work(ldv_work_struct_19_0); ldv_work_19_0 = 1; } else { } goto ldv_51224; case 1: ; if (ldv_work_19_1 == 2 || ldv_work_19_1 == 3) { ldv_work_19_1 = 4; intel_edp_drrs_downclock_work(ldv_work_struct_19_0); ldv_work_19_1 = 1; } else { } goto ldv_51224; case 2: ; if (ldv_work_19_2 == 2 || ldv_work_19_2 == 3) { ldv_work_19_2 = 4; intel_edp_drrs_downclock_work(ldv_work_struct_19_0); ldv_work_19_2 = 1; } else { } goto ldv_51224; case 3: ; if (ldv_work_19_3 == 2 || ldv_work_19_3 == 3) { ldv_work_19_3 = 4; intel_edp_drrs_downclock_work(ldv_work_struct_19_0); ldv_work_19_3 = 1; } else { } goto ldv_51224; default: ldv_stop(); } ldv_51224: ; return; } } void disable_work_19(struct work_struct *work ) { { if ((ldv_work_19_0 == 3 || ldv_work_19_0 == 2) && (unsigned long )ldv_work_struct_19_0 == (unsigned long )work) { ldv_work_19_0 = 1; } else { } if ((ldv_work_19_1 == 3 || ldv_work_19_1 == 2) && (unsigned long )ldv_work_struct_19_1 == (unsigned long )work) { ldv_work_19_1 = 1; } else { } if ((ldv_work_19_2 == 3 || ldv_work_19_2 == 2) && (unsigned long )ldv_work_struct_19_2 == (unsigned long )work) { ldv_work_19_2 = 1; } else { } if ((ldv_work_19_3 == 3 || ldv_work_19_3 == 2) && (unsigned long )ldv_work_struct_19_3 == (unsigned long )work) { ldv_work_19_3 = 1; } else { } return; } } void work_init_19(void) { { ldv_work_19_0 = 0; ldv_work_19_1 = 0; ldv_work_19_2 = 0; ldv_work_19_3 = 0; return; } } void call_and_disable_all_19(int state ) 
{ { if (ldv_work_19_0 == state) { call_and_disable_work_19(ldv_work_struct_19_0); } else { } if (ldv_work_19_1 == state) { call_and_disable_work_19(ldv_work_struct_19_1); } else { } if (ldv_work_19_2 == state) { call_and_disable_work_19(ldv_work_struct_19_2); } else { } if (ldv_work_19_3 == state) { call_and_disable_work_19(ldv_work_struct_19_3); } else { } return; } } void ldv_initialize_drm_encoder_funcs_51(void) { void *tmp ; { tmp = ldv_init_zalloc(96UL); intel_dp_enc_funcs_group0 = (struct drm_encoder *)tmp; return; } } void invoke_work_20(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_20_0 == 2 || ldv_work_20_0 == 3) { ldv_work_20_0 = 4; edp_panel_vdd_work(ldv_work_struct_20_0); ldv_work_20_0 = 1; } else { } goto ldv_51247; case 1: ; if (ldv_work_20_1 == 2 || ldv_work_20_1 == 3) { ldv_work_20_1 = 4; edp_panel_vdd_work(ldv_work_struct_20_0); ldv_work_20_1 = 1; } else { } goto ldv_51247; case 2: ; if (ldv_work_20_2 == 2 || ldv_work_20_2 == 3) { ldv_work_20_2 = 4; edp_panel_vdd_work(ldv_work_struct_20_0); ldv_work_20_2 = 1; } else { } goto ldv_51247; case 3: ; if (ldv_work_20_3 == 2 || ldv_work_20_3 == 3) { ldv_work_20_3 = 4; edp_panel_vdd_work(ldv_work_struct_20_0); ldv_work_20_3 = 1; } else { } goto ldv_51247; default: ldv_stop(); } ldv_51247: ; return; } } void activate_work_19(struct work_struct *work , int state ) { { if (ldv_work_19_0 == 0) { ldv_work_struct_19_0 = work; ldv_work_19_0 = state; return; } else { } if (ldv_work_19_1 == 0) { ldv_work_struct_19_1 = work; ldv_work_19_1 = state; return; } else { } if (ldv_work_19_2 == 0) { ldv_work_struct_19_2 = work; ldv_work_19_2 = state; return; } else { } if (ldv_work_19_3 == 0) { ldv_work_struct_19_3 = work; ldv_work_19_3 = state; return; } else { } return; } } void call_and_disable_work_19(struct work_struct *work ) { { if ((ldv_work_19_0 == 2 || ldv_work_19_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_19_0) { 
intel_edp_drrs_downclock_work(work); ldv_work_19_0 = 1; return; } else { }
if ((ldv_work_19_1 == 2 || ldv_work_19_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_19_1) { intel_edp_drrs_downclock_work(work); ldv_work_19_1 = 1; return; } else { }
if ((ldv_work_19_2 == 2 || ldv_work_19_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_19_2) { intel_edp_drrs_downclock_work(work); ldv_work_19_2 = 1; return; } else { }
if ((ldv_work_19_3 == 2 || ldv_work_19_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_19_3) { intel_edp_drrs_downclock_work(work); ldv_work_19_3 = 1; return; } else { }
return; } }
/* If @work is tracked by an activated group-20 slot (state 2 or 3), run
 * edp_panel_vdd_work() once and move that slot to state 1 (ran/disabled);
 * only the first matching slot is serviced because each branch returns. */
void call_and_disable_work_20(struct work_struct *work ) { {
if ((ldv_work_20_0 == 2 || ldv_work_20_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_20_0) { edp_panel_vdd_work(work); ldv_work_20_0 = 1; return; } else { }
if ((ldv_work_20_1 == 2 || ldv_work_20_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_20_1) { edp_panel_vdd_work(work); ldv_work_20_1 = 1; return; } else { }
if ((ldv_work_20_2 == 2 || ldv_work_20_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_20_2) { edp_panel_vdd_work(work); ldv_work_20_2 = 1; return; } else { }
if ((ldv_work_20_3 == 2 || ldv_work_20_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_20_3) { edp_panel_vdd_work(work); ldv_work_20_3 = 1; return; } else { }
return; } }
/* Allocate zeroed dummy objects for the intel_dp_connector_funcs callback
 * group driven by ldv_main_exported_53(): a drm_property (104 bytes) and a
 * drm_connector (936 bytes).  Sizes presumably match the real struct layouts
 * of this kernel build -- NOTE(review): generated, not verified here. */
void ldv_initialize_drm_connector_funcs_53(void) { void *tmp ; void *tmp___0 ; {
tmp = ldv_init_zalloc(104UL); intel_dp_connector_funcs_group0 = (struct drm_property *)tmp;
tmp___0 = ldv_init_zalloc(936UL); intel_dp_connector_funcs_group1 = (struct drm_connector *)tmp___0;
return; } }
/* Reset all four group-20 work slots to 0 (unused). */
void work_init_20(void) { { ldv_work_20_0 = 0; ldv_work_20_1 = 0; ldv_work_20_2 = 0; ldv_work_20_3 = 0; return; } }
/* Mark any activated group-20 slot tracking @work as disabled (state 1)
 * without running the callback; models cancel_delayed_work(). */
void disable_work_20(struct work_struct *work ) { { if ((ldv_work_20_0 == 3 || ldv_work_20_0 == 2) && (unsigned long )ldv_work_struct_20_0 == (unsigned long
)work) { ldv_work_20_0 = 1; } else { } if ((ldv_work_20_1 == 3 || ldv_work_20_1 == 2) && (unsigned long )ldv_work_struct_20_1 == (unsigned long )work) { ldv_work_20_1 = 1; } else { } if ((ldv_work_20_2 == 3 || ldv_work_20_2 == 2) && (unsigned long )ldv_work_struct_20_2 == (unsigned long )work) { ldv_work_20_2 = 1; } else { } if ((ldv_work_20_3 == 3 || ldv_work_20_3 == 2) && (unsigned long )ldv_work_struct_20_3 == (unsigned long )work) { ldv_work_20_3 = 1; } else { } return; } } void ldv_initialize_drm_connector_helper_funcs_52(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_dp_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void activate_work_20(struct work_struct *work , int state ) { { if (ldv_work_20_0 == 0) { ldv_work_struct_20_0 = work; ldv_work_20_0 = state; return; } else { } if (ldv_work_20_1 == 0) { ldv_work_struct_20_1 = work; ldv_work_20_1 = state; return; } else { } if (ldv_work_20_2 == 0) { ldv_work_struct_20_2 = work; ldv_work_20_2 = state; return; } else { } if (ldv_work_20_3 == 0) { ldv_work_struct_20_3 = work; ldv_work_20_3 = state; return; } else { } return; } } void call_and_disable_all_20(int state ) { { if (ldv_work_20_0 == state) { call_and_disable_work_20(ldv_work_struct_20_0); } else { } if (ldv_work_20_1 == state) { call_and_disable_work_20(ldv_work_struct_20_1); } else { } if (ldv_work_20_2 == state) { call_and_disable_work_20(ldv_work_struct_20_2); } else { } if (ldv_work_20_3 == state) { call_and_disable_work_20(ldv_work_struct_20_3); } else { } return; } } void ldv_main_exported_52(void) { struct drm_display_mode *ldvarg384 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg384 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_52 == 1) { intel_dp_get_modes(intel_dp_connector_helper_funcs_group0); ldv_state_variable_52 = 1; } else { } goto ldv_51290; case 1: ; if (ldv_state_variable_52 == 1) { 
intel_dp_mode_valid(intel_dp_connector_helper_funcs_group0, ldvarg384); ldv_state_variable_52 = 1; } else { } goto ldv_51290; case 2: ; if (ldv_state_variable_52 == 1) { intel_best_encoder(intel_dp_connector_helper_funcs_group0); ldv_state_variable_52 = 1; } else { } goto ldv_51290; default: ldv_stop(); } ldv_51290: ; return; } } void ldv_main_exported_53(void) { uint32_t ldvarg513 ; bool ldvarg516 ; uint64_t *ldvarg512 ; void *tmp ; int ldvarg518 ; struct drm_connector_state *ldvarg517 ; void *tmp___0 ; struct drm_connector_state *ldvarg511 ; void *tmp___1 ; uint32_t ldvarg514 ; uint64_t ldvarg515 ; int tmp___2 ; { tmp = ldv_init_zalloc(8UL); ldvarg512 = (uint64_t *)tmp; tmp___0 = ldv_init_zalloc(32UL); ldvarg517 = (struct drm_connector_state *)tmp___0; tmp___1 = ldv_init_zalloc(32UL); ldvarg511 = (struct drm_connector_state *)tmp___1; ldv_memset((void *)(& ldvarg513), 0, 4UL); ldv_memset((void *)(& ldvarg516), 0, 1UL); ldv_memset((void *)(& ldvarg518), 0, 4UL); ldv_memset((void *)(& ldvarg514), 0, 4UL); ldv_memset((void *)(& ldvarg515), 0, 8UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_53 == 2) { intel_connector_dpms(intel_dp_connector_funcs_group1, ldvarg518); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { intel_connector_dpms(intel_dp_connector_funcs_group1, ldvarg518); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 1: ; if (ldv_state_variable_53 == 2) { drm_atomic_helper_connector_destroy_state(intel_dp_connector_funcs_group1, ldvarg517); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { drm_atomic_helper_connector_destroy_state(intel_dp_connector_funcs_group1, ldvarg517); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 2: ; if (ldv_state_variable_53 == 2) { drm_atomic_helper_connector_duplicate_state(intel_dp_connector_funcs_group1); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { 
drm_atomic_helper_connector_duplicate_state(intel_dp_connector_funcs_group1); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 3: ; if (ldv_state_variable_53 == 2) { intel_dp_force(intel_dp_connector_funcs_group1); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { intel_dp_force(intel_dp_connector_funcs_group1); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 4: ; if (ldv_state_variable_53 == 2) { intel_dp_detect(intel_dp_connector_funcs_group1, (int )ldvarg516); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { intel_dp_detect(intel_dp_connector_funcs_group1, (int )ldvarg516); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 5: ; if (ldv_state_variable_53 == 2) { intel_dp_set_property(intel_dp_connector_funcs_group1, intel_dp_connector_funcs_group0, ldvarg515); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { intel_dp_set_property(intel_dp_connector_funcs_group1, intel_dp_connector_funcs_group0, ldvarg515); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 6: ; if (ldv_state_variable_53 == 2) { intel_dp_connector_destroy(intel_dp_connector_funcs_group1); ldv_state_variable_53 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51306; case 7: ; if (ldv_state_variable_53 == 2) { drm_helper_probe_single_connector_modes(intel_dp_connector_funcs_group1, ldvarg514, ldvarg513); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { drm_helper_probe_single_connector_modes(intel_dp_connector_funcs_group1, ldvarg514, ldvarg513); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 8: ; if (ldv_state_variable_53 == 2) { intel_connector_atomic_get_property(intel_dp_connector_funcs_group1, (struct drm_connector_state const *)ldvarg511, intel_dp_connector_funcs_group0, ldvarg512); ldv_state_variable_53 = 2; } else { } if (ldv_state_variable_53 == 1) { intel_connector_atomic_get_property(intel_dp_connector_funcs_group1, (struct drm_connector_state const 
*)ldvarg511, intel_dp_connector_funcs_group0, ldvarg512); ldv_state_variable_53 = 1; } else { } goto ldv_51306; case 9: ; if (ldv_state_variable_53 == 1) { ldv_probe_53(); ldv_state_variable_53 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_51306; default: ldv_stop(); } ldv_51306: ; return; } } void ldv_main_exported_51(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_51 == 2) { intel_dp_encoder_destroy(intel_dp_enc_funcs_group0); ldv_state_variable_51 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_51321; case 1: ; if (ldv_state_variable_51 == 2) { intel_dp_encoder_reset(intel_dp_enc_funcs_group0); ldv_state_variable_51 = 2; } else { } if (ldv_state_variable_51 == 1) { intel_dp_encoder_reset(intel_dp_enc_funcs_group0); ldv_state_variable_51 = 1; } else { } goto ldv_51321; case 2: ; if (ldv_state_variable_51 == 1) { ldv_probe_51(); ldv_state_variable_51 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_51321; default: ldv_stop(); } ldv_51321: ; return; } } bool ldv_queue_work_on_867(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_868(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_869(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; 
activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_870(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_871(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_cancel_delayed_work_872(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_delayed_work_sync_873(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_delayed_work_sync_874(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_delayed_work_sync_875(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_delayed_work_876(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___7 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_delayed_work_877(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___8 ldv_func_res ; 
bool tmp ; { tmp = cancel_delayed_work(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } bool ldv_cancel_delayed_work_sync_878(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___11 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___22(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static bool IS_ERR_OR_NULL(void const *ptr ) ; bool ldv_queue_work_on_895(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_897(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_896(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_899(int ldv_func_arg1 , struct 
workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_898(struct workqueue_struct *ldv_func_arg1 ) ; extern void drm_reinit_primary_mode_group(struct drm_device * ) ; extern int drm_mode_connector_set_path_property(struct drm_connector * , char const * ) ; __inline static bool drm_can_sleep___13(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; default: __bad_percpu_size(); } ldv_39629: pscr_ret__ = pfo_ret__; goto ldv_39635; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; default: __bad_percpu_size(); } ldv_39639: pscr_ret__ = pfo_ret_____0; goto ldv_39635; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" 
(pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; default: __bad_percpu_size(); } ldv_39648: pscr_ret__ = pfo_ret_____1; goto ldv_39635; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; default: __bad_percpu_size(); } ldv_39657: pscr_ret__ = pfo_ret_____2; goto ldv_39635; default: __bad_size_call_parameter(); goto ldv_39635; } ldv_39635: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___22(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } extern int drm_fb_helper_add_one_connector(struct drm_fb_helper * , struct drm_connector * ) ; extern int drm_fb_helper_remove_one_connector(struct drm_fb_helper * , struct drm_connector * ) ; extern int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr * , struct device * , struct drm_dp_aux * , int , int , int ) ; extern void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr * ) ; extern enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector * , struct drm_dp_mst_topology_mgr * , struct drm_dp_mst_port * ) ; extern struct edid *drm_dp_mst_get_edid(struct drm_connector * , struct drm_dp_mst_topology_mgr * , struct drm_dp_mst_port * ) ; extern int drm_dp_calc_pbn_mode(int , int ) ; extern bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr * , struct drm_dp_mst_port * , int , int * ) ; extern void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr * , struct drm_dp_mst_port * ) ; extern void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr * , struct drm_dp_mst_port 
* ) ; extern int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr * , int ) ; extern int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr * ) ; extern int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr * ) ; extern int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr * ) ; static bool intel_dp_mst_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_dp_mst_encoder *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_dp *intel_dp ; struct drm_atomic_state *state ; int bpp ; int i ; int lane_count ; int slots ; int rate ; struct drm_display_mode *adjusted_mode ; struct drm_connector *drm_connector ; struct intel_connector *connector ; struct intel_connector *found ; struct drm_connector_state *connector_state ; int mst_pbn ; u8 tmp___0 ; int tmp___1 ; struct drm_connector const *__mptr ; { tmp = enc_to_mst(& encoder->base); intel_mst = tmp; intel_dig_port = intel_mst->primary; intel_dp = & intel_dig_port->dp; adjusted_mode = & pipe_config->base.adjusted_mode; found = (struct intel_connector *)0; pipe_config->dp_encoder_is_mst = 1; pipe_config->has_pch_encoder = 0; pipe_config->has_dp_encoder = 1; bpp = 24; tmp___0 = drm_dp_max_lane_count((u8 const *)(& intel_dp->dpcd)); lane_count = (int )tmp___0; rate = intel_dp_max_link_rate(intel_dp); if ((unsigned int )intel_dp->num_sink_rates != 0U) { intel_dp->link_bw = 0U; tmp___1 = intel_dp_rate_select(intel_dp, rate); intel_dp->rate_select = (uint8_t )tmp___1; } else { intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate); intel_dp->rate_select = 0U; } intel_dp->lane_count = (uint8_t )lane_count; pipe_config->pipe_bpp = 24; pipe_config->port_clock = rate; state = pipe_config->base.state; i = 0; goto ldv_48277; ldv_48276: ; if ((unsigned long )drm_connector != (unsigned long )((struct drm_connector *)0)) { __mptr = (struct drm_connector const *)drm_connector; connector = (struct intel_connector 
*)__mptr; if ((unsigned long )connector_state->best_encoder == (unsigned long )(& encoder->base)) { found = connector; goto ldv_48275; } else { } } else { } i = i + 1; ldv_48277: ; if (state->num_connector > i) { drm_connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_48276; } else { } ldv_48275: ; if ((unsigned long )found == (unsigned long )((struct intel_connector *)0)) { drm_err("can\'t find connector\n"); return (0); } else { } mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp); pipe_config->pbn = mst_pbn; slots = drm_dp_find_vcpi_slots(& intel_dp->mst_mgr, mst_pbn); intel_link_compute_m_n(bpp, lane_count, adjusted_mode->crtc_clock, pipe_config->port_clock, & pipe_config->dp_m_n); pipe_config->dp_m_n.tu = (uint32_t )slots; return (1); } } static void intel_mst_disable_dp(struct intel_encoder *encoder ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_dp_mst_encoder *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_dp *intel_dp ; int ret ; long tmp___0 ; { tmp = enc_to_mst(& encoder->base); intel_mst = tmp; intel_dig_port = intel_mst->primary; intel_dp = & intel_dig_port->dp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_mst_disable_dp", "%d\n", intel_dp->active_mst_links); } else { } drm_dp_mst_reset_vcpi_slots(& intel_dp->mst_mgr, (struct drm_dp_mst_port *)intel_mst->port); ret = drm_dp_update_payload_part1(& intel_dp->mst_mgr); if (ret != 0) { drm_err("failed to update payload %d\n", ret); } else { } return; } } static void intel_mst_post_disable_dp(struct intel_encoder *encoder ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_dp_mst_encoder *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_dp *intel_dp ; long tmp___0 ; { tmp = enc_to_mst(& encoder->base); intel_mst = tmp; intel_dig_port = intel_mst->primary; intel_dp = & intel_dig_port->dp; tmp___0 = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_mst_post_disable_dp", "%d\n", intel_dp->active_mst_links); } else { } drm_dp_check_act_status(& intel_dp->mst_mgr); drm_dp_update_payload_part2(& intel_dp->mst_mgr); drm_dp_mst_deallocate_vcpi(& intel_dp->mst_mgr, (struct drm_dp_mst_port *)intel_mst->port); intel_dp->active_mst_links = intel_dp->active_mst_links - 1; intel_mst->port = (void *)0; if (intel_dp->active_mst_links == 0) { (*(intel_dig_port->base.post_disable))(& intel_dig_port->base); intel_dp_sink_dpms(intel_dp, 3); } else { } return; } } static void intel_mst_pre_enable_dp(struct intel_encoder *encoder ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_dp_mst_encoder *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_dp *intel_dp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; int ret ; uint32_t temp ; struct intel_connector *found ; struct intel_connector *connector ; int slots ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; long tmp___0 ; enum port port___0 ; enum port tmp___1 ; struct drm_i915_private *__p ; bool tmp___2 ; { tmp = enc_to_mst(& encoder->base); intel_mst = tmp; intel_dig_port = intel_mst->primary; intel_dp = & intel_dig_port->dp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dig_port->port; found = (struct intel_connector *)0; crtc = encoder->base.crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; __mptr___0 = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_48317; ldv_48316: ; if ((unsigned long )(connector->base.state)->best_encoder == (unsigned long )(& encoder->base)) { found = connector; goto ldv_48315; } else { } __mptr___1 = (struct 
list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_48317: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_48316; } else { } ldv_48315: ; if ((unsigned long )found == (unsigned long )((struct intel_connector *)0)) { drm_err("can\'t find connector\n"); return; } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_mst_pre_enable_dp", "%d\n", intel_dp->active_mst_links); } else { } intel_mst->port = found->port; if (intel_dp->active_mst_links == 0) { tmp___1 = intel_ddi_get_encoder_port(encoder); port___0 = tmp___1; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 8U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )port___0 + 71744U) * 4U), (intel_crtc->config)->ddi_pll_sel, 1); } else { } intel_ddi_init_dp_buf_reg(& intel_dig_port->base); intel_dp_sink_dpms(intel_dp, 0); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); } else { } tmp___2 = drm_dp_mst_allocate_vcpi(& intel_dp->mst_mgr, (struct drm_dp_mst_port *)intel_mst->port, (intel_crtc->config)->pbn, & slots); ret = (int )tmp___2; if (ret == 0) { drm_err("failed to allocate vcpi\n"); return; } else { } intel_dp->active_mst_links = intel_dp->active_mst_links + 1; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409668U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )port * 256U + 409668U), temp, 1); ret = drm_dp_update_payload_part1(& intel_dp->mst_mgr); return; } } static void intel_mst_enable_dp(struct intel_encoder *encoder ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_dp_mst_encoder *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_dp *intel_dp ; struct drm_device *dev ; 
struct drm_i915_private *dev_priv ; enum port port ; int ret ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; { tmp = enc_to_mst(& encoder->base); intel_mst = tmp; intel_dig_port = intel_mst->primary; intel_dp = & intel_dig_port->dp; dev = intel_dig_port->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dig_port->port; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_mst_enable_dp", "%d\n", intel_dp->active_mst_links); } else { } tmp___1 = msecs_to_jiffies(1U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48347; ldv_48346: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409668U), 1); if ((tmp___2 & 16777216U) == 0U) { ret__ = -110; } else { } goto ldv_48345; } else { } tmp___3 = drm_can_sleep___13(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48347: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )port * 256U + 409668U), 1); if ((tmp___4 & 16777216U) == 0U) { goto ldv_48346; } else { } ldv_48345: ; if (ret__ != 0) { drm_err("Timed out waiting for ACT sent\n"); } else { } ret = drm_dp_check_act_status(& intel_dp->mst_mgr); ret = drm_dp_update_payload_part2(& intel_dp->mst_mgr); return; } } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_dp_mst_encoder *tmp ; { tmp = enc_to_mst(& encoder->base); intel_mst = tmp; *pipe = intel_mst->pipe; if ((unsigned long )intel_mst->port != (unsigned long )((void *)0)) { return (1); } else { } return (0); } } static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_dp_mst_encoder *intel_mst 
; struct intel_dp_mst_encoder *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum transcoder cpu_transcoder ; u32 temp ; u32 flags ; { tmp = enc_to_mst(& encoder->base); intel_mst = tmp; intel_dig_port = intel_mst->primary; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; cpu_transcoder = pipe_config->cpu_transcoder; flags = 0U; pipe_config->has_dp_encoder = 1; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 394240U), 1); if ((temp & 65536U) != 0U) { flags = flags | 1U; } else { flags = flags | 2U; } if ((temp & 131072U) != 0U) { flags = flags | 4U; } else { flags = flags | 8U; } switch (temp & 7340032U) { case 2097152U: pipe_config->pipe_bpp = 18; goto ldv_48369; case 0U: pipe_config->pipe_bpp = 24; goto ldv_48369; case 1048576U: pipe_config->pipe_bpp = 30; goto ldv_48369; case 3145728U: pipe_config->pipe_bpp = 36; goto ldv_48369; default: ; goto ldv_48369; } ldv_48369: pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | flags; intel_dp_get_m_n(crtc, pipe_config); intel_ddi_clock_get(& intel_dig_port->base, pipe_config); return; } } static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct intel_dp *intel_dp ; struct edid *edid ; int ret ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; intel_dp = intel_connector->mst_port; edid = drm_dp_mst_get_edid(connector, & intel_dp->mst_mgr, (struct drm_dp_mst_port *)intel_connector->port); if ((unsigned long )edid == 
(unsigned long )((struct edid *)0)) { return (0); } else { } ret = intel_connector_update_modes(connector, edid); kfree((void const *)edid); return (ret); } } static enum drm_connector_status intel_dp_mst_detect(struct drm_connector *connector , bool force ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct intel_dp *intel_dp ; enum drm_connector_status tmp ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; intel_dp = intel_connector->mst_port; tmp = drm_dp_mst_detect_port(connector, & intel_dp->mst_mgr, (struct drm_dp_mst_port *)intel_connector->port); return (tmp); } } static int intel_dp_mst_set_property(struct drm_connector *connector , struct drm_property *property , uint64_t val ) { { return (0); } } static void intel_dp_mst_connector_destroy(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; bool tmp ; int tmp___0 ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; tmp = IS_ERR_OR_NULL((void const *)intel_connector->edid); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { kfree((void const *)intel_connector->edid); } else { } drm_connector_cleanup(connector); kfree((void const *)connector); return; } } static struct drm_connector_funcs const intel_dp_mst_connector_funcs = {& intel_connector_dpms, 0, 0, 0, & intel_dp_mst_detect, & drm_helper_probe_single_connector_modes, & intel_dp_mst_set_property, & intel_dp_mst_connector_destroy, 0, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property}; static int intel_dp_mst_get_modes(struct drm_connector *connector ) { int tmp ; { tmp = intel_dp_mst_get_ddc_modes(connector); return (tmp); } } static enum drm_mode_status intel_dp_mst_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { { if 
(mode->clock <= 9999) { return (16); } else { } if ((mode->flags & 4096U) != 0U) { return (3); } else { } return (0); } } static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct intel_dp *intel_dp ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; intel_dp = intel_connector->mst_port; return (& (intel_dp->mst_encoders[0])->base.base); } } static struct drm_connector_helper_funcs const intel_dp_mst_connector_helper_funcs = {& intel_dp_mst_get_modes, & intel_dp_mst_mode_valid, & intel_mst_best_encoder}; static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_dp_mst_encoder *tmp ; { tmp = enc_to_mst(encoder); intel_mst = tmp; drm_encoder_cleanup(encoder); kfree((void const *)intel_mst); return; } } static struct drm_encoder_funcs const intel_dp_mst_enc_funcs = {0, & intel_dp_mst_encoder_destroy}; static bool intel_dp_mst_get_hw_state(struct intel_connector *connector ) { enum pipe pipe ; bool tmp ; int tmp___0 ; { if ((unsigned long )connector->encoder != (unsigned long )((struct intel_encoder *)0)) { tmp = (*((connector->encoder)->get_hw_state))(connector->encoder, & pipe); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } return (1); } else { } return (0); } } static void intel_connector_add_to_fbdev(struct intel_connector *connector ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; { tmp = to_i915((struct drm_device const *)connector->base.dev); dev_priv = tmp; drm_fb_helper_add_one_connector(& (dev_priv->fbdev)->helper, & connector->base); return; } } static void intel_connector_remove_from_fbdev(struct intel_connector *connector ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; { tmp = to_i915((struct drm_device const *)connector->base.dev); 
dev_priv = tmp; drm_fb_helper_remove_one_connector(& (dev_priv->fbdev)->helper, & connector->base); return; } } static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr , struct drm_dp_mst_port *port , char const *pathprop ) { struct intel_dp *intel_dp ; struct drm_dp_mst_topology_mgr const *__mptr ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct intel_connector *intel_connector ; struct drm_connector *connector ; int i ; { __mptr = (struct drm_dp_mst_topology_mgr const *)mgr; intel_dp = (struct intel_dp *)__mptr + 0xfffffffffffff5e8UL; tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; intel_connector = intel_connector_alloc(); if ((unsigned long )intel_connector == (unsigned long )((struct intel_connector *)0)) { return ((struct drm_connector *)0); } else { } connector = & intel_connector->base; drm_connector_init(dev, connector, & intel_dp_mst_connector_funcs, 10); drm_connector_helper_add(connector, & intel_dp_mst_connector_helper_funcs); intel_connector->unregister = & intel_connector_unregister; intel_connector->get_hw_state = & intel_dp_mst_get_hw_state; intel_connector->mst_port = intel_dp; intel_connector->port = (void *)port; i = 0; goto ldv_48449; ldv_48448: drm_mode_connector_attach_encoder(& intel_connector->base, & (intel_dp->mst_encoders[i])->base.base); i = i + 1; ldv_48449: ; if (i <= 2) { goto ldv_48448; } else { } intel_dp_add_properties(intel_dp, connector); drm_object_attach_property(& connector->base, dev->mode_config.path_property, 0ULL); drm_object_attach_property(& connector->base, dev->mode_config.tile_property, 0ULL); drm_mode_connector_set_path_property(connector, pathprop); drm_reinit_primary_mode_group(dev); mutex_lock_nested(& dev->mode_config.mutex, 0U); intel_connector_add_to_fbdev(intel_connector); mutex_unlock(& dev->mode_config.mutex); drm_connector_register(& intel_connector->base); return 
(connector); } } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr , struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct drm_device *dev ; long tmp ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; dev = connector->dev; mutex_lock_nested(& dev->mode_config.mutex, 0U); intel_connector_dpms(connector, 3); mutex_unlock(& dev->mode_config.mutex); (*(intel_connector->unregister))(intel_connector); mutex_lock_nested(& dev->mode_config.mutex, 0U); intel_connector_remove_from_fbdev(intel_connector); drm_connector_cleanup(connector); mutex_unlock(& dev->mode_config.mutex); drm_reinit_primary_mode_group(dev); kfree((void const *)intel_connector); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dp_destroy_mst_connector", "\n"); } else { } return; } } static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr ) { struct intel_dp *intel_dp ; struct drm_dp_mst_topology_mgr const *__mptr ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct drm_device *dev ; { __mptr = (struct drm_dp_mst_topology_mgr const *)mgr; intel_dp = (struct intel_dp *)__mptr + 0xfffffffffffff5e8UL; tmp = dp_to_dig_port(intel_dp); intel_dig_port = tmp; dev = intel_dig_port->base.base.dev; drm_kms_helper_hotplug_event(dev); return; } } static struct drm_dp_mst_topology_cbs mst_cbs = {& intel_dp_add_mst_connector, & intel_dp_destroy_mst_connector, & intel_dp_mst_hotplug}; static struct intel_dp_mst_encoder *intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port , enum pipe pipe ) { struct intel_dp_mst_encoder *intel_mst ; struct intel_encoder *intel_encoder ; struct drm_device *dev ; void *tmp ; { dev = intel_dig_port->base.base.dev; tmp = kzalloc(240UL, 208U); intel_mst = (struct intel_dp_mst_encoder *)tmp; if ((unsigned long )intel_mst == 
(unsigned long )((struct intel_dp_mst_encoder *)0)) { return ((struct intel_dp_mst_encoder *)0); } else { } intel_mst->pipe = pipe; intel_encoder = & intel_mst->base; intel_mst->primary = intel_dig_port; drm_encoder_init(dev, & intel_encoder->base, & intel_dp_mst_enc_funcs, 7); intel_encoder->type = 11; intel_encoder->crtc_mask = 7; intel_encoder->cloneable = 0U; intel_encoder->compute_config = & intel_dp_mst_compute_config; intel_encoder->disable = & intel_mst_disable_dp; intel_encoder->post_disable = & intel_mst_post_disable_dp; intel_encoder->pre_enable = & intel_mst_pre_enable_dp; intel_encoder->enable = & intel_mst_enable_dp; intel_encoder->get_hw_state = & intel_dp_mst_enc_get_hw_state; intel_encoder->get_config = & intel_dp_mst_enc_get_config; return (intel_mst); } } static bool intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port ) { int i ; struct intel_dp *intel_dp ; { intel_dp = & intel_dig_port->dp; i = 0; goto ldv_48482; ldv_48481: intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, (enum pipe )i); i = i + 1; ldv_48482: ; if (i <= 2) { goto ldv_48481; } else { } return (1); } } int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port , int conn_base_id ) { struct intel_dp *intel_dp ; struct drm_device *dev ; int ret ; { intel_dp = & intel_dig_port->dp; dev = intel_dig_port->base.base.dev; intel_dp->can_mst = 1; intel_dp->mst_mgr.cbs = & mst_cbs; intel_dp_create_fake_mst_encoders(intel_dig_port); ret = drm_dp_mst_topology_mgr_init(& intel_dp->mst_mgr, dev->dev, & intel_dp->aux, 16, 3, conn_base_id); if (ret != 0) { intel_dp->can_mst = 0; return (ret); } else { } return (0); } } void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port ) { struct intel_dp *intel_dp ; { intel_dp = & intel_dig_port->dp; if (! 
intel_dp->can_mst) { return; } else { } drm_dp_mst_topology_mgr_destroy(& intel_dp->mst_mgr); return; } } extern int ldv_probe_50(void) ; extern int ldv_probe_48(void) ; void ldv_initialize_drm_connector_funcs_50(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(104UL); intel_dp_mst_connector_funcs_group0 = (struct drm_property *)tmp; tmp___0 = ldv_init_zalloc(936UL); intel_dp_mst_connector_funcs_group1 = (struct drm_connector *)tmp___0; return; } } void ldv_initialize_drm_connector_helper_funcs_49(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_dp_mst_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_initialize_drm_dp_mst_topology_cbs_47(void) { void *tmp ; { tmp = ldv_init_zalloc(1824UL); mst_cbs_group0 = (struct drm_dp_mst_topology_mgr *)tmp; return; } } void ldv_main_exported_50(void) { uint32_t ldvarg365 ; bool ldvarg367 ; uint64_t ldvarg366 ; uint64_t *ldvarg362 ; void *tmp ; struct drm_connector_state *ldvarg368 ; void *tmp___0 ; int ldvarg369 ; struct drm_connector_state *ldvarg363 ; void *tmp___1 ; uint32_t ldvarg364 ; int tmp___2 ; { tmp = ldv_init_zalloc(8UL); ldvarg362 = (uint64_t *)tmp; tmp___0 = ldv_init_zalloc(32UL); ldvarg368 = (struct drm_connector_state *)tmp___0; tmp___1 = ldv_init_zalloc(32UL); ldvarg363 = (struct drm_connector_state *)tmp___1; ldv_memset((void *)(& ldvarg365), 0, 4UL); ldv_memset((void *)(& ldvarg367), 0, 1UL); ldv_memset((void *)(& ldvarg366), 0, 8UL); ldv_memset((void *)(& ldvarg369), 0, 4UL); ldv_memset((void *)(& ldvarg364), 0, 4UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_50 == 2) { intel_connector_dpms(intel_dp_mst_connector_funcs_group1, ldvarg369); ldv_state_variable_50 = 2; } else { } if (ldv_state_variable_50 == 1) { intel_connector_dpms(intel_dp_mst_connector_funcs_group1, ldvarg369); ldv_state_variable_50 = 1; } else { } goto ldv_48520; case 1: ; if (ldv_state_variable_50 == 2) { 
drm_atomic_helper_connector_destroy_state(intel_dp_mst_connector_funcs_group1, ldvarg368); ldv_state_variable_50 = 2; } else { } if (ldv_state_variable_50 == 1) { drm_atomic_helper_connector_destroy_state(intel_dp_mst_connector_funcs_group1, ldvarg368); ldv_state_variable_50 = 1; } else { } goto ldv_48520; case 2: ; if (ldv_state_variable_50 == 2) { drm_atomic_helper_connector_duplicate_state(intel_dp_mst_connector_funcs_group1); ldv_state_variable_50 = 2; } else { } if (ldv_state_variable_50 == 1) { drm_atomic_helper_connector_duplicate_state(intel_dp_mst_connector_funcs_group1); ldv_state_variable_50 = 1; } else { } goto ldv_48520; case 3: ; if (ldv_state_variable_50 == 2) { intel_dp_mst_detect(intel_dp_mst_connector_funcs_group1, (int )ldvarg367); ldv_state_variable_50 = 2; } else { } if (ldv_state_variable_50 == 1) { intel_dp_mst_detect(intel_dp_mst_connector_funcs_group1, (int )ldvarg367); ldv_state_variable_50 = 1; } else { } goto ldv_48520; case 4: ; if (ldv_state_variable_50 == 2) { intel_dp_mst_set_property(intel_dp_mst_connector_funcs_group1, intel_dp_mst_connector_funcs_group0, ldvarg366); ldv_state_variable_50 = 2; } else { } if (ldv_state_variable_50 == 1) { intel_dp_mst_set_property(intel_dp_mst_connector_funcs_group1, intel_dp_mst_connector_funcs_group0, ldvarg366); ldv_state_variable_50 = 1; } else { } goto ldv_48520; case 5: ; if (ldv_state_variable_50 == 2) { intel_dp_mst_connector_destroy(intel_dp_mst_connector_funcs_group1); ldv_state_variable_50 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48520; case 6: ; if (ldv_state_variable_50 == 2) { drm_helper_probe_single_connector_modes(intel_dp_mst_connector_funcs_group1, ldvarg365, ldvarg364); ldv_state_variable_50 = 2; } else { } if (ldv_state_variable_50 == 1) { drm_helper_probe_single_connector_modes(intel_dp_mst_connector_funcs_group1, ldvarg365, ldvarg364); ldv_state_variable_50 = 1; } else { } goto ldv_48520; case 7: ; if (ldv_state_variable_50 == 2) { 
intel_connector_atomic_get_property(intel_dp_mst_connector_funcs_group1, (struct drm_connector_state const *)ldvarg363, intel_dp_mst_connector_funcs_group0, ldvarg362); ldv_state_variable_50 = 2; } else { } if (ldv_state_variable_50 == 1) { intel_connector_atomic_get_property(intel_dp_mst_connector_funcs_group1, (struct drm_connector_state const *)ldvarg363, intel_dp_mst_connector_funcs_group0, ldvarg362); ldv_state_variable_50 = 1; } else { } goto ldv_48520; case 8: ; if (ldv_state_variable_50 == 1) { ldv_probe_50(); ldv_state_variable_50 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48520; default: ldv_stop(); } ldv_48520: ; return; } } void ldv_main_exported_49(void) { struct drm_display_mode *ldvarg174 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg174 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_49 == 1) { intel_dp_mst_get_modes(intel_dp_mst_connector_helper_funcs_group0); ldv_state_variable_49 = 1; } else { } goto ldv_48535; case 1: ; if (ldv_state_variable_49 == 1) { intel_dp_mst_mode_valid(intel_dp_mst_connector_helper_funcs_group0, ldvarg174); ldv_state_variable_49 = 1; } else { } goto ldv_48535; case 2: ; if (ldv_state_variable_49 == 1) { intel_mst_best_encoder(intel_dp_mst_connector_helper_funcs_group0); ldv_state_variable_49 = 1; } else { } goto ldv_48535; default: ldv_stop(); } ldv_48535: ; return; } } void ldv_main_exported_48(void) { struct drm_encoder *ldvarg337 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg337 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_48 == 2) { intel_dp_mst_encoder_destroy(ldvarg337); ldv_state_variable_48 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48544; case 1: ; if (ldv_state_variable_48 == 1) { ldv_probe_48(); ldv_state_variable_48 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48544; default: ldv_stop(); } ldv_48544: ; return; } } 
void ldv_main_exported_47(void) { struct drm_connector *ldvarg227 ; void *tmp ; char *ldvarg228 ; void *tmp___0 ; struct drm_dp_mst_port *ldvarg229 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(936UL); ldvarg227 = (struct drm_connector *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg228 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(2232UL); ldvarg229 = (struct drm_dp_mst_port *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_47 == 1) { intel_dp_add_mst_connector(mst_cbs_group0, ldvarg229, (char const *)ldvarg228); ldv_state_variable_47 = 1; } else { } goto ldv_48554; case 1: ; if (ldv_state_variable_47 == 1) { intel_dp_mst_hotplug(mst_cbs_group0); ldv_state_variable_47 = 1; } else { } goto ldv_48554; case 2: ; if (ldv_state_variable_47 == 1) { intel_dp_destroy_mst_connector(mst_cbs_group0, ldvarg227); ldv_state_variable_47 = 1; } else { } goto ldv_48554; default: ldv_stop(); } ldv_48554: ; return; } } bool ldv_queue_work_on_895(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_896(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_897(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return 
(ldv_func_res); } } void ldv_flush_workqueue_898(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_899(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___23(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } bool ldv_queue_work_on_909(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_911(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_910(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work 
*ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_913(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_912(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool drm_can_sleep___14(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39629; default: __bad_percpu_size(); } ldv_39629: pscr_ret__ = pfo_ret__; goto ldv_39635; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39639; default: __bad_percpu_size(); } ldv_39639: pscr_ret__ = pfo_ret_____0; goto ldv_39635; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39648; 
default: __bad_percpu_size(); } ldv_39648: pscr_ret__ = pfo_ret_____1; goto ldv_39635; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39657; default: __bad_percpu_size(); } ldv_39657: pscr_ret__ = pfo_ret_____2; goto ldv_39635; default: __bad_size_call_parameter(); goto ldv_39635; } ldv_39635: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___23(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } __inline static int drm_panel_unprepare(struct drm_panel *panel ) { int tmp ; { if (((unsigned long )panel != (unsigned long )((struct drm_panel *)0) && (unsigned long )panel->funcs != (unsigned long )((struct drm_panel_funcs const *)0)) && (unsigned long )(panel->funcs)->unprepare != (unsigned long )((int (*/* const */)(struct drm_panel * ))0)) { tmp = (*((panel->funcs)->unprepare))(panel); return (tmp); } else { } return ((unsigned long )panel != (unsigned long )((struct drm_panel *)0) ? -38 : -22); } } __inline static int drm_panel_disable(struct drm_panel *panel ) { int tmp ; { if (((unsigned long )panel != (unsigned long )((struct drm_panel *)0) && (unsigned long )panel->funcs != (unsigned long )((struct drm_panel_funcs const *)0)) && (unsigned long )(panel->funcs)->disable != (unsigned long )((int (*/* const */)(struct drm_panel * ))0)) { tmp = (*((panel->funcs)->disable))(panel); return (tmp); } else { } return ((unsigned long )panel != (unsigned long )((struct drm_panel *)0) ? 
-38 : -22); } } __inline static int drm_panel_prepare(struct drm_panel *panel ) { int tmp ; { if (((unsigned long )panel != (unsigned long )((struct drm_panel *)0) && (unsigned long )panel->funcs != (unsigned long )((struct drm_panel_funcs const *)0)) && (unsigned long )(panel->funcs)->prepare != (unsigned long )((int (*/* const */)(struct drm_panel * ))0)) { tmp = (*((panel->funcs)->prepare))(panel); return (tmp); } else { } return ((unsigned long )panel != (unsigned long )((struct drm_panel *)0) ? -38 : -22); } } __inline static int drm_panel_enable(struct drm_panel *panel ) { int tmp ; { if (((unsigned long )panel != (unsigned long )((struct drm_panel *)0) && (unsigned long )panel->funcs != (unsigned long )((struct drm_panel_funcs const *)0)) && (unsigned long )(panel->funcs)->enable != (unsigned long )((int (*/* const */)(struct drm_panel * ))0)) { tmp = (*((panel->funcs)->enable))(panel); return (tmp); } else { } return ((unsigned long )panel != (unsigned long )((struct drm_panel *)0) ? -38 : -22); } } __inline static int drm_panel_get_modes(struct drm_panel *panel ) { int tmp ; { if (((unsigned long )panel != (unsigned long )((struct drm_panel *)0) && (unsigned long )panel->funcs != (unsigned long )((struct drm_panel_funcs const *)0)) && (unsigned long )(panel->funcs)->get_modes != (unsigned long )((int (*/* const */)(struct drm_panel * ))0)) { tmp = (*((panel->funcs)->get_modes))(panel); return (tmp); } else { } return ((unsigned long )panel != (unsigned long )((struct drm_panel *)0) ? 
/* (tail of drm_panel_get_modes) -38 = -ENOSYS when the get_modes hook is
 * missing, -22 = -EINVAL when panel itself is NULL. */
-38 : -22); } }
/* NOTE(review): this file is CIL 1.5.1-generated output (see file header);
 * the comments below are annotations only -- every code token is unchanged. */
extern void drm_panel_remove(struct drm_panel * ) ;
extern int drm_panel_attach(struct drm_panel * , struct drm_connector * ) ;
extern int drm_panel_detach(struct drm_panel * ) ;
extern int mipi_dsi_create_packet(struct mipi_dsi_packet * , struct mipi_dsi_msg const * ) ;
/* Flattened container_of(): the generator emits no offset arithmetic, i.e.
 * the mipi_dsi_host sits at offset 0 of struct intel_dsi_host, so a plain
 * cast recovers the wrapper. */
__inline static struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h ) { struct mipi_dsi_host const *__mptr ; { __mptr = (struct mipi_dsi_host const *)h; return ((struct intel_dsi_host *)__mptr); } }
/* Same zero-offset container_of() pattern: drm_encoder -> intel_dsi. */
__inline static struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder ) { struct drm_encoder const *__mptr ; { __mptr = (struct drm_encoder const *)encoder; return ((struct intel_dsi *)__mptr); } }
void vlv_enable_dsi_pll(struct intel_encoder *encoder ) ;
void vlv_disable_dsi_pll(struct intel_encoder *encoder ) ;
u32 vlv_get_dsi_pclk(struct intel_encoder *encoder , int pipe_bpp ) ;
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi , u16 panel_id ) ;
/* Single-entry panel-driver table: panel id 1 -> VBT-described panel init. */
static struct __anonstruct_intel_dsi_drivers_444 const intel_dsi_drivers[1U] = { {1U, & vbt_panel_init}};
/* Poll the per-port FIFO status register until every bit of mask 0x04040404
 * (67372036U) is set (= all FIFOs empty), giving up after ~100 ms with a
 * "DPI FIFOs are not empty" error; the unrolled poll loop continues on the
 * next source line.  Port mux as flattened by the generator:
 * port A -> mipi_mmio_base+45172, port B -> register 0, any other port ->
 * mipi_mmio_base+47220 -- presumably MIPI_GEN_FIFO_STAT(port); TODO confirm
 * against i915_reg.h. */
static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi , enum port port ) { struct drm_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 mask ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; { encoder = & intel_dsi->base.base; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; mask = 67372036U; tmp = msecs_to_jiffies(100U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48535; ldv_48534: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45172U) : ((unsigned int )port != 1U ?
(off_t )(dev_priv->mipi_mmio_base + 47220U) : 0L), 1); if ((tmp___0 & mask) != mask) { ret__ = -110; } else { } goto ldv_48533; } else { } tmp___1 = drm_can_sleep___14(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48535: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45172U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47220U) : 0L), 1); if ((tmp___2 & mask) != mask) { goto ldv_48534; } else { } ldv_48533: ; if (ret__ != 0) { drm_err("DPI FIFOs are not empty\n"); } else { } return; } } static void write_data(struct drm_i915_private *dev_priv , u32 reg , u8 const *data , u32 len ) { u32 i ; u32 j ; u32 val ; u8 const *tmp ; u32 __min1 ; u32 __min2 ; { i = 0U; goto ldv_48553; ldv_48552: val = 0U; j = 0U; goto ldv_48550; ldv_48549: tmp = data; data = data + 1; val = (u32 )((int )*tmp << (int )(j * 8U)) | val; j = j + 1U; ldv_48550: __min1 = len - i; __min2 = 4U; if ((__min1 < __min2 ? __min1 : __min2) > j) { goto ldv_48549; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); i = i + 4U; ldv_48553: ; if (i < len) { goto ldv_48552; } else { } return; } } static void read_data(struct drm_i915_private *dev_priv , u32 reg , u8 *data , u32 len ) { u32 i ; u32 j ; u32 val ; uint32_t tmp ; u8 *tmp___0 ; u32 __min1 ; u32 __min2 ; { i = 0U; goto ldv_48571; ldv_48570: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; j = 0U; goto ldv_48568; ldv_48567: tmp___0 = data; data = data + 1; *tmp___0 = (u8 )(val >> (int )(j * 8U)); j = j + 1U; ldv_48568: __min1 = len - i; __min2 = 4U; if ((__min1 < __min2 ? 
__min1 : __min2) > j) { goto ldv_48567; } else { } i = i + 4U; ldv_48571: ; if (i < len) { goto ldv_48570; } else { } return; } } static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host , struct mipi_dsi_msg const *msg ) { struct intel_dsi_host *intel_dsi_host ; struct intel_dsi_host *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; struct mipi_dsi_packet packet ; ssize_t ret ; u8 const *header ; u8 const *data ; u32 data_reg ; u32 data_mask ; u32 ctrl_reg ; u32 ctrl_mask ; int tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; unsigned long timeout_____0 ; unsigned long tmp___5 ; int ret_____0 ; uint32_t tmp___6 ; bool tmp___7 ; uint32_t tmp___8 ; unsigned long timeout_____1 ; unsigned long tmp___9 ; int ret_____1 ; uint32_t tmp___10 ; bool tmp___11 ; uint32_t tmp___12 ; { tmp = to_intel_dsi_host(host); intel_dsi_host = tmp; dev = (intel_dsi_host->intel_dsi)->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dsi_host->port; tmp___0 = mipi_dsi_create_packet(& packet, msg); ret = (ssize_t )tmp___0; if (ret < 0L) { return (ret); } else { } header = (u8 const *)(& packet.header); data = packet.payload; if (((unsigned long )msg->flags & 2UL) != 0UL) { data_reg = (unsigned int )port == 0U ? dev_priv->mipi_mmio_base + 45156U : ((unsigned int )port != 1U ? dev_priv->mipi_mmio_base + 47204U : 0U); data_mask = 256U; ctrl_reg = (unsigned int )port == 0U ? dev_priv->mipi_mmio_base + 45164U : ((unsigned int )port != 1U ? dev_priv->mipi_mmio_base + 47212U : 0U); ctrl_mask = 16777216U; } else { data_reg = (unsigned int )port == 0U ? dev_priv->mipi_mmio_base + 45160U : ((unsigned int )port != 1U ? dev_priv->mipi_mmio_base + 47208U : 0U); data_mask = 1U; ctrl_reg = (unsigned int )port == 0U ? dev_priv->mipi_mmio_base + 45168U : ((unsigned int )port != 1U ? 
dev_priv->mipi_mmio_base + 47216U : 0U); ctrl_mask = 65536U; } if (packet.payload_length != 0UL) { tmp___1 = msecs_to_jiffies(50U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48599; ldv_48598: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45172U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47220U) : 0L), 1); if ((tmp___2 & data_mask) != 0U) { ret__ = -110; } else { } goto ldv_48597; } else { } tmp___3 = drm_can_sleep___14(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48599: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45172U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47220U) : 0L), 1); if ((tmp___4 & data_mask) != 0U) { goto ldv_48598; } else { } ldv_48597: ; if (ret__ != 0) { drm_err("Timeout waiting for HS/LP DATA FIFO !full\n"); } else { } write_data(dev_priv, data_reg, packet.payload, (u32 )packet.payload_length); } else { } if ((unsigned long )msg->rx_len != 0UL) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45060U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47108U) : 0L), 536870912U, 1); } else { } tmp___5 = msecs_to_jiffies(50U); timeout_____0 = (tmp___5 + (unsigned long )jiffies) + 1UL; ret_____0 = 0; goto ldv_48611; ldv_48610: ; if ((long )(timeout_____0 - (unsigned long )jiffies) < 0L) { tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45172U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47220U) : 0L), 1); if ((tmp___6 & ctrl_mask) != 0U) { ret_____0 = -110; } else { } goto ldv_48609; } else { } tmp___7 = drm_can_sleep___14(); if ((int )tmp___7) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48611: tmp___8 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45172U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47220U) : 0L), 1); if ((tmp___8 & ctrl_mask) != 0U) { goto ldv_48610; } else { } ldv_48609: ; if (ret_____0 != 0) { drm_err("Timeout waiting for HS/LP CTRL FIFO !full\n"); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ctrl_reg, (uint32_t )((((int )*(header + 2UL) << 16) | ((int )*(header + 1UL) << 8)) | (int )*header), 1); if ((unsigned long )msg->rx_len != 0UL) { data_mask = 536870912U; tmp___9 = msecs_to_jiffies(50U); timeout_____1 = (tmp___9 + (unsigned long )jiffies) + 1UL; ret_____1 = 0; goto ldv_48623; ldv_48622: ; if ((long )(timeout_____1 - (unsigned long )jiffies) < 0L) { tmp___10 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45060U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47108U) : 0L), 1); if ((tmp___10 & data_mask) != data_mask) { ret_____1 = -110; } else { } goto ldv_48621; } else { } tmp___11 = drm_can_sleep___14(); if ((int )tmp___11) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48623: tmp___12 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45060U) : ((unsigned int )port != 1U ? 
/* (interior of intel_dsi_host_transfer) final iteration of the read-completion
 * poll: tmp___12 re-tests the read-data-available bit; loop back while clear. */
(off_t )(dev_priv->mipi_mmio_base + 47108U) : 0L), 1); if ((tmp___12 & data_mask) != data_mask) { goto ldv_48622; } else { } ldv_48621: ;
/* Poll timed out: the panel never produced a read response. */
if (ret_____1 != 0) { drm_err("Timeout waiting for read data.\n"); } else { }
/* Copy the returned payload out of the data FIFO into the caller's rx_buf. */
read_data(dev_priv, data_reg, (u8 *)msg->rx_buf, (u32 )msg->rx_len); } else { }
/* Bytes "transferred" = 4-byte packet header + payload length. */
return ((ssize_t )(packet.payload_length + 4UL)); } }
/* mipi_dsi_host_ops attach/detach hooks: nothing to do on this platform. */
static int intel_dsi_host_attach(struct mipi_dsi_host *host , struct mipi_dsi_device *dsi ) { { return (0); } }
static int intel_dsi_host_detach(struct mipi_dsi_host *host , struct mipi_dsi_device *dsi ) { { return (0); } }
static struct mipi_dsi_host_ops const intel_dsi_host_ops = {& intel_dsi_host_attach, & intel_dsi_host_detach, & intel_dsi_host_transfer};
/* Allocate one DSI host wrapper plus its mipi_dsi_device for the given port.
 * 40UL / 1448UL are generator-expanded sizeof()s of the two structs; 208U is
 * presumably GFP_KERNEL (0xd0) -- TODO confirm.  Returns NULL on allocation
 * failure; a failed device allocation also frees the host (no leak). */
static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi , enum port port ) { struct intel_dsi_host *host ; struct mipi_dsi_device *device ; void *tmp ; void *tmp___0 ; { tmp = kzalloc(40UL, 208U); host = (struct intel_dsi_host *)tmp; if ((unsigned long )host == (unsigned long )((struct intel_dsi_host *)0)) { return ((struct intel_dsi_host *)0); } else { } host->base.ops = & intel_dsi_host_ops; host->intel_dsi = intel_dsi; host->port = port; tmp___0 = kzalloc(1448UL, 208U); device = (struct mipi_dsi_device *)tmp___0; if ((unsigned long )device == (unsigned long )((struct mipi_dsi_device *)0)) { kfree((void const *)host); return ((struct intel_dsi_host *)0); } else { } device->host = & host->base; host->device = device; return (host); } }
/* Send a DPI "special packet" command (callers pass 1U and 2U -- presumably
 * SHUTDOWN and TURN_ON; TODO confirm).  Bit 6 (0x40) selects low-power mode:
 * cleared (& 0xFFFFFFBF) for high-speed, set for LP.  The register writes and
 * completion poll continue on the following source lines. */
static int dpi_send_cmd(struct intel_dsi *intel_dsi , u32 cmd , bool hs , enum port port ) { struct drm_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 mask ; uint32_t tmp ; unsigned long timeout__ ; unsigned long tmp___0 ; int ret__ ; uint32_t tmp___1 ; bool tmp___2 ; uint32_t tmp___3 ; { encoder = & intel_dsi->base.base; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )hs) { cmd = cmd & 4294967231U; } else { cmd = cmd | 64U; }
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45060U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47108U) : 0L), 1073741824U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45128U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47176U) : 0L), 1); if (tmp == cmd) { drm_err("Same special packet %02x twice in a row.\n", cmd); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45128U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47176U) : 0L), cmd, 1); mask = 1073741824U; tmp___0 = msecs_to_jiffies(100U); timeout__ = (tmp___0 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48660; ldv_48659: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45060U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47108U) : 0L), 1); if ((tmp___1 & mask) != mask) { ret__ = -110; } else { } goto ldv_48658; } else { } tmp___2 = drm_can_sleep___14(); if ((int )tmp___2) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48660: tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45060U) : ((unsigned int )port != 1U ? 
/* (tail of dpi_send_cmd) final poll iteration: loop back while the
 * special-packet busy bit (mask = 0x40000000) is still set. */
(off_t )(dev_priv->mipi_mmio_base + 47108U) : 0L), 1); if ((tmp___3 & mask) != mask) { goto ldv_48659; } else { } ldv_48658: ;
if (ret__ != 0) { drm_err("Video mode command 0x%08x send failed.\n", cmd); } else { }
return (0); } }
/* Band-gap reset sequence over the FLISDSI sideband, serialized by sb_lock.
 * __const_udelay(644250UL) corresponds to ~150 us (644250 / 4295 per us). */
static void band_gap_reset(struct drm_i915_private *dev_priv ) { { mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_flisdsi_write(dev_priv, 8U, 1U); vlv_flisdsi_write(dev_priv, 15U, 5U); vlv_flisdsi_write(dev_priv, 15U, 37U); __const_udelay(644250UL); vlv_flisdsi_write(dev_priv, 15U, 0U); vlv_flisdsi_write(dev_priv, 8U, 0U); mutex_unlock(& dev_priv->sb_lock); return; } }
/* operation_mode encoding used throughout this file: 0 = video mode,
 * 1 = command mode. */
__inline static bool is_vid_mode(struct intel_dsi *intel_dsi ) { { return ((unsigned int )intel_dsi->operation_mode == 0U); } }
__inline static bool is_cmd_mode(struct intel_dsi *intel_dsi ) { { return ((unsigned int )intel_dsi->operation_mode == 1U); } }
/* Hot-plug hook: emits a debug trace (drm_debug & 4U) and nothing else. */
static void intel_dsi_hot_plug(struct intel_encoder *encoder ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_hot_plug", "\n"); } else { } return; } }
/* Fixed-mode panel config: copy the panel's native timing into the adjusted
 * mode (when one exists), clear the mode flags, and always accept (return 1). */
static bool intel_dsi_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *config ) { struct intel_dsi *intel_dsi ; struct intel_encoder const *__mptr ; struct intel_connector *intel_connector ; struct drm_display_mode *fixed_mode ; struct drm_display_mode *adjusted_mode ; long tmp ; { __mptr = (struct intel_encoder const *)encoder; intel_dsi = (struct intel_dsi *)__mptr; intel_connector = intel_dsi->attached_connector; fixed_mode = intel_connector->panel.fixed_mode; adjusted_mode = & config->base.adjusted_mode; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_compute_config", "\n"); } else { } if ((unsigned long )fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { intel_fixed_panel_mode((struct drm_display_mode const *)fixed_mode, adjusted_mode); } else { } adjusted_mode->flags = 0U; return (1); } } static void
intel_dsi_port_enable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; u32 temp ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; if ((unsigned int )intel_dsi->dual_link == 1U) { temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 2032652L, 1); temp = (u32 )(((int )intel_dsi->pixel_overlap << 30) | 1073741823) & temp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 2032652L, temp, 1); } else { } port = 0; goto ldv_48698; ldv_48697: ; if (((int )intel_dsi->ports >> (int )port) & 1) { temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 0L : 1971968L) : 1970576L, 1); temp = temp & 4294967292U; temp = temp & 4227858431U; if ((unsigned int )intel_dsi->ports == 5U) { temp = (u32 )(((int )intel_dsi->dual_link + -1) << 26) | temp; temp = ((int )intel_crtc->pipe != 0 ? 2U : 1U) | temp; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 0L : 1971968L) : 1970576L, temp | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 
0L : 1971968L) : 1970576L, 0); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48698: ; if ((unsigned int )port <= 4U) { goto ldv_48697; } else { } return; } } static void intel_dsi_port_disable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; u32 temp ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; port = 0; goto ldv_48709; ldv_48708: ; if (((int )intel_dsi->ports >> (int )port) & 1) { temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 0L : 1971968L) : 1970576L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 0L : 1971968L) : 1970576L, temp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 0L : 1971968L) : 1970576L, 0); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48709: ; if ((unsigned int )port <= 4U) { goto ldv_48708; } else { } return; } } static void intel_dsi_device_ready(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; u32 val ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_device_ready", "\n"); } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_flisdsi_write(dev_priv, 4U, 4U); mutex_unlock(& dev_priv->sb_lock); band_gap_reset(dev_priv); port = 0; goto ldv_48720; ldv_48719: ; if (((int )intel_dsi->ports >> (int )port) & 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? 
(off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 4U, 1); usleep_range(2500UL, 3000UL); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1970576L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1970576L, val | 65536U, 1); usleep_range(1000UL, 1500UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 2U, 1); usleep_range(2500UL, 3000UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 1U, 1); usleep_range(2500UL, 3000UL); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48720: ; if ((unsigned int )port <= 4U) { goto ldv_48719; } else { } return; } } static void intel_dsi_enable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; long tmp___0 ; bool tmp___1 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_enable", "\n"); } else { } tmp___1 = is_cmd_mode(intel_dsi); if ((int )tmp___1) { port = 0; goto ldv_48731; ldv_48730: ; if (((int )intel_dsi->ports >> (int )port) & 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45140U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47188U) : 0L), 32U, 1); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48731: ; if ((unsigned int )port <= 4U) { goto ldv_48730; } else { } } else { msleep(20U); port = 0; goto ldv_48734; ldv_48733: ; if (((int )intel_dsi->ports >> (int )port) & 1) { dpi_send_cmd(intel_dsi, 2U, 0, port); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48734: ; if ((unsigned int )port <= 4U) { goto ldv_48733; } else { } msleep(100U); drm_panel_enable(intel_dsi->panel); port = 0; goto ldv_48737; ldv_48736: ; if (((int )intel_dsi->ports >> (int )port) & 1) { wait_for_dsi_fifo_empty(intel_dsi, port); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48737: ; if ((unsigned int )port <= 4U) { goto ldv_48736; } else { } intel_dsi_port_enable(encoder); } return; } } static void intel_dsi_pre_enable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum pipe pipe ; enum port port ; u32 tmp___0 ; long tmp___1 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; pipe = intel_crtc->pipe; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dsi_pre_enable", "\n"); } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); tmp___0 = tmp___0 | 536870912U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe == 0 ? 
/* (tail of intel_dsi_pre_enable) write back the per-pipe register read on the
 * previous source line (display_mmio_offset + 0x6014/0x6018/0x6030 --
 * presumably the VLV/CHV DPLL registers; TODO confirm), record the DPLL
 * hw state, then set bit 11 (2048U) of the offset-0x6200 register
 * (presumably a clock-gate disable; TODO confirm). */
(unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), tmp___0, 1);
(intel_crtc->config)->dpll_hw_state.dpll = 536879104U;
tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), 1);
tmp___0 = tmp___0 | 2048U;
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), tmp___0, 1);
intel_dsi_device_ready(encoder);
/* VBT-programmed panel power-on delay before the drm_panel prepare hook. */
msleep((unsigned int )intel_dsi->panel_on_delay);
drm_panel_prepare(intel_dsi->panel);
/* Drain the FIFOs of every port set in the ports bitmask (ports 0..4);
 * unrolled for_each_dsi_port loop. */
port = 0; goto ldv_48753; ldv_48752: ; if (((int )intel_dsi->ports >> (int )port) & 1) { wait_for_dsi_fifo_empty(intel_dsi, port); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48753: ; if ((unsigned int )port <= 4U) { goto ldv_48752; } else { }
intel_dsi_enable(encoder); return; } }
/* .enable hook is a no-op (the real work happens in pre_enable); trace only. */
static void intel_dsi_enable_nop(struct intel_encoder *encoder ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_enable_nop", "\n"); } else { } return; } }
/* In video mode, broadcast special packet 1U (presumably SHUTDOWN -- TODO
 * confirm) to every enabled port in low-power mode, then settle for 10 ms.
 * Command mode requires nothing here. */
static void intel_dsi_pre_disable(struct intel_encoder *encoder ) { struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; long tmp___0 ; bool tmp___1 ; { tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_pre_disable", "\n"); } else { } tmp___1 = is_vid_mode(intel_dsi); if ((int )tmp___1) { port = 0; goto ldv_48766; ldv_48765: ; if (((int )intel_dsi->ports >> (int )port) & 1) { dpi_send_cmd(intel_dsi, 1U, 0, port); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48766: ; if ((unsigned int )port <= 4U) { goto ldv_48765; } else { } msleep(10U); } else { } return; } }
/* intel_dsi_disable: local declarations and body follow on the next lines. */
static void intel_dsi_disable(struct intel_encoder *encoder ) {
struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; u32 temp ; long tmp___0 ; bool tmp___1 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_disable", "\n"); } else { } tmp___1 = is_vid_mode(intel_dsi); if ((int )tmp___1) { port = 0; goto ldv_48778; ldv_48777: ; if (((int )intel_dsi->ports >> (int )port) & 1) { wait_for_dsi_fifo_empty(intel_dsi, port); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48778: ; if ((unsigned int )port <= 4U) { goto ldv_48777; } else { } intel_dsi_port_disable(encoder); msleep(2U); } else { } port = 0; goto ldv_48781; ldv_48780: ; if (((int )intel_dsi->ports >> (int )port) & 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 0U, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45316U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47364U) : 0L), 1); temp = temp & 4294967199U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45316U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47364U) : 0L), (u32 )((int )intel_dsi->escape_clk_div << 5) | temp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45148U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47196U) : 0L), 2U, 1); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45068U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47116U) : 0L), 1); temp = temp & 4294965375U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45068U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47116U) : 0L), temp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 1U, 1); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48781: ; if ((unsigned int )port <= 4U) { goto ldv_48780; } else { } drm_panel_disable(intel_dsi->panel); port = 0; goto ldv_48784; ldv_48783: ; if (((int )intel_dsi->ports >> (int )port) & 1) { wait_for_dsi_fifo_empty(intel_dsi, port); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48784: ; if ((unsigned int )port <= 4U) { goto ldv_48783; } else { } return; } } static void intel_dsi_clear_device_ready(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; u32 val ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_clear_device_ready", "\n"); } else { } port = 0; goto ldv_48807; ldv_48806: ; if (((int )intel_dsi->ports >> (int )port) & 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 5U, 1); usleep_range(2000UL, 2500UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? 
(off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 3U, 1); usleep_range(2000UL, 2500UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 5U, 1); usleep_range(2000UL, 2500UL); tmp___1 = msecs_to_jiffies(30U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48804; ldv_48803: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1970576L, 1); if ((tmp___2 & 131072U) != 0U) { ret__ = -110; } else { } goto ldv_48802; } else { } tmp___3 = drm_can_sleep___14(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48804: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1970576L, 1); if ((tmp___4 & 131072U) != 0U) { goto ldv_48803; } else { } ldv_48802: ; if (ret__ != 0) { drm_err("DSI LP not going Low\n"); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1970576L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 1970576L, val & 4294901759U, 1); usleep_range(1000UL, 1500UL); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 0U, 1); usleep_range(2000UL, 2500UL); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48807: ; if ((unsigned int )port <= 4U) { goto ldv_48806; } else { } vlv_disable_dsi_pll(encoder); return; } } static void intel_dsi_post_disable(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; u32 val ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_post_disable", "\n"); } else { } intel_dsi_disable(encoder); intel_dsi_clear_device_ready(encoder); val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), 1); val = val & 4294965247U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), val, 1); drm_panel_unprepare(intel_dsi->panel); msleep((unsigned int )intel_dsi->panel_off_delay); msleep((unsigned int )intel_dsi->panel_pwr_cycle_delay); return; } } static bool intel_dsi_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; struct drm_device *dev ; enum intel_display_power_domain power_domain ; u32 dpi_enabled ; u32 func ; enum port port ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; uint32_t tmp___5 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; dev = encoder->base.dev; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { 
drm_ut_debug_printk("intel_dsi_get_hw_state", "\n"); } else { } power_domain = intel_display_port_power_domain(encoder); tmp___1 = intel_display_power_is_enabled(dev_priv, power_domain); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } port = 0; goto ldv_48847; ldv_48846: ; if (((int )intel_dsi->ports >> (int )port) & 1) { func = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45068U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47116U) : 0L), 1); tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port != 0U ? ((unsigned int )port == 1U ? 0L : 1971968L) : 1970576L, 1); dpi_enabled = tmp___3 & 2147483648U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) == 0U) { goto _L; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) != 8U) { _L: /* CIL Label */ if ((unsigned int )port == 2U) { tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )(dev_priv->info.pipe_offsets[1] - dev_priv->info.pipe_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 458760U), 1); dpi_enabled = tmp___4 & 2147483648U; } else { } } else { } } } else { } if (dpi_enabled != 0U || (func & 57344U) != 0U) { tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45056U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47104U) : 0L), 1);
/* Device-ready bit set: report active; port A maps to pipe 0, others to 1. */
if ((int )tmp___5 & 1) { *pipe = (unsigned int )port != 0U; return (1); } else { } } else { } } else { } port = (enum port )((unsigned int )port + 1U); ldv_48847: ; if ((unsigned int )port <= 4U) { goto ldv_48846; } else { } return (0); } }
/* Encoder .get_config hook: recovers the pixel clock from the DSI PLL
 * (vlv_get_dsi_pclk) and stores it as both crtc_clock and port_clock;
 * a zero pclk leaves pipe_config untouched. */
static void intel_dsi_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { u32 pclk ; long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_get_config", "\n"); } else { } pipe_config->dpll_hw_state.dpll_md = 0U; pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); if (pclk == 0U) { return; } else { } pipe_config->base.adjusted_mode.crtc_clock = (int )pclk; pipe_config->port_clock = (int )pclk; return; } }
/* Connector .mode_valid hook: rejects doublescan modes (flags bit 32U,
 * returning status 8) and modes larger than the panel's fixed mode in either
 * dimension (status 29); 0 means the mode is acceptable. The numeric return
 * codes are CIL-lowered drm_mode_status enumerators. */
static enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct drm_display_mode *fixed_mode ; long tmp ; long tmp___0 ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; fixed_mode = intel_connector->panel.fixed_mode; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_mode_valid", "\n"); } else { } if ((mode->flags & 32U) != 0U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_mode_valid", "MODE_NO_DBLESCAN\n"); } else { } return (8); } else { } if ((unsigned long )fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { if (mode->hdisplay > fixed_mode->hdisplay) { return (29); } else { } if (mode->vdisplay > fixed_mode->vdisplay) { return (29); } else { } } else { } return (0); } }
/* txclkesc: converts a microsecond count into escape-clock ticks; the
 * multiplier (20/10/5) is selected by the escape clock divider value
 * (0 or default / 32 / 64). Continues on the next line. */
static u16 txclkesc(u32 divider , unsigned int us ) { { switch (divider) { case 0U: ; default: ; return ((unsigned int )((u16 )us) * 20U); case 32U: ; return ((unsigned int )((u16 )us) * 
10U); case 64U: ; return ((unsigned int )((u16 )us) * 5U); } } }
/* txbyteclkhs: converts a pixel count into high-speed byte-clock ticks.
 * Computes ceil(ceil(pixels * bpp * burst_mode_ratio / 800) / lane_count):
 * "+ 799) / 800" and "+ lane_count - 1) / lane_count" are CIL-lowered
 * DIV_ROUND_UP operations (burst_mode_ratio is scaled by 100, bits by 8). */
static u16 txbyteclkhs(u16 pixels , int bpp , int lane_count , u16 burst_mode_ratio ) { { return ((u16 )((((((int )pixels * bpp) * (int )burst_mode_ratio + 799) / 800 + lane_count) + -1) / lane_count)); } }
/* set_dsi_timings: derives front-porch/sync/back-porch widths from the mode,
 * halves the horizontal values for dual-link panels (adding pixel_overlap in
 * dual-link mode 1), converts horizontal values to HS byte clocks, and writes
 * them to the per-port MIPI timing registers (on the following lines). */
static void set_dsi_timings(struct drm_encoder *encoder , struct drm_display_mode const *mode ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; enum port port ; unsigned int bpp ; unsigned int lane_count ; u16 hactive ; u16 hfp ; u16 hsync ; u16 hbp ; u16 vfp ; u16 vsync ; u16 vbp ; { dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_intel_dsi(encoder); intel_dsi = tmp; bpp = (unsigned int )(intel_crtc->config)->pipe_bpp; lane_count = intel_dsi->lane_count;
/* Horizontal porch/sync widths from the mode geometry. */
hactive = (u16 )mode->hdisplay; hfp = (int )((u16 )mode->hsync_start) - (int )((u16 )mode->hdisplay); hsync = (int )((u16 )mode->hsync_end) - (int )((u16 )mode->hsync_start); hbp = (int )((u16 )mode->htotal) - (int )((u16 )mode->hsync_end);
/* Dual-link: each link carries half the horizontal timing; mode 1 also adds
 * the configured pixel overlap to the active width. */
if ((unsigned int )intel_dsi->dual_link != 0U) { hactive = (u16 )((unsigned int )hactive / 2U); if ((unsigned int )intel_dsi->dual_link == 1U) { hactive = (int )((u16 )intel_dsi->pixel_overlap) + (int )hactive; } else { } hfp = (u16 )((unsigned int )hfp / 2U); hsync = (u16 )((unsigned int )hsync / 2U); hbp = (u16 )((unsigned int )hbp / 2U); } else { }
/* Vertical porch/sync widths (programmed in lines, not byte clocks). */
vfp = (int )((u16 )mode->vsync_start) - (int )((u16 )mode->vdisplay); vsync = (int )((u16 )mode->vsync_end) - (int )((u16 )mode->vsync_start); vbp = (int )((u16 )mode->vtotal) - (int )((u16 )mode->vsync_end);
/* Horizontal values are programmed in HS byte-clock units. */
hactive = txbyteclkhs((int )hactive, (int )bpp, (int )lane_count, (int )intel_dsi->burst_mode_ratio); hfp = txbyteclkhs((int )hfp, (int )bpp, (int )lane_count, (int )intel_dsi->burst_mode_ratio); 
hsync = txbyteclkhs((int )hsync, (int )bpp, (int )lane_count, (int )intel_dsi->burst_mode_ratio); hbp = txbyteclkhs((int )hbp, (int )bpp, (int )lane_count, (int )intel_dsi->burst_mode_ratio);
/* CIL-lowered for_each_dsi_port loop: for every port in the bitmask, write
 * hactive/hfp/hsync/hbp/vfp/vsync/vbp to the port's MIPI timing registers.
 * Each ternary picks the port-A (mipi_mmio_base + 45xxx) or port-C
 * (mipi_mmio_base + 47xxx) register; port B maps to offset 0 here. */
port = 0; goto ldv_48899; ldv_48898: ; if (((int )intel_dsi->ports >> (int )port) & 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45108U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47156U) : 0L), (uint32_t )hactive, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45104U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47152U) : 0L), (uint32_t )hfp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45096U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47144U) : 0L), (uint32_t )hsync, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45100U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47148U) : 0L), (uint32_t )hbp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45120U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47168U) : 0L), (uint32_t )vfp, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45112U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47160U) : 0L), (uint32_t )vsync, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45116U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47164U) : 0L), (uint32_t )vbp, 1); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48899: ; if ((unsigned int )port <= 4U) { goto ldv_48898; } else { } return; } }
/* intel_dsi_prepare: programs the complete per-port MIPI register set before
 * the PLL is enabled — PHY parameters, resolution, timings (via
 * set_dsi_timings), DSI function control, timeouts and clock-lane counts.
 * Spans this and the following four lines. */
static void intel_dsi_prepare(struct intel_encoder *intel_encoder ) { struct drm_encoder *encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; struct drm_display_mode *adjusted_mode ; enum port port ; unsigned int bpp ; u32 val ; u32 tmp___0 ; u16 mode_hdisplay ; long tmp___1 ; bool tmp___2 ; u16 tmp___3 ; u16 tmp___4 ; bool tmp___5 ; u16 tmp___6 ; bool tmp___7 ; { encoder = & intel_encoder->base; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_intel_dsi(encoder); intel_dsi = tmp; adjusted_mode = & (intel_crtc->config)->base.adjusted_mode; bpp = (unsigned int )(intel_crtc->config)->pipe_bpp; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dsi_prepare", "pipe %c\n", (int )intel_crtc->pipe + 65); } else { }
/* Same dual-link width adjustment as in set_dsi_timings. */
mode_hdisplay = (u16 )adjusted_mode->hdisplay; if ((unsigned int )intel_dsi->dual_link != 0U) { mode_hdisplay = (u16 )((unsigned int )mode_hdisplay / 2U); if ((unsigned int )intel_dsi->dual_link == 1U) { mode_hdisplay = (int )((u16 )intel_dsi->pixel_overlap) + (int )mode_hdisplay; } else { } } else { }
/* First per-port loop (CIL labels ldv_48918/ldv_48919): read-modify-write of
 * the port control register at mipi_mmio_base + 45316 (clear bits via mask
 * 4294967199U == ~0x60) — note this first RMW always targets the port-A
 * offset regardless of the loop variable; the port-selected accesses follow
 * on the next line. */
port = 0; goto ldv_48919; ldv_48918: ; if (((int )intel_dsi->ports >> (int )port) & 1) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(dev_priv->mipi_mmio_base + 45316U), 1); tmp___0 = tmp___0 & 4294967199U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(dev_priv->mipi_mmio_base + 45316U), tmp___0, 1); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (unsigned int )port == 0U ? 
(off_t )(dev_priv->mipi_mmio_base + 45316U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47364U) : 0L), 1);
/* RMW the per-port control register: clear bits 3-4 (mask 4294967271U ==
 * ~0x18) then set them again (| 24U) on writeback. */
tmp___0 = tmp___0 & 4294967271U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45316U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47364U) : 0L), tmp___0 | 24U, 1);
/* Write all-ones (4294967295U) to the two per-port interrupt/status
 * registers at +45060/+45064 (port A) or +47108/+47112 (port C) — the usual
 * "clear all sticky bits" idiom. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45060U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47108U) : 0L), 4294967295U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45064U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47112U) : 0L), 4294967295U, 1);
/* Program the D-PHY parameter register from the VBT-derived dphy_reg. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45184U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47232U) : 0L), intel_dsi->dphy_reg, 1);
/* Program the resolution register: vdisplay in the high 16 bits, (possibly
 * dual-link-halved) hdisplay in the low 16 (value completed on next line). */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45088U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47136U) : 0L), (uint32_t )((adjusted_mode->vdisplay << 16) | (int )mode_hdisplay), 1); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48919: ; if ((unsigned int )port <= 4U) { goto ldv_48918; } else { }
/* Program timing registers, then build the DSI function-control value:
 * lane count, virtual channel (shifted 5 for command mode, 3 for video
 * mode), plus 24576U (0x6000) in command mode or the pixel format in video
 * mode. tmp___0 accumulates EOT-disable (bit 0 when eotp_pkt == 0) and
 * clock-stop (bit 1) flags for a later register write. */
set_dsi_timings(encoder, (struct drm_display_mode const *)adjusted_mode); val = intel_dsi->lane_count; tmp___2 = is_cmd_mode(intel_dsi); if ((int )tmp___2) { val = (u32 )(intel_dsi->channel << 5) | val; val = val | 24576U; } else { val = (u32 )(intel_dsi->channel << 3) | val; val = intel_dsi->pixel_format | val; } tmp___0 = 0U; if ((unsigned int )intel_dsi->eotp_pkt == 0U) { tmp___0 = tmp___0 | 1U; } else { } if ((unsigned int )intel_dsi->clock_stop != 0U) { tmp___0 = tmp___0 | 2U; } else { }
/* Second per-port loop (labels ldv_48921/ldv_48922): write function control,
 * then the HS-TX timeout — per-line for burst video mode (format 3U),
 * per-frame otherwise; both values are DIV_ROUND_UP'd byte clocks + 1. */
port = 0; goto ldv_48922; ldv_48921: ; if (((int )intel_dsi->ports >> (int )port) & 1) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45068U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47116U) : 0L), val, 1); tmp___5 = is_vid_mode(intel_dsi); if ((int )tmp___5 && intel_dsi->video_mode_format == 3U) { tmp___3 = txbyteclkhs((int )((u16 )adjusted_mode->htotal), (int )bpp, (int )intel_dsi->lane_count, (int )intel_dsi->burst_mode_ratio); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45072U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47120U) : 0L), (uint32_t )((int )tmp___3 + 1), 1); } else { tmp___4 = txbyteclkhs((int )((u16 )adjusted_mode->vtotal) * (int )((u16 )adjusted_mode->htotal), (int )bpp, (int )intel_dsi->lane_count, (int )intel_dsi->burst_mode_ratio); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45072U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47120U) : 0L), (uint32_t )((int )tmp___4 + 1), 1); }
/* Remaining per-port configuration writes: LP-RX timeout, turnaround
 * timeout, device-reset timer. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45076U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47124U) : 0L), (uint32_t )intel_dsi->lp_rx_timeout, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45080U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47128U) : 0L), (uint32_t )intel_dsi->turn_arnd_val, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45084U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47132U) : 0L), (uint32_t )intel_dsi->rst_timer_val, 1);
/* Write txclkesc(escape_clk_div, 100us) to the register at +45136/+47184...
 * NOTE(review): the same register is overwritten with init_count two writes
 * later (with only the EOT-disable write in between) — mirrors the upstream
 * driver's device-reset-timer/init-count sequence; verify against the
 * original intel_dsi.c if this register aliasing looks suspicious. */
tmp___6 = txclkesc((u32 )intel_dsi->escape_clk_div, 100U); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45136U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47184U) : 0L), (uint32_t )tmp___6, 1);
/* EOT-disable / clock-stop flags accumulated earlier in tmp___0. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45148U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47196U) : 0L), tmp___0, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45136U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47184U) : 0L), (uint32_t )intel_dsi->init_count, 1);
/* HS-to-LP switch count, then (next line) LP byte clock. */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45124U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47172U) : 0L), (uint32_t )intel_dsi->hs_to_lp_count, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45152U) : ((unsigned int )port != 1U ? 
(off_t )(dev_priv->mipi_mmio_base + 47200U) : 0L), (uint32_t )intel_dsi->lp_byte_clk, 1);
/* Bandwidth timer and the combined clock-lane LP<->HS switch counts
 * (lp_to_hs in the high 16 bits, hs_to_lp in the low 16). */
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45188U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47236U) : 0L), intel_dsi->bw_timer, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45192U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47240U) : 0L), (uint32_t )(((int )intel_dsi->clk_lp_to_hs_count << 16) | (int )intel_dsi->clk_hs_to_lp_count), 1);
/* Video mode only: program the video-mode format register with the VBT
 * format config bits plus flags 20U (0x14). */
tmp___7 = is_vid_mode(intel_dsi); if ((int )tmp___7) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (unsigned int )port == 0U ? (off_t )(dev_priv->mipi_mmio_base + 45144U) : ((unsigned int )port != 1U ? (off_t )(dev_priv->mipi_mmio_base + 47192U) : 0L), (intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format) | 20U, 1); } else { } } else { } port = (enum port )((unsigned int )port + 1U); ldv_48922: ; if ((unsigned int )port <= 4U) { goto ldv_48921; } else { } return; } }
/* Encoder .pre_pll_enable hook: program registers (intel_dsi_prepare) then
 * enable the DSI PLL. */
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder ) { long tmp ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_pre_pll_enable", "\n"); } else { } intel_dsi_prepare(encoder); vlv_enable_dsi_pll(encoder); return; } }
/* Connector .detect hook: fixed panel, unconditionally reports status 1
 * (connected). */
static enum drm_connector_status intel_dsi_detect(struct drm_connector *connector , bool force ) { { return (1); } }
/* Connector .get_modes hook: duplicates the panel's fixed mode onto the
 * probed-modes list; returns the number of modes added (0 on failure,
 * 1 on success). Continues on the next line. */
static int intel_dsi_get_modes(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct drm_display_mode *mode ; long tmp ; long tmp___0 ; long tmp___1 ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_get_modes", "\n"); } else { } if ((unsigned long 
)intel_connector->panel.fixed_mode == (unsigned long )((struct drm_display_mode *)0)) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dsi_get_modes", "no fixed mode\n"); } else { } return (0); } else { } mode = drm_mode_duplicate(connector->dev, (struct drm_display_mode const *)intel_connector->panel.fixed_mode); if ((unsigned long )mode == (unsigned long )((struct drm_display_mode *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dsi_get_modes", "drm_mode_duplicate failed\n"); } else { } return (0); } else { } drm_mode_probed_add(connector, mode); return (1); } }
/* Connector .destroy hook: tears down the panel state, cleans up the DRM
 * connector and frees it. */
static void intel_dsi_connector_destroy(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; long tmp ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_connector_destroy", "\n"); } else { } intel_panel_fini(& intel_connector->panel); drm_connector_cleanup(connector); kfree((void const *)connector); return; } }
/* Encoder .destroy hook: detaches/removes the drm_panel (if attached) before
 * destroying the encoder itself. */
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder ) { struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; { tmp = enc_to_intel_dsi(encoder); intel_dsi = tmp; if ((unsigned long )intel_dsi->panel != (unsigned long )((struct drm_panel *)0)) { drm_panel_detach(intel_dsi->panel); drm_panel_remove(intel_dsi->panel); } else { } intel_encoder_destroy(encoder); return; } }
/* Vtables wired into the DRM core: encoder funcs, connector helper funcs
 * (get_modes / mode_valid / best_encoder), and connector funcs — the
 * positional-initializer layout is CIL output; the connector funcs table
 * continues onto the next line. */
static struct drm_encoder_funcs const intel_dsi_funcs = {0, & intel_dsi_encoder_destroy}; static struct drm_connector_helper_funcs const intel_dsi_connector_helper_funcs = {& intel_dsi_get_modes, & intel_dsi_mode_valid, & intel_best_encoder}; static struct drm_connector_funcs const intel_dsi_connector_funcs = {& intel_connector_dpms, 0, 0, 0, & intel_dsi_detect, & 
drm_helper_probe_single_connector_modes, 0, & intel_dsi_connector_destroy, 0, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property};
/* intel_dsi_init: driver-load entry point. Allocates encoder+connector,
 * wires up all the modeset hooks above, resolves the DSI port(s) from VBT
 * data, creates per-port DSI hosts, probes panel drivers, registers the
 * connector and captures the panel's fixed mode. Spans the next two lines;
 * error paths unwind through the "err:" label. */
void intel_dsi_init(struct drm_device *dev ) { struct intel_dsi *intel_dsi ; struct intel_encoder *intel_encoder ; struct drm_encoder *encoder ; struct intel_connector *intel_connector ; struct drm_connector *connector ; struct drm_display_mode *scan ; struct drm_display_mode *fixed_mode ; struct drm_i915_private *dev_priv ; enum port port ; unsigned int i ; long tmp ; struct drm_i915_private *__p ; void *tmp___0 ; struct intel_dsi_host *host ; long tmp___1 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; long tmp___2 ; { fixed_mode = (struct drm_display_mode *)0; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_dsi_init", "\n"); } else { }
/* Bail out unless the VBT flagged MIPI support (byte-offset test is a
 * CIL-lowered bitfield read). */
if ((unsigned int )*((unsigned char *)dev_priv + 41280UL) == 0U) { return; } else { }
/* Platform check: only the supported platform gets a MIPI MMIO base
 * (1572864U == 0x180000); anything else is rejected. */
__p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { dev_priv->mipi_mmio_base = 1572864U; } else { drm_err("Unsupported Mipi device to reg base"); return; }
/* Allocate the intel_dsi encoder wrapper and its connector. */
tmp___0 = kzalloc(352UL, 208U); intel_dsi = (struct intel_dsi *)tmp___0; if ((unsigned long )intel_dsi == (unsigned long )((struct intel_dsi *)0)) { return; } else { } intel_connector = intel_connector_alloc(); if ((unsigned long )intel_connector == (unsigned long )((struct intel_connector *)0)) { kfree((void const *)intel_dsi); return; } else { } intel_encoder = & intel_dsi->base; encoder = & intel_encoder->base; intel_dsi->attached_connector = intel_connector; connector = & intel_connector->base; drm_encoder_init(dev, encoder, & intel_dsi_funcs, 6);
/* Wire up the modeset callbacks defined earlier in this file. */
intel_encoder->hot_plug = & intel_dsi_hot_plug; intel_encoder->compute_config = & intel_dsi_compute_config; intel_encoder->pre_pll_enable = & 
intel_dsi_pre_pll_enable; intel_encoder->pre_enable = & intel_dsi_pre_enable; intel_encoder->enable = & intel_dsi_enable_nop; intel_encoder->disable = & intel_dsi_pre_disable; intel_encoder->post_disable = & intel_dsi_post_disable; intel_encoder->get_hw_state = & intel_dsi_get_hw_state; intel_encoder->get_config = & intel_dsi_get_config; intel_connector->get_hw_state = & intel_connector_get_hw_state; intel_connector->unregister = & intel_connector_unregister;
/* Resolve crtc mask and port bitmask from VBT: dual-link config uses both
 * ports (0x5), otherwise port value 21U/23U selects DSI port A or C. */
if ((unsigned int )*((unsigned char *)dev_priv->vbt.dsi.config + 6UL) != 0U) { intel_encoder->crtc_mask = 1; intel_dsi->ports = 5U; } else if ((unsigned int )dev_priv->vbt.dsi.port == 21U) { intel_encoder->crtc_mask = 1; intel_dsi->ports = 1U; } else if ((unsigned int )dev_priv->vbt.dsi.port == 23U) { intel_encoder->crtc_mask = 2; intel_dsi->ports = 4U; } else { }
/* Create a DSI host per enabled port (loop lowered from for_each_dsi_port). */
port = 0; goto ldv_48977; ldv_48976: ; if (((int )intel_dsi->ports >> (int )port) & 1) { host = intel_dsi_host_init(intel_dsi, port); if ((unsigned long )host == (unsigned long )((struct intel_dsi_host *)0)) { goto err; } else { } intel_dsi->dsi_hosts[(unsigned int )port] = host; } else { } port = (enum port )((unsigned int )port + 1U); ldv_48977: ; if ((unsigned int )port <= 4U) { goto ldv_48976; } else { }
/* Probe the panel driver table. NOTE(review): the loop condition "i == 0U"
 * means only entry 0 of intel_dsi_drivers is ever tried — presumably the
 * table had one entry when this harness was generated; verify against the
 * original source if more drivers are expected. */
i = 0U; goto ldv_48983; ldv_48982: intel_dsi->panel = (*(intel_dsi_drivers[i].init))(intel_dsi, (int )intel_dsi_drivers[i].panel_id); if ((unsigned long )intel_dsi->panel != (unsigned long )((struct drm_panel *)0)) { goto ldv_48981; } else { } i = i + 1U; ldv_48983: ; if (i == 0U) { goto ldv_48982; } else { } ldv_48981: ; if ((unsigned long )intel_dsi->panel == (unsigned long )((struct drm_panel *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_dsi_init", "no device found\n"); } else { } goto err; } else { }
/* Register the connector and attach the panel. */
intel_encoder->type = 9; intel_encoder->cloneable = 0U; drm_connector_init(dev, connector, & intel_dsi_connector_funcs, 16); drm_connector_helper_add(connector, & 
intel_dsi_connector_helper_funcs); connector->display_info.subpixel_order = 1; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; intel_connector_attach_encoder(intel_connector, intel_encoder); drm_connector_register(connector); drm_panel_attach(intel_dsi->panel, connector);
/* Under the mode_config mutex, ask the panel for its modes and duplicate
 * the first preferred one (type bit 8U) as the fixed panel mode; the
 * goto-loop is a CIL-lowered list_for_each_entry over probed_modes. */
mutex_lock_nested(& dev->mode_config.mutex, 0U); drm_panel_get_modes(intel_dsi->panel); __mptr = (struct list_head const *)connector->probed_modes.next; scan = (struct drm_display_mode *)__mptr; goto ldv_48990; ldv_48989: ; if ((scan->type & 8U) != 0U) { fixed_mode = drm_mode_duplicate(dev, (struct drm_display_mode const *)scan); goto ldv_48988; } else { } __mptr___0 = (struct list_head const *)scan->head.next; scan = (struct drm_display_mode *)__mptr___0; ldv_48990: ; if ((unsigned long )(& scan->head) != (unsigned long )(& connector->probed_modes)) { goto ldv_48989; } else { } ldv_48988: mutex_unlock(& dev->mode_config.mutex); if ((unsigned long )fixed_mode == (unsigned long )((struct drm_display_mode *)0)) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_dsi_init", "no fixed mode\n"); } else { } goto err; } else { } intel_panel_init(& intel_connector->panel, fixed_mode, (struct drm_display_mode *)0); return;
/* Shared error path: undo encoder init and free both allocations. */
err: drm_encoder_cleanup(& intel_encoder->base); kfree((void const *)intel_dsi); kfree((void const *)intel_connector); return; } }
/* ---- LDV verification harness below: auto-generated environment model
 * that exercises the driver's callback tables via nondeterministic state
 * machines. Declarations of harness state and event hooks. ---- */
int ldv_retval_48 ; extern int ldv_open_46(void) ; extern int ldv_close_46(void) ; extern int ldv_release_46(void) ; extern int ldv_probe_46(void) ; extern int ldv_probe_43(void) ; extern int ldv_probe_45(void) ;
/* Harness setup: fabricate zeroed mipi_dsi_host/device objects for the
 * mipi_dsi_host_ops state machine (#46). */
void ldv_initialize_mipi_dsi_host_ops_46(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(16UL); intel_dsi_host_ops_group1 = (struct mipi_dsi_host *)tmp; tmp___0 = ldv_init_zalloc(1448UL); intel_dsi_host_ops_group0 = (struct mipi_dsi_device *)tmp___0; return; } }
/* Harness setup: fabricate a zeroed drm_connector for the connector-helper
 * state machine (#44); assignment completes on the next line. */
void ldv_initialize_drm_connector_helper_funcs_44(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); 
intel_dsi_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } }
/* Harness setup: fabricate a zeroed drm_connector for the connector-funcs
 * state machine (#43). */
void ldv_initialize_drm_connector_funcs_43(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_dsi_connector_funcs_group0 = (struct drm_connector *)tmp; return; } }
/* LDV state machine #46: nondeterministically drives the mipi_dsi_host_ops
 * callbacks (attach/transfer/detach) plus probe/open/close/release events,
 * tracking legal orderings in ldv_state_variable_46 and a reference count.
 * ldv_stop() halts exploration on an invalid choice. Continues on the next
 * line. */
void ldv_main_exported_46(void) { struct mipi_dsi_msg *ldvarg530 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(40UL); ldvarg530 = (struct mipi_dsi_msg *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_46 == 2) { ldv_retval_48 = intel_dsi_host_attach(intel_dsi_host_ops_group1, intel_dsi_host_ops_group0); if (ldv_retval_48 == 0) { ldv_state_variable_46 = 3; } else { } } else { } goto ldv_49018; case 1: ; if (ldv_state_variable_46 == 2) { intel_dsi_host_transfer(intel_dsi_host_ops_group1, (struct mipi_dsi_msg const *)ldvarg530); ldv_state_variable_46 = 2; } else { } if (ldv_state_variable_46 == 1) { intel_dsi_host_transfer(intel_dsi_host_ops_group1, (struct mipi_dsi_msg const *)ldvarg530); ldv_state_variable_46 = 1; } else { } if (ldv_state_variable_46 == 3) { intel_dsi_host_transfer(intel_dsi_host_ops_group1, (struct mipi_dsi_msg const *)ldvarg530); ldv_state_variable_46 = 3; } else { } if (ldv_state_variable_46 == 4) { intel_dsi_host_transfer(intel_dsi_host_ops_group1, (struct mipi_dsi_msg const *)ldvarg530); ldv_state_variable_46 = 4; } else { } goto ldv_49018; case 2: ; if (ldv_state_variable_46 == 3) { intel_dsi_host_detach(intel_dsi_host_ops_group1, intel_dsi_host_ops_group0); ldv_state_variable_46 = 2; } else { } if (ldv_state_variable_46 == 4) { intel_dsi_host_detach(intel_dsi_host_ops_group1, intel_dsi_host_ops_group0); ldv_state_variable_46 = 2; } else { } goto ldv_49018; case 3: ; if (ldv_state_variable_46 == 2) { ldv_release_46(); ldv_state_variable_46 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49018; case 4: ; if (ldv_state_variable_46 == 3) { ldv_open_46(); ldv_state_variable_46 = 4; } else { } goto ldv_49018; case 5: ; if (ldv_state_variable_46 == 4) 
{ ldv_close_46(); ldv_state_variable_46 = 3; } else { } goto ldv_49018; case 6: ; if (ldv_state_variable_46 == 1) { ldv_probe_46(); ldv_state_variable_46 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49018; default: ldv_stop(); } ldv_49018: ; return; } }
/* LDV state machine #45: drives the drm_encoder_funcs table — probe moves
 * state 1->2 (incrementing ref_cnt), intel_dsi_encoder_destroy moves 2->1. */
void ldv_main_exported_45(void) { struct drm_encoder *ldvarg389 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg389 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_45 == 2) { intel_dsi_encoder_destroy(ldvarg389); ldv_state_variable_45 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49031; case 1: ; if (ldv_state_variable_45 == 1) { ldv_probe_45(); ldv_state_variable_45 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49031; default: ldv_stop(); } ldv_49031: ; return; } }
/* LDV state machine #43: drives the drm_connector_funcs table (dpms, atomic
 * state duplicate/destroy, detect, destroy, fill_modes, atomic get_property)
 * with zero-initialized fabricated arguments. Continues on the next two
 * lines. */
void ldv_main_exported_43(void) { int ldvarg243 ; uint64_t *ldvarg236 ; void *tmp ; uint32_t ldvarg239 ; struct drm_connector_state *ldvarg238 ; void *tmp___0 ; bool ldvarg241 ; uint32_t ldvarg240 ; struct drm_property *ldvarg237 ; void *tmp___1 ; struct drm_connector_state *ldvarg242 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(8UL); ldvarg236 = (uint64_t *)tmp; tmp___0 = ldv_init_zalloc(32UL); ldvarg238 = (struct drm_connector_state *)tmp___0; tmp___1 = ldv_init_zalloc(104UL); ldvarg237 = (struct drm_property *)tmp___1; tmp___2 = ldv_init_zalloc(32UL); ldvarg242 = (struct drm_connector_state *)tmp___2; ldv_memset((void *)(& ldvarg243), 0, 4UL); ldv_memset((void *)(& ldvarg239), 0, 4UL); ldv_memset((void *)(& ldvarg241), 0, 1UL); ldv_memset((void *)(& ldvarg240), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_43 == 2) { intel_connector_dpms(intel_dsi_connector_funcs_group0, ldvarg243); ldv_state_variable_43 = 2; } else { } if (ldv_state_variable_43 == 1) { intel_connector_dpms(intel_dsi_connector_funcs_group0, ldvarg243); ldv_state_variable_43 = 1; } else { } goto ldv_49046; case 1: ; if 
(ldv_state_variable_43 == 2) { drm_atomic_helper_connector_destroy_state(intel_dsi_connector_funcs_group0, ldvarg242); ldv_state_variable_43 = 2; } else { } if (ldv_state_variable_43 == 1) { drm_atomic_helper_connector_destroy_state(intel_dsi_connector_funcs_group0, ldvarg242); ldv_state_variable_43 = 1; } else { } goto ldv_49046;
/* case 2: atomic duplicate_state — allowed in either state, state unchanged. */
case 2: ; if (ldv_state_variable_43 == 2) { drm_atomic_helper_connector_duplicate_state(intel_dsi_connector_funcs_group0); ldv_state_variable_43 = 2; } else { } if (ldv_state_variable_43 == 1) { drm_atomic_helper_connector_duplicate_state(intel_dsi_connector_funcs_group0); ldv_state_variable_43 = 1; } else { } goto ldv_49046;
/* case 3: .detect — allowed in either state. */
case 3: ; if (ldv_state_variable_43 == 2) { intel_dsi_detect(intel_dsi_connector_funcs_group0, (int )ldvarg241); ldv_state_variable_43 = 2; } else { } if (ldv_state_variable_43 == 1) { intel_dsi_detect(intel_dsi_connector_funcs_group0, (int )ldvarg241); ldv_state_variable_43 = 1; } else { } goto ldv_49046;
/* case 4: .destroy — only legal after probe (state 2), drops ref_cnt. */
case 4: ; if (ldv_state_variable_43 == 2) { intel_dsi_connector_destroy(intel_dsi_connector_funcs_group0); ldv_state_variable_43 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49046;
/* case 5: .fill_modes (probe helper) — allowed in either state. */
case 5: ; if (ldv_state_variable_43 == 2) { drm_helper_probe_single_connector_modes(intel_dsi_connector_funcs_group0, ldvarg240, ldvarg239); ldv_state_variable_43 = 2; } else { } if (ldv_state_variable_43 == 1) { drm_helper_probe_single_connector_modes(intel_dsi_connector_funcs_group0, ldvarg240, ldvarg239); ldv_state_variable_43 = 1; } else { } goto ldv_49046;
/* case 6: atomic get_property — allowed in either state. */
case 6: ; if (ldv_state_variable_43 == 2) { intel_connector_atomic_get_property(intel_dsi_connector_funcs_group0, (struct drm_connector_state const *)ldvarg238, ldvarg237, ldvarg236); ldv_state_variable_43 = 2; } else { } if (ldv_state_variable_43 == 1) { intel_connector_atomic_get_property(intel_dsi_connector_funcs_group0, (struct drm_connector_state const *)ldvarg238, ldvarg237, ldvarg236); ldv_state_variable_43 = 1; } else { } goto ldv_49046; case 7: ; if 
(ldv_state_variable_43 == 1) { ldv_probe_43(); ldv_state_variable_43 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49046; default: ldv_stop(); } ldv_49046: ; return; } }
/* LDV state machine #44: drives the connector helper funcs (get_modes /
 * mode_valid / best_encoder) — stateless, always in state 1. */
void ldv_main_exported_44(void) { struct drm_display_mode *ldvarg24 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg24 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_44 == 1) { intel_dsi_get_modes(intel_dsi_connector_helper_funcs_group0); ldv_state_variable_44 = 1; } else { } goto ldv_49060; case 1: ; if (ldv_state_variable_44 == 1) { intel_dsi_mode_valid(intel_dsi_connector_helper_funcs_group0, ldvarg24); ldv_state_variable_44 = 1; } else { } goto ldv_49060; case 2: ; if (ldv_state_variable_44 == 1) { intel_best_encoder(intel_dsi_connector_helper_funcs_group0); ldv_state_variable_44 = 1; } else { } goto ldv_49060; default: ldv_stop(); } ldv_49060: ; return; } }
/* LDV workqueue interposers: forward to the real queue_work_on /
 * queue_delayed_work_on, then record the queued work item in the harness
 * (activate_work_18) so the model can later "run" it. */
bool ldv_queue_work_on_909(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_910(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_911(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void 
ldv_flush_workqueue_912(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_913(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/* CIL-expanded arch_local_save_flags (paravirt variant): BUGs via ud2 if the
 * pv save_fl op is unpatched, otherwise issues the patchable indirect call
 * and returns EFLAGS in __ret. Kept verbatim — the inline asm and the
 * paravirt patch-site layout are exact-text sensitive. */
__inline static unsigned long arch_local_save_flags___24(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } }
/* Forward declarations of further LDV workqueue interposers (bodies appear
 * later in the generated file). */
bool ldv_queue_work_on_923(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_925(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_924(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long 
/* Continuation of the ldv_queue_delayed_work_on_924 declaration from the
 * previous original line, plus the remaining wrapper declarations. */
ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_927(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_926(struct workqueue_struct *ldv_func_arg1 ) ;

/* CIL copy #15 of drm_can_sleep(): returns 0 (false) when preemption is
 * disabled, when this CPU is the kgdb master, or when IRQs are disabled
 * per arch_local_save_flags___24(); otherwise returns 1.  The nested
 * switch(4UL) ladders are the expanded this_cpu_read(cpu_number)
 * percpu accessor — only the 4-byte cases are ever taken. */
__inline static bool drm_can_sleep___15(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_40002; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40002; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40002; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_40002; default: __bad_percpu_size(); } ldv_40002: pscr_ret__ = pfo_ret__; goto ldv_40008; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40012; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40012; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40012; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_40012; default: __bad_percpu_size(); } ldv_40012: pscr_ret__ = pfo_ret_____0; goto ldv_40008; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40021; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40021; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40021; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_40021; default: __bad_percpu_size(); } ldv_40021: pscr_ret__ = pfo_ret_____1; goto ldv_40008; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40030; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40030; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40030; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_40030; default: __bad_percpu_size(); } ldv_40030: pscr_ret__ = pfo_ret_____2; goto ldv_40008; default: __bad_size_call_parameter(); goto ldv_40008; } ldv_40008: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___24(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } }

/* LFSR seed table for the VLV DSI PLL M divisor: entry i holds the
 * register encoding for M = i + 62 (31 entries, M in [62, 92]). */
static u32 const lfsr_converts[31U] = { 426U, 469U, 234U, 373U, 442U, 221U, 110U, 311U, 411U, 461U, 486U, 243U, 377U, 188U, 350U, 175U, 343U, 427U, 213U, 106U, 53U, 282U, 397U, 354U, 227U, 113U, 56U, 284U, 142U, 71U, 35U}; 

/* DSI bit clock (kHz) from pixel clock: maps the pixel-format code to
 * bits per pixel (24/18/16), then computes pclk*bpp/lane_count with
 * round-to-nearest (the __x/__d pair is an expanded DIV_ROUND_CLOSEST). */
static u32 dsi_clk_from_pclk(u32 pclk , int pixel_format , int lane_count ) { u32 dsi_clk_khz ; u32 bpp ; u32 __x ; int __d ; { switch (pixel_format) { default: ; case 512: ; case 384: bpp = 24U; goto ldv_48199; case 256: bpp = 18U; goto ldv_48199; case 128: bpp = 16U; goto ldv_48199; } ldv_48199: __x = pclk * bpp; __d = lane_count; dsi_clk_khz = ((u32 )(__d / 2) + __x) / (u32 )__d; return (dsi_clk_khz); } }

/* dsi_calc_mnp(): exhaustive M (62..92) x P (2..6) search, N fixed at 1,
 * ref clock 25 MHz, minimizing |target - M*ref/(P*N)|.  Rejects targets
 * outside (300 MHz, 1.15 GHz].  Encodes the winner into
 * dsi_mnp->dsi_pll_ctrl/dsi_pll_div using the lfsr_converts table.
 * The goto loop labels (ldv_48223/24/26/27) are CIL's for-loop lowering;
 * the body continues on the next original line. */
static int dsi_calc_mnp(int target_dsi_clk , struct dsi_mnp *dsi_mnp ) { unsigned int calc_m ; unsigned int calc_p ; unsigned int m ; unsigned int n ; unsigned int p ; int ref_clk ; int delta ; u32 m_seed ; int calc_dsi_clk ; int d ; long ret ; int __x___0 ; int tmp ; { calc_m = 0U; calc_p = 0U; n = 1U; ref_clk = 25000; delta = target_dsi_clk; if (target_dsi_clk <= 299999 || target_dsi_clk > 1150000) { drm_err("DSI CLK Out of Range\n"); return (-44); } else { } m = 62U; goto ldv_48227;
/* Remainder of dsi_calc_mnp(): the M/P search loops, then the register
 * encoding.  n = ffs(n)-1 turns the divisor into its log2 field value;
 * dsi_pll_ctrl gets the P1 divisor as a one-hot bit at position P+15. */
ldv_48226: p = 2U; goto ldv_48224; ldv_48223: calc_dsi_clk = (int )((m * (unsigned int )ref_clk) / (p * n)); __x___0 = target_dsi_clk - calc_dsi_clk; ret = (long )(__x___0 < 0 ? - __x___0 : __x___0); d = (int )ret; if (d < delta) { delta = d; calc_m = m; calc_p = p; } else { } p = p + 1U; ldv_48224: ; if (p <= 6U && delta != 0) { goto ldv_48223; } else { } m = m + 1U; ldv_48227: ; if (m <= 92U && delta != 0) { goto ldv_48226; } else { } tmp = ffs((int )n); n = (unsigned int )(tmp + -1); m_seed = lfsr_converts[calc_m - 62U]; dsi_mnp->dsi_pll_ctrl = (u32 )(1 << (int )(calc_p + 15U)); dsi_mnp->dsi_pll_div = (n << 16) | m_seed; return (0); } }

/* Program the VLV DSI PLL dividers: derive the DSI clock from the panel
 * pclk, compute M/N/P, OR in the per-port clock-enable bits (bit 8 for
 * port A, bit 7 for port C), then write the CCK regs — control first
 * cleared to 0, divider, then the final control value. */
static void vlv_configure_dsi_pll(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; int ret ; struct dsi_mnp dsi_mnp ; u32 dsi_clk ; long tmp___0 ; long tmp___1 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, (int )intel_dsi->pixel_format, (int )intel_dsi->lane_count); ret = dsi_calc_mnp((int )dsi_clk, & dsi_mnp); if (ret != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("vlv_configure_dsi_pll", "dsi_calc_mnp failed\n"); } else { } return; } else { } if ((int )intel_dsi->ports & 1) { dsi_mnp.dsi_pll_ctrl = dsi_mnp.dsi_pll_ctrl | 256U; } else { } if (((int )intel_dsi->ports & 4) != 0) { dsi_mnp.dsi_pll_ctrl = dsi_mnp.dsi_pll_ctrl | 128U; } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("vlv_configure_dsi_pll", "dsi pll div %08x, ctrl %08x\n", dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl); } else { } vlv_cck_write(dev_priv, 72U, 0U); vlv_cck_write(dev_priv, 76U, dsi_mnp.dsi_pll_div); vlv_cck_write(dev_priv, 72U, dsi_mnp.dsi_pll_ctrl); return; } }

/* Enable the VLV DSI PLL under sb_lock: configure dividers, set the
 * enable bit (bit 31) in CCK reg 72, then poll the lock bit (bit 0)
 * with a 20 ms timeout — the goto loop is CIL's lowering of the
 * wait_for() macro, sleeping 1-2 ms when drm_can_sleep___15() allows. */
void vlv_enable_dsi_pll(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; u32 tmp ; long tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; u32 tmp___2 ; bool tmp___3 ; u32 tmp___4 ; long tmp___5 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("vlv_enable_dsi_pll", "\n"); } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_configure_dsi_pll(encoder); usleep_range(1UL, 10UL); tmp = vlv_cck_read(dev_priv, 72U); tmp = tmp | 2147483648U; vlv_cck_write(dev_priv, 72U, tmp); tmp___1 = msecs_to_jiffies(20U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48254; ldv_48253: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = vlv_cck_read(dev_priv, 72U); if ((tmp___2 & 1U) == 0U) { ret__ = -110; } else { } goto ldv_48252; } else { } tmp___3 = drm_can_sleep___15(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48254: tmp___4 = vlv_cck_read(dev_priv, 72U); if ((tmp___4 & 1U) == 0U) { goto ldv_48253; } else { } ldv_48252: ; if (ret__ != 0) { mutex_unlock(& dev_priv->sb_lock); drm_err("DSI PLL lock failed\n"); return; } else { } mutex_unlock(& dev_priv->sb_lock); tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("vlv_enable_dsi_pll", "DSI PLL locked\n"); } else { } return; } }

/* Disable the DSI PLL: clear the enable bit (bit 31) and set the
 * power-down bit (bit 30) of CCK reg 72, under sb_lock. */
void vlv_disable_dsi_pll(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; u32 tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("vlv_disable_dsi_pll", "\n"); } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); tmp = vlv_cck_read(dev_priv, 72U); tmp = tmp & 2147483647U; tmp = tmp | 1073741824U; vlv_cck_write(dev_priv, 72U, tmp); mutex_unlock(& dev_priv->sb_lock); return; } }

/* Head of assert_bpp_mismatch() — name and body are on the next
 * original line. */
static void
/* assert_bpp_mismatch() (continues the "static void" on the previous
 * original line): maps the DSI pixel-format code to its bpp and emits a
 * WARN (expanded warn_slowpath_fmt at intel_dsi_pll.c:307) if it does
 * not match the pipe's configured bpp. */
assert_bpp_mismatch(int pixel_format , int pipe_bpp ) { int bpp ; int __ret_warn_on ; long tmp ; { switch (pixel_format) { default: ; case 512: ; case 384: bpp = 24; goto ldv_48270; case 256: bpp = 18; goto ldv_48270; case 128: bpp = 16; goto ldv_48270; } ldv_48270: __ret_warn_on = bpp != pipe_bpp; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_dsi_pll.c", 307, "bpp match assertion failure (expected %d, current %d)\n", bpp, pipe_bpp); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } }

/* Reverse of the PLL programming: read the CCK control/divider regs and
 * reconstruct the pixel clock.  P1 is recovered by counting the shifts
 * needed to clear the one-hot field (goto loop ldv_48291/92); M is
 * recovered by a linear search of lfsr_converts (loop ldv_48297/98,
 * i == 31 meaning "not found"); finally
 * pclk = DIV_ROUND_CLOSEST(dsi_clock * lane_count, pipe_bpp). */
u32 vlv_get_dsi_pclk(struct intel_encoder *encoder , int pipe_bpp ) { struct drm_i915_private *dev_priv ; struct intel_dsi *intel_dsi ; struct intel_dsi *tmp ; u32 dsi_clock ; u32 pclk ; u32 pll_ctl ; u32 pll_div ; u32 m ; u32 p ; u32 n ; int refclk ; int i ; long tmp___0 ; u32 __x ; int __d ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_intel_dsi(& encoder->base); intel_dsi = tmp; m = 0U; p = 0U; refclk = 25000; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("vlv_get_dsi_pclk", "\n"); } else { } mutex_lock_nested(& dev_priv->sb_lock, 0U); pll_ctl = vlv_cck_read(dev_priv, 72U); pll_div = vlv_cck_read(dev_priv, 76U); mutex_unlock(& dev_priv->sb_lock); pll_ctl = pll_ctl & 66977792U; pll_ctl = pll_ctl >> 15; n = (pll_div & 196608U) >> 16; n = (u32 )(1 << (int )n); pll_div = pll_div & 511U; pll_div = pll_div; goto ldv_48292; ldv_48291: pll_ctl = pll_ctl >> 1; p = p + 1U; ldv_48292: ; if (pll_ctl != 0U) { goto ldv_48291; } else { } p = p - 1U; if (p == 0U) { drm_err("wrong P1 divisor\n"); return (0U); } else { } i = 0; goto ldv_48298; ldv_48297: ; if ((unsigned int )lfsr_converts[i] == pll_div) { goto ldv_48296; } else { } i = i + 1; ldv_48298: ; if ((unsigned int )i <= 30U) { goto ldv_48297; } else { } ldv_48296: ; if (i == 31) { drm_err("wrong m_seed programmed\n"); return (0U); } else { } m = (u32 )(i + 62); dsi_clock = (m * (u32 )refclk) / (p * n); assert_bpp_mismatch((int )intel_dsi->pixel_format, pipe_bpp); __x = intel_dsi->lane_count * dsi_clock; __d = pipe_bpp; pclk = ((u32 )(__d / 2) + __x) / (u32 )__d; return (pclk); } }

/* Numbered LDV workqueue-model wrappers for this translation unit
 * (same behavior as the 909-913 family earlier in the file). */
bool ldv_queue_work_on_923(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
bool ldv_queue_delayed_work_on_924(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
bool ldv_queue_work_on_925(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
void ldv_flush_workqueue_926(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
bool ldv_queue_delayed_work_on_927(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }

/* Start of the ldv__builtin_expect re-declaration that continues on the
 * next original line. */
__inline
/* Continuation of the "__inline" on the previous original line, then the
 * wrapper declarations for the intel_dsi_panel_vbt.c translation unit. */
static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_937(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_939(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_938(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_941(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_940(struct workqueue_struct *ldv_func_arg1 ) ; extern void *devm_kmalloc(struct device * , size_t , gfp_t ) ;

/* devm_kzalloc(): devm_kmalloc with __GFP_ZERO (0x8000) OR'ed in. */
__inline static void *devm_kzalloc(struct device *dev , size_t size , gfp_t gfp ) { void *tmp ; { tmp = devm_kmalloc(dev, size, gfp | 32768U); return (tmp); } }

extern void drm_panel_init(struct drm_panel * ) ; extern int drm_panel_add(struct drm_panel * ) ; extern int mipi_dsi_attach(struct mipi_dsi_device * ) ; extern ssize_t mipi_dsi_generic_write(struct mipi_dsi_device * , void const * , size_t ) ; extern ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device * , void const * , size_t ) ;

/* container_of(panel, struct vbt_panel, panel): with the drm_panel as
 * the first member the cast offset is zero. */
__inline static struct vbt_panel *to_vbt_panel(struct drm_panel *panel ) { struct drm_panel const *__mptr ; { __mptr = (struct drm_panel const *)panel; return ((struct vbt_panel *)__mptr); } }

/* VLV north-cluster GPIO table used by the VBT MIPI GPIO sequence:
 * {function_reg, pad_reg, init-done flag} per GPIO index. */
static struct gpio_table gtable[12U] = { {16688U, 16696U, 0U}, {16672U, 16680U, 0U}, {16656U, 16664U, 0U}, {16704U, 16712U, 0U}, {16720U, 16728U, 0U}, {16736U, 16744U, 0U}, {16768U, 16776U, 0U}, {16784U, 16792U, 0U}, {16752U, 16760U, 0U}, {16640U, 16648U, 0U}, {16608U, 16616U, 0U}, {16624U, 16632U, 0U}}; 

/* Map a VBT sequence-block port code to enum port: nonzero -> port C
 * (2), zero -> port A (0). */
__inline static enum port intel_dsi_seq_port_to_port(u8 port ) { { return ((unsigned int )port != 0U ? 2 : 0); } }

/* Execute one VBT "send packet" sequence element.  Layout of *data:
 * flags byte, type byte, 16-bit payload length, then payload.  Bits 3-4
 * of flags select the sequence port (forced to port C when only that
 * port is enabled); bit 0 selects LP vs HS (mode_flags bit 11); bits
 * 1-2 are the virtual channel.  The type switch dispatches the MIPI
 * DSI generic / DCS short and long write variants; reads are logged as
 * unimplemented.  Returns data advanced past the payload. */
static u8 const *mipi_exec_send_packet(struct intel_dsi *intel_dsi , u8 const *data ) { struct mipi_dsi_device *dsi_device ; u8 type ; u8 flags ; u8 seq_port ; u16 len ; enum port port ; u8 const *tmp ; u8 const *tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = data; data = data + 1; flags = *tmp; tmp___0 = data; data = data + 1; type = *tmp___0; len = *((u16 *)data); data = data + 2UL; seq_port = (unsigned int )((u8 )((int )flags >> 3)) & 3U; if ((unsigned int )intel_dsi->ports == 4U) { port = 2; } else { port = intel_dsi_seq_port_to_port((int )seq_port); } dsi_device = (intel_dsi->dsi_hosts[(unsigned int )port])->device; if ((unsigned long )dsi_device == (unsigned long )((struct mipi_dsi_device *)0)) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("mipi_exec_send_packet", "no dsi device for port %c\n", (unsigned int )port + 65U); } else { } goto out; } else { } if ((int )flags & 1) { dsi_device->mode_flags = dsi_device->mode_flags & 0xfffffffffffff7ffUL; } else { dsi_device->mode_flags = dsi_device->mode_flags | 2048UL; } dsi_device->channel = (unsigned int )((int )flags >> 1) & 3U; switch ((int )type) { case 3: mipi_dsi_generic_write(dsi_device, (void const *)0, 0UL); goto ldv_48610; case 19: mipi_dsi_generic_write(dsi_device, (void const *)data, 1UL); goto ldv_48610; case 35: mipi_dsi_generic_write(dsi_device, (void const *)data, 2UL); goto ldv_48610; case 4: ; case 20: ; case 36: tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("mipi_exec_send_packet", "Generic Read not yet implemented or used\n"); } else { } goto ldv_48610; case 41: mipi_dsi_generic_write(dsi_device, (void const *)data, (size_t )len); goto ldv_48610; case 5: mipi_dsi_dcs_write_buffer(dsi_device, (void const *)data, 1UL); goto ldv_48610; case 21: mipi_dsi_dcs_write_buffer(dsi_device, (void const *)data, 2UL); goto ldv_48610; case 6: tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("mipi_exec_send_packet", "DCS Read not yet implemented or used\n"); } else { } goto ldv_48610; case 57: mipi_dsi_dcs_write_buffer(dsi_device, (void const *)data, (size_t )len); goto ldv_48610; } ldv_48610: ; out: data = data + (unsigned long )len; return (data); } }

/* Execute a VBT "delay" element: sleep the 32-bit microsecond value at
 * *data (range [delay, delay+10]) and skip the 4 payload bytes. */
static u8 const *mipi_exec_delay(struct intel_dsi *intel_dsi , u8 const *data ) { u32 delay ; { delay = *((u32 const *)data); usleep_range((unsigned long )delay, (unsigned long )(delay + 10U)); data = data + 4UL; return (data); } }

/* Execute a VBT "GPIO" element: two payload bytes (gpio index, action).
 * On first use of a GPIO its function register is programmed
 * (0x20002100) and the init flag latched; then the pad register is
 * written with action|4, all under sb_lock.
 * NOTE(review): gpio indexes gtable[12] without a bounds check — VBT
 * data is trusted here, as in the original driver. */
static u8 const *mipi_exec_gpio(struct intel_dsi *intel_dsi , u8 const *data ) { u8 gpio ; u8 action ; u16 function ; u16 pad ; u32 val ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u8 const *tmp ; u8 const *tmp___0 ; { dev = intel_dsi->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = data; data = data + 1; gpio = *tmp; tmp___0 = data; data = data + 1; action = *tmp___0; function = gtable[(int )gpio].function_reg; pad = gtable[(int )gpio].pad_reg; mutex_lock_nested(& dev_priv->sb_lock, 0U); if ((unsigned int )gtable[(int )gpio].init == 0U) { vlv_gpio_nc_write(dev_priv, (u32 )function, 536923136U); gtable[(int )gpio].init = 1U; } else { } val = (unsigned int )action | 4U; vlv_gpio_nc_write(dev_priv, (u32 )pad, val); mutex_unlock(& dev_priv->sb_lock); return (data); } }

/* Dispatch table for sequence element types 1-3 (0 and 4 are
 * unsupported -> NULL), and the printable names of the five VBT MIPI
 * sequences. */
static fn_mipi_elem_exec const exec_elem[5U] = { (u8 const *(*)(struct intel_dsi * , u8 const * ))0, & mipi_exec_send_packet, & mipi_exec_delay, & mipi_exec_gpio, (u8 const *(*)(struct intel_dsi * , u8 const * ))0}; 
static char const * const seq_name[6U] = { "UNDEFINED", "MIPI_SEQ_ASSERT_RESET", "MIPI_SEQ_INIT_OTP", "MIPI_SEQ_DISPLAY_ON", "MIPI_SEQ_DISPLAY_OFF", "MIPI_SEQ_DEASSERT_RESET"}; 

/* generic_exec_sequence(): walk a VBT sequence blob — first byte is the
 * sequence id (logged via seq_name), then repeated (element-type,
 * payload) records executed through exec_elem until a 0 terminator.
 * Body continues on the next original line. */
static void generic_exec_sequence(struct intel_dsi *intel_dsi , u8 const *data ) { u8 const *(*mipi_elem_exec)(struct intel_dsi * , u8 const * ) ; int index ; long tmp ; { if ((unsigned long
/* Remainder of generic_exec_sequence(): NULL blob is a no-op; an
 * element type with no handler in exec_elem aborts the whole sequence
 * with an error.  Loop ldv_48650/49 is the CIL lowering of the
 * element-walk loop terminated by a 0 element type. */
)data == (unsigned long )((u8 const *)0U)) { return; } else { } tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("generic_exec_sequence", "Starting MIPI sequence - %s\n", seq_name[(int )*data]); } else { } data = data + 1; ldv_48650: index = (int )*data; mipi_elem_exec = exec_elem[index]; if ((unsigned long )mipi_elem_exec == (unsigned long )((u8 const *(*/* const */)(struct intel_dsi * , u8 const * ))0)) { drm_err("Unsupported MIPI element, skipping sequence execution\n"); return; } else { } data = data + 1; data = (*mipi_elem_exec)(intel_dsi, data); if ((unsigned int )((unsigned char )*data) == 0U) { goto ldv_48649; } else { } goto ldv_48650; ldv_48649: ; return; } }

/* drm_panel_funcs.prepare: run VBT sequences 1 (ASSERT_RESET) then
 * 2 (INIT_OTP) from dev_priv->vbt.dsi.sequence[]. */
static int vbt_panel_prepare(struct drm_panel *panel ) { struct vbt_panel *vbt_panel ; struct vbt_panel *tmp ; struct intel_dsi *intel_dsi ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u8 const *sequence ; { tmp = to_vbt_panel(panel); vbt_panel = tmp; intel_dsi = vbt_panel->intel_dsi; dev = intel_dsi->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; sequence = (u8 const *)dev_priv->vbt.dsi.sequence[1]; generic_exec_sequence(intel_dsi, sequence); sequence = (u8 const *)dev_priv->vbt.dsi.sequence[2]; generic_exec_sequence(intel_dsi, sequence); return (0); } }

/* drm_panel_funcs.unprepare: run VBT sequence 5 (DEASSERT_RESET). */
static int vbt_panel_unprepare(struct drm_panel *panel ) { struct vbt_panel *vbt_panel ; struct vbt_panel *tmp ; struct intel_dsi *intel_dsi ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u8 const *sequence ; { tmp = to_vbt_panel(panel); vbt_panel = tmp; intel_dsi = vbt_panel->intel_dsi; dev = intel_dsi->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; sequence = (u8 const *)dev_priv->vbt.dsi.sequence[5]; generic_exec_sequence(intel_dsi, sequence); return (0); } }

/* drm_panel_funcs.enable: run VBT sequence 3 (DISPLAY_ON); body
 * continues on the next original line. */
static int vbt_panel_enable(struct drm_panel *panel ) { struct vbt_panel *vbt_panel ; struct vbt_panel *tmp ; struct intel_dsi *intel_dsi ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u8 const *sequence ; { tmp = to_vbt_panel(panel); vbt_panel = tmp; intel_dsi = vbt_panel->intel_dsi; dev = intel_dsi->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; sequence = (u8 const *)dev_priv->vbt.dsi.sequence[3]; generic_exec_sequence(intel_dsi, sequence); return (0); } }

/* drm_panel_funcs.disable: run VBT sequence 4 (DISPLAY_OFF). */
static int vbt_panel_disable(struct drm_panel *panel ) { struct vbt_panel *vbt_panel ; struct vbt_panel *tmp ; struct intel_dsi *intel_dsi ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u8 const *sequence ; { tmp = to_vbt_panel(panel); vbt_panel = tmp; intel_dsi = vbt_panel->intel_dsi; dev = intel_dsi->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; sequence = (u8 const *)dev_priv->vbt.dsi.sequence[4]; generic_exec_sequence(intel_dsi, sequence); return (0); } }

/* drm_panel_funcs.get_modes: duplicate the fixed VBT LFP mode, mark it
 * preferred (type |= 8), and add it to the connector's probed list.
 * Returns the number of modes added (1) or 0 when no connector/mode. */
static int vbt_panel_get_modes(struct drm_panel *panel ) { struct vbt_panel *vbt_panel ; struct vbt_panel *tmp ; struct intel_dsi *intel_dsi ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_display_mode *mode ; { tmp = to_vbt_panel(panel); vbt_panel = tmp; intel_dsi = vbt_panel->intel_dsi; dev = intel_dsi->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if ((unsigned long )panel->connector == (unsigned long )((struct drm_connector *)0)) { return (0); } else { } mode = drm_mode_duplicate(dev, (struct drm_display_mode const *)dev_priv->vbt.lfp_lvds_vbt_mode); if ((unsigned long )mode == (unsigned long )((struct drm_display_mode *)0)) { return (0); } else { } mode->type = mode->type | 8U; drm_mode_probed_add(panel->connector, mode); return (1); } }

/* The drm_panel_funcs vtable wiring the five callbacks above. */
static struct drm_panel_funcs const vbt_panel_funcs = {& vbt_panel_disable, & vbt_panel_unprepare, & vbt_panel_prepare, & vbt_panel_enable, & vbt_panel_get_modes, 0}; 

/* vbt_panel_init(): build a vbt_panel from VBT MIPI config data —
 * declarations begin here; the body spans the following original
 * lines. */
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi , u16 panel_id ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct mipi_config *mipi_config ; struct
/* Body of vbt_panel_init() (head on the previous original line).
 * Translates VBT mipi_config/pps data into intel_dsi fields: lane
 * count, pixel format, dual-link and burst-mode pixel clock, then the
 * D-PHY timing counters (prepare/exit-zero/clk-zero/trail and the
 * LP<->HS switch counts) derived from tlpx and the bit rate; finally
 * allocates the vbt_panel (devm, 56 bytes), registers it with
 * drm_panel_add() and attaches each enabled DSI host.  All the
 * "(x*den + num*k - 1)/(num*k)" expressions are expanded
 * DIV_ROUND_UP idioms; ldv_487xx labels are CIL's switch/loop
 * lowering. */
mipi_pps_data *pps ; struct drm_display_mode *mode ; struct vbt_panel *vbt_panel ; u32 bits_per_pixel ; u32 tlpx_ns ; u32 extra_byte_count ; u32 bitrate ; u32 tlpx_ui ; u32 ui_num ; u32 ui_den ; u32 prepare_cnt ; u32 exit_zero_cnt ; u32 clk_zero_cnt ; u32 trail_cnt ; u32 ths_prepare_ns ; u32 tclk_trail_ns ; u32 tclk_prepare_clkzero ; u32 ths_prepare_hszero ; u32 lp_to_hs_switch ; u32 hs_to_lp_switch ; u32 pclk ; u32 computed_ddr ; u16 burst_mode_ratio ; enum port port ; long tmp ; u8 _max1 ; u8 _max2 ; u8 _max1___0 ; u8 _max2___0 ; long tmp___0 ; u32 _max1___1 ; u32 _max2___1 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; long tmp___13 ; long tmp___14 ; long tmp___15 ; long tmp___16 ; long tmp___17 ; void *tmp___18 ; { dev = intel_dsi->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; mipi_config = dev_priv->vbt.dsi.config; pps = dev_priv->vbt.dsi.pps; mode = dev_priv->vbt.lfp_lvds_vbt_mode; bits_per_pixel = 24U; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("vbt_panel_init", "\n"); } else { }
/* Basic link parameters from mipi_config bitfields. */
intel_dsi->eotp_pkt = (unsigned int )*((unsigned char *)mipi_config + 24UL) == 0U; intel_dsi->clock_stop = (unsigned int )*((unsigned char *)mipi_config + 24UL) != 0U; intel_dsi->lane_count = (unsigned int )((int )mipi_config->lane_cnt + 1); intel_dsi->pixel_format = (u32 )((int )mipi_config->videomode_color_format << 7); intel_dsi->dual_link = mipi_config->dual_link; intel_dsi->pixel_overlap = mipi_config->pixel_overlap; if (intel_dsi->pixel_format == 256U) { bits_per_pixel = 18U; } else if (intel_dsi->pixel_format == 128U) { bits_per_pixel = 16U; } else { } intel_dsi->operation_mode = (u16 )mipi_config->is_cmd_mode; intel_dsi->video_mode_format = (u32 )mipi_config->video_transfer_mode; intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; intel_dsi->lp_rx_timeout = (u16 )mipi_config->lp_rx_timeout; intel_dsi->turn_arnd_val = (u16 )mipi_config->turn_around_timeout; intel_dsi->rst_timer_val = (u16 )mipi_config->device_reset_timer; intel_dsi->init_count = (u16 )mipi_config->master_init_timer; intel_dsi->bw_timer = mipi_config->dbi_bw_timer; intel_dsi->video_frmt_cfg_bits = (unsigned int )*((unsigned char *)mipi_config + 4UL) != 0U ? 8U : 0U;
/* Dual-link halves pclk; front-back mode adds overlap compensation. */
pclk = (u32 )mode->clock; if ((unsigned int )intel_dsi->dual_link != 0U) { pclk = pclk / 2U; if ((unsigned int )intel_dsi->dual_link == 1U) { pclk = (u32 )(((mode->vtotal * (int )intel_dsi->pixel_overlap) * 60 + 999) / 1000) + pclk; } else { } } else { }
/* Burst mode (video_mode_format 3): scale pclk by the ratio of the
 * VBT target burst frequency to the computed DDR rate. */
if (intel_dsi->video_mode_format == 3U) { if (mipi_config->target_burst_mode_freq != 0U) { computed_ddr = (pclk * bits_per_pixel) / intel_dsi->lane_count; if (mipi_config->target_burst_mode_freq < computed_ddr) { drm_err("Burst mode freq is less than computed\n"); return ((struct drm_panel *)0); } else { } burst_mode_ratio = (u16 )(((mipi_config->target_burst_mode_freq * 100U + computed_ddr) - 1U) / computed_ddr); pclk = ((u32 )burst_mode_ratio * pclk + 99U) / 100U; } else { drm_err("Burst mode target is not set\n"); return ((struct drm_panel *)0); } } else { burst_mode_ratio = 100U; } intel_dsi->burst_mode_ratio = burst_mode_ratio; intel_dsi->pclk = pclk; bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
/* tlpx from the escape-clock divider selection. */
switch ((int )intel_dsi->escape_clk_div) { case 0: tlpx_ns = 50U; goto ldv_48725; case 1: tlpx_ns = 100U; goto ldv_48725; case 2: tlpx_ns = 200U; goto ldv_48725; default: tlpx_ns = 50U; goto ldv_48725; } ldv_48725: ; switch (intel_dsi->lane_count) { case 1U: ; case 2U: extra_byte_count = 2U; goto ldv_48731; case 3U: extra_byte_count = 4U; goto ldv_48731; case 4U: ; default: extra_byte_count = 3U; goto ldv_48731; } ldv_48731: ui_num = 1000000U; ui_den = bitrate; tclk_prepare_clkzero = (u32 )mipi_config->tclk_prepare_clkzero; ths_prepare_hszero = (u32 )mipi_config->ths_prepare_hszero; intel_dsi->lp_byte_clk = (u16 )(((tlpx_ns * ui_den + ui_num * 8U) - 1U) / (ui_num * 8U));
/* D-PHY counters, each rounded up and clamped to its register width
 * (prepare/exit_zero 6 bits, clk_zero 8 bits, trail 5 bits). */
_max1 = mipi_config->ths_prepare; _max2 = mipi_config->tclk_prepare; ths_prepare_ns = (u32 )((int )_max1 > (int )_max2 ? _max1 : _max2); prepare_cnt = ((ths_prepare_ns * ui_den + ui_num * 2U) - 1U) / (ui_num * 2U); exit_zero_cnt = (((ths_prepare_hszero - ths_prepare_ns) * ui_den + ui_num * 2U) - 1U) / (ui_num * 2U); if ((ui_den * 55U) / ui_num > exit_zero_cnt) { if ((ui_den * 55U) % ui_num != 0U) { exit_zero_cnt = exit_zero_cnt + 1U; } else { } } else { } clk_zero_cnt = (((tclk_prepare_clkzero - ths_prepare_ns) * ui_den + ui_num * 2U) - 1U) / (ui_num * 2U); _max1___0 = mipi_config->tclk_trail; _max2___0 = mipi_config->ths_trail; tclk_trail_ns = (u32 )((int )_max1___0 > (int )_max2___0 ? _max1___0 : _max2___0); trail_cnt = ((tclk_trail_ns * ui_den + ui_num * 2U) - 1U) / (ui_num * 2U); if (((prepare_cnt > 63U || exit_zero_cnt > 63U) || clk_zero_cnt > 255U) || trail_cnt > 31U) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Values crossing maximum limits, restricting to max values\n"); } else { } } else { } if (prepare_cnt > 63U) { prepare_cnt = 63U; } else { } if (exit_zero_cnt > 63U) { exit_zero_cnt = 63U; } else { } if (clk_zero_cnt > 255U) { clk_zero_cnt = 255U; } else { } if (trail_cnt > 31U) { trail_cnt = 31U; } else { } intel_dsi->dphy_reg = (((exit_zero_cnt << 24) | (trail_cnt << 16)) | (clk_zero_cnt << 8)) | prepare_cnt;
/* LP<->HS switch counts in byte clocks, plus a per-lane-count fudge. */
tlpx_ui = ((tlpx_ns * ui_den + ui_num) - 1U) / ui_num; lp_to_hs_switch = ((((tlpx_ui * 2U + prepare_cnt) + exit_zero_cnt) + 9U) * 2U - 1U) / 8U; hs_to_lp_switch = (((u32 )mipi_config->ths_trail + tlpx_ui * 2U) + 7U) / 8U; _max1___1 = lp_to_hs_switch; _max2___1 = hs_to_lp_switch; intel_dsi->hs_to_lp_count = (u16 )(_max1___1 > _max2___1 ? _max1___1 : _max2___1); intel_dsi->hs_to_lp_count = (int )intel_dsi->hs_to_lp_count + (int )((u16 )extra_byte_count); intel_dsi->clk_lp_to_hs_count = (u16 )(((((tlpx_ui * 2U + prepare_cnt) + clk_zero_cnt) + 4U) * 2U - 1U) / 8U); intel_dsi->clk_lp_to_hs_count = (int )intel_dsi->clk_lp_to_hs_count + (int )((u16 )extra_byte_count); intel_dsi->clk_hs_to_lp_count = (u16 )((((tlpx_ui + trail_cnt) + 8U) * 2U - 1U) / 8U); intel_dsi->clk_hs_to_lp_count = (int )intel_dsi->clk_hs_to_lp_count + (int )((u16 )extra_byte_count);
/* Debug dump of the derived configuration (drm_debug bit 2 gated). */
tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Eot %s\n", (unsigned int )intel_dsi->eotp_pkt != 0U ? (char *)"enabled" : (char *)"disabled"); } else { } tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Clockstop %s\n", (unsigned int )intel_dsi->clock_stop != 0U ? (char *)"disabled" : (char *)"enabled"); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Mode %s\n", (unsigned int )intel_dsi->operation_mode != 0U ? (char *)"command" : (char *)"video"); } else { } if ((unsigned int )intel_dsi->dual_link == 1U) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); } else { } } else if ((unsigned int )intel_dsi->dual_link == 2U) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); } else { } } else { tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Dual link: NONE\n"); } else { } } tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Pixel Format %d\n", intel_dsi->pixel_format); } else { } tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("vbt_panel_init", "TLPX %d\n", (int )intel_dsi->escape_clk_div); } else { } tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("vbt_panel_init", "LP RX Timeout 0x%x\n", (int )intel_dsi->lp_rx_timeout); } else { } tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Turnaround Timeout 0x%x\n", (int )intel_dsi->turn_arnd_val); } else { } tmp___11 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___11 != 0L) { drm_ut_debug_printk("vbt_panel_init", "Init Count 0x%x\n", (int )intel_dsi->init_count); } else { } tmp___12 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___12 != 0L) { drm_ut_debug_printk("vbt_panel_init", "HS to LP Count 0x%x\n", (int )intel_dsi->hs_to_lp_count); } else { } tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("vbt_panel_init", "LP Byte Clock %d\n", (int )intel_dsi->lp_byte_clk); } else { } tmp___14 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___14 != 0L) { drm_ut_debug_printk("vbt_panel_init", "DBI BW Timer 0x%x\n", intel_dsi->bw_timer); } else { } tmp___15 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___15 != 0L) { drm_ut_debug_printk("vbt_panel_init", "LP to HS Clock Count 0x%x\n", (int )intel_dsi->clk_lp_to_hs_count); } else { } tmp___16 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___16 != 0L) { drm_ut_debug_printk("vbt_panel_init", "HS to LP Clock Count 0x%x\n", (int )intel_dsi->clk_hs_to_lp_count); } else { } tmp___17 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___17 != 0L) { drm_ut_debug_printk("vbt_panel_init", "BTA %s\n", (intel_dsi->video_frmt_cfg_bits & 8U) != 0U ? (char *)"disabled" : (char *)"enabled"); } else { }
/* Panel power-sequencing delays (VBT units are 100 us -> driver units
 * of ms via /10), then allocate and register the panel object.
 * NOTE(review): the devm_kzalloc result is used without a NULL check,
 * as in the original driver source. */
intel_dsi->backlight_off_delay = (u16 )((unsigned int )pps->bl_disable_delay / 10U); intel_dsi->backlight_on_delay = (u16 )((unsigned int )pps->bl_enable_delay / 10U); intel_dsi->panel_on_delay = (u16 )((unsigned int )pps->panel_on_delay / 10U); intel_dsi->panel_off_delay = (u16 )((unsigned int )pps->panel_off_delay / 10U); intel_dsi->panel_pwr_cycle_delay = (u16 )((unsigned int )pps->panel_power_cycle_delay / 10U); tmp___18 = devm_kzalloc(dev->dev, 56UL, 208U); vbt_panel = (struct vbt_panel *)tmp___18; vbt_panel->intel_dsi = intel_dsi; drm_panel_init(& vbt_panel->panel); vbt_panel->panel.funcs = & vbt_panel_funcs; drm_panel_add(& vbt_panel->panel); port = 0; goto ldv_48745; ldv_48744: ; if (((int )intel_dsi->ports >> (int )port) & 1) { mipi_dsi_attach((intel_dsi->dsi_hosts[(unsigned int )port])->device); } else { } port = (enum port )((unsigned int )port + 1U); ldv_48745: ; if ((unsigned int )port <= 4U) { goto ldv_48744; } else { } return (& vbt_panel->panel); } }

/* LDV: allocate the zeroed 48-byte drm_panel used as the receiver for
 * the vbt_panel_funcs harness calls. */
void ldv_initialize_drm_panel_funcs_42(void) { void *tmp ; { tmp = ldv_init_zalloc(48UL); vbt_panel_funcs_group0 = (struct drm_panel *)tmp; return; } }

/* LDV harness driver for vbt_panel_funcs — nondeterministic switch
 * begins here and continues on the next original line. */
void ldv_main_exported_42(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if
(ldv_state_variable_42 == 1) { vbt_panel_get_modes(vbt_panel_funcs_group0); ldv_state_variable_42 = 1; } else { } goto ldv_48754; case 1: ; if (ldv_state_variable_42 == 1) { vbt_panel_enable(vbt_panel_funcs_group0); ldv_state_variable_42 = 1; } else { } goto ldv_48754; case 2: ; if (ldv_state_variable_42 == 1) { vbt_panel_prepare(vbt_panel_funcs_group0); ldv_state_variable_42 = 1; } else { } goto ldv_48754; case 3: ; if (ldv_state_variable_42 == 1) { vbt_panel_disable(vbt_panel_funcs_group0); ldv_state_variable_42 = 1; } else { } goto ldv_48754; case 4: ; if (ldv_state_variable_42 == 1) { vbt_panel_unprepare(vbt_panel_funcs_group0); ldv_state_variable_42 = 1; } else { } goto ldv_48754; default: ldv_stop(); } ldv_48754: ; return; } } bool ldv_queue_work_on_937(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_938(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_939(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_940(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_941(int ldv_func_arg1 , struct 
workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_951(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_953(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_952(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_955(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_954(struct workqueue_struct *ldv_func_arg1 ) ; static struct intel_dvo_device const intel_dvo_devices[7U] = { {"sil164", 2, 397664U, 0U, 56, (struct intel_dvo_dev_ops const *)(& sil164_ops), 0, 0}, {"ch7xxx", 2, 397664U, 0U, 118, (struct intel_dvo_dev_ops const *)(& ch7xxx_ops), 0, 0}, {"ch7xxx", 2, 397664U, 0U, 117, (struct intel_dvo_dev_ops const *)(& ch7xxx_ops), 0, 0}, {"ivch", 1, 397600U, 0U, 2, (struct intel_dvo_dev_ops const *)(& ivch_ops), 0, 0}, {"tfp410", 2, 397664U, 0U, 56, (struct intel_dvo_dev_ops const *)(& tfp410_ops), 0, 0}, {"ch7017", 1, 397664U, 5U, 117, (struct intel_dvo_dev_ops const *)(& ch7017_ops), 0, 0}, {"ns2501", 2, 397632U, 0U, 56, (struct intel_dvo_dev_ops const *)(& ns2501_ops), 0, 0}}; static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder ) { struct intel_encoder const *__mptr ; { __mptr = (struct intel_encoder const *)encoder; return ((struct intel_dvo *)__mptr); } } static struct intel_dvo *intel_attached_dvo(struct drm_connector 
*connector ) { struct intel_encoder *tmp ; struct intel_dvo *tmp___0 ; { tmp = intel_attached_encoder(connector); tmp___0 = enc_to_dvo(tmp); return (tmp___0); } } static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; u32 tmp___0 ; bool tmp___1 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_attached_dvo(& connector->base); intel_dvo = tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dvo->dev.dvo_reg, 1); if ((int )tmp___0 >= 0) { return (0); } else { } tmp___1 = (*((intel_dvo->dev.dev_ops)->get_hw_state))(& intel_dvo->dev); return (tmp___1); } } static bool intel_dvo_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; u32 tmp___0 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_dvo(encoder); intel_dvo = tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dvo->dev.dvo_reg, 1); if ((int )tmp___0 >= 0) { return (0); } else { } *pipe = (enum pipe )((tmp___0 & 1073741824U) >> 30); return (1); } } static void intel_dvo_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_i915_private *dev_priv ; struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; u32 tmp___0 ; u32 flags ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_dvo(encoder); intel_dvo = tmp; flags = 0U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dvo->dev.dvo_reg, 1); if ((tmp___0 & 8U) != 0U) { flags = flags | 1U; } else { flags = flags | 2U; } if ((tmp___0 & 16U) != 0U) { flags = flags | 4U; } else { flags = flags | 8U; } pipe_config->base.adjusted_mode.flags = 
pipe_config->base.adjusted_mode.flags | flags; pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; return; } } static void intel_disable_dvo(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; u32 dvo_reg ; u32 temp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_dvo(encoder); intel_dvo = tmp; dvo_reg = intel_dvo->dev.dvo_reg; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dvo_reg, 1); temp = tmp___0; (*((intel_dvo->dev.dev_ops)->dpms))(& intel_dvo->dev, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dvo_reg, temp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dvo_reg, 1); return; } } static void intel_enable_dvo(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; u32 dvo_reg ; u32 temp ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = enc_to_dvo(encoder); intel_dvo = tmp; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; dvo_reg = intel_dvo->dev.dvo_reg; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dvo_reg, 1); temp = tmp___0; (*((intel_dvo->dev.dev_ops)->mode_set))(& intel_dvo->dev, & (crtc->config)->base.mode, & (crtc->config)->base.adjusted_mode); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dvo_reg, temp | 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dvo_reg, 1); (*((intel_dvo->dev.dev_ops)->dpms))(& intel_dvo->dev, 1); return; } } static void intel_dvo_dpms(struct drm_connector *connector , int mode ) { struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; struct drm_crtc *crtc ; struct intel_crtc_state *config ; struct drm_crtc const *__mptr ; { tmp = 
intel_attached_dvo(connector); intel_dvo = tmp; if (mode != 0) { mode = 3; } else { } if (connector->dpms == mode) { return; } else { } connector->dpms = mode; crtc = intel_dvo->base.base.crtc; if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { intel_dvo->base.connectors_active = 0; return; } else { } if (mode == 0) { __mptr = (struct drm_crtc const *)crtc; config = ((struct intel_crtc *)__mptr)->config; intel_dvo->base.connectors_active = 1; intel_crtc_update_dpms(crtc); (*((intel_dvo->dev.dev_ops)->dpms))(& intel_dvo->dev, 1); } else { (*((intel_dvo->dev.dev_ops)->dpms))(& intel_dvo->dev, 0); intel_dvo->base.connectors_active = 0; intel_crtc_update_dpms(crtc); } intel_modeset_check_state(connector->dev); return; } } static enum drm_mode_status intel_dvo_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; int tmp___0 ; { tmp = intel_attached_dvo(connector); intel_dvo = tmp; if ((mode->flags & 32U) != 0U) { return (8); } else { } if ((unsigned long )intel_dvo->panel_fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { if (mode->hdisplay > (intel_dvo->panel_fixed_mode)->hdisplay) { return (29); } else { } if (mode->vdisplay > (intel_dvo->panel_fixed_mode)->vdisplay) { return (29); } else { } } else { } tmp___0 = (*((intel_dvo->dev.dev_ops)->mode_valid))(& intel_dvo->dev, mode); return ((enum drm_mode_status )tmp___0); } } static bool intel_dvo_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; struct drm_display_mode *adjusted_mode ; { tmp = enc_to_dvo(encoder); intel_dvo = tmp; adjusted_mode = & pipe_config->base.adjusted_mode; if ((unsigned long )intel_dvo->panel_fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { adjusted_mode->hdisplay = (intel_dvo->panel_fixed_mode)->hdisplay; adjusted_mode->hsync_start = (intel_dvo->panel_fixed_mode)->hsync_start; 
adjusted_mode->hsync_end = (intel_dvo->panel_fixed_mode)->hsync_end; adjusted_mode->htotal = (intel_dvo->panel_fixed_mode)->htotal; adjusted_mode->vdisplay = (intel_dvo->panel_fixed_mode)->vdisplay; adjusted_mode->vsync_start = (intel_dvo->panel_fixed_mode)->vsync_start; adjusted_mode->vsync_end = (intel_dvo->panel_fixed_mode)->vsync_end; adjusted_mode->vtotal = (intel_dvo->panel_fixed_mode)->vtotal; adjusted_mode->clock = (intel_dvo->panel_fixed_mode)->clock; drm_mode_set_crtcinfo(adjusted_mode, 0); } else { } return (1); } } static void intel_dvo_pre_enable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *adjusted_mode ; struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; int pipe ; u32 dvo_val ; u32 dvo_reg ; u32 dvo_srcdim_reg ; uint32_t tmp___0 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; adjusted_mode = & (crtc->config)->base.adjusted_mode; tmp = enc_to_dvo(encoder); intel_dvo = tmp; pipe = crtc->pipe; dvo_reg = intel_dvo->dev.dvo_reg; switch (dvo_reg) { case 397600U: ; default: dvo_srcdim_reg = 397604U; goto ldv_48231; case 397632U: dvo_srcdim_reg = 397636U; goto ldv_48231; case 397664U: dvo_srcdim_reg = 397668U; goto ldv_48231; } ldv_48231: tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )dvo_reg, 1); dvo_val = tmp___0 & 117440576U; dvo_val = dvo_val | 16516U; if (pipe == 1) { dvo_val = dvo_val | 1073741824U; } else { } dvo_val = dvo_val | 268435456U; if ((int )adjusted_mode->flags & 1) { dvo_val = dvo_val | 8U; } else { } if ((adjusted_mode->flags & 4U) != 0U) { dvo_val = dvo_val | 16U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dvo_srcdim_reg, (uint32_t )((adjusted_mode->hdisplay << 12) | adjusted_mode->vdisplay), 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )dvo_reg, dvo_val, 1); return; } } static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector , bool force ) { struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; long tmp___0 ; enum drm_connector_status tmp___1 ; { tmp = intel_attached_dvo(connector); intel_dvo = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_dvo_detect", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } tmp___1 = (*((intel_dvo->dev.dev_ops)->detect))(& intel_dvo->dev); return (tmp___1); } } static int intel_dvo_get_modes(struct drm_connector *connector ) { struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; struct drm_i915_private *dev_priv ; struct i2c_adapter *tmp___0 ; int tmp___1 ; struct drm_display_mode *mode ; { tmp = intel_attached_dvo(connector); intel_dvo = tmp; dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; tmp___0 = intel_gmbus_get_adapter(dev_priv, 4U); intel_ddc_get_modes(connector, tmp___0); tmp___1 = list_empty((struct list_head const *)(& connector->probed_modes)); if (tmp___1 == 0) { return (1); } else { } if ((unsigned long )intel_dvo->panel_fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { mode = drm_mode_duplicate(connector->dev, (struct drm_display_mode const *)intel_dvo->panel_fixed_mode); if ((unsigned long )mode != (unsigned long )((struct drm_display_mode *)0)) { drm_mode_probed_add(connector, mode); return (1); } else { } } else { } return (0); } } static void intel_dvo_destroy(struct drm_connector *connector ) { { drm_connector_cleanup(connector); kfree((void const *)connector); return; } } static struct drm_connector_funcs const intel_dvo_connector_funcs = {& intel_dvo_dpms, 0, 0, 0, & intel_dvo_detect, & drm_helper_probe_single_connector_modes, 0, & intel_dvo_destroy, 0, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, 
& intel_connector_atomic_get_property}; static struct drm_connector_helper_funcs const intel_dvo_connector_helper_funcs = {& intel_dvo_get_modes, & intel_dvo_mode_valid, & intel_best_encoder}; static void intel_dvo_enc_destroy(struct drm_encoder *encoder ) { struct intel_dvo *intel_dvo ; struct drm_encoder const *__mptr ; struct intel_dvo *tmp ; { __mptr = (struct drm_encoder const *)encoder; tmp = enc_to_dvo((struct intel_encoder *)__mptr); intel_dvo = tmp; if ((unsigned long )(intel_dvo->dev.dev_ops)->destroy != (unsigned long )((void (*/* const */)(struct intel_dvo_device * ))0)) { (*((intel_dvo->dev.dev_ops)->destroy))(& intel_dvo->dev); } else { } kfree((void const *)intel_dvo->panel_fixed_mode); intel_encoder_destroy(encoder); return; } } static struct drm_encoder_funcs const intel_dvo_enc_funcs = {0, & intel_dvo_enc_destroy}; static struct drm_display_mode *intel_dvo_get_current_mode(struct drm_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_dvo *intel_dvo ; struct intel_dvo *tmp ; uint32_t dvo_val ; uint32_t tmp___0 ; struct drm_display_mode *mode ; struct drm_crtc *crtc ; int pipe ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_attached_dvo(connector); intel_dvo = tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_dvo->dev.dvo_reg, 1); dvo_val = tmp___0; mode = (struct drm_display_mode *)0; if ((int )dvo_val < 0) { pipe = (dvo_val & 1073741824U) != 0U; crtc = intel_get_crtc_for_pipe(dev, pipe); if ((unsigned long )crtc != (unsigned long )((struct drm_crtc *)0)) { mode = intel_crtc_mode_get(dev, crtc); if ((unsigned long )mode != (unsigned long )((struct drm_display_mode *)0)) { mode->type = mode->type | 8U; if ((dvo_val & 8U) != 0U) { mode->flags = mode->flags | 1U; } else { } if ((dvo_val & 16U) != 0U) { mode->flags = mode->flags | 4U; } else { } } else { } } else { } } else { } return (mode); } } void intel_dvo_init(struct 
drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; struct intel_dvo *intel_dvo ; struct intel_connector *intel_connector ; int i ; int encoder_type ; void *tmp ; struct drm_connector *connector ; struct intel_dvo_device const *dvo ; struct i2c_adapter *i2c ; int gpio ; bool dvoinit ; enum pipe pipe ; uint32_t dpll[3U] ; bool tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; encoder_type = 0; tmp = kzalloc(280UL, 208U); intel_dvo = (struct intel_dvo *)tmp; if ((unsigned long )intel_dvo == (unsigned long )((struct intel_dvo *)0)) { return; } else { } intel_connector = intel_connector_alloc(); if ((unsigned long )intel_connector == (unsigned long )((struct intel_connector *)0)) { kfree((void const *)intel_dvo); return; } else { } intel_encoder = & intel_dvo->base; drm_encoder_init(dev, & intel_encoder->base, & intel_dvo_enc_funcs, encoder_type); intel_encoder->disable = & intel_disable_dvo; intel_encoder->enable = & intel_enable_dvo; intel_encoder->get_hw_state = & intel_dvo_get_hw_state; intel_encoder->get_config = & intel_dvo_get_config; intel_encoder->compute_config = & intel_dvo_compute_config; intel_encoder->pre_enable = & intel_dvo_pre_enable; intel_connector->get_hw_state = & intel_dvo_connector_get_hw_state; intel_connector->unregister = & intel_connector_unregister; i = 0; goto ldv_48309; ldv_48308: connector = & intel_connector->base; dvo = (struct intel_dvo_device const *)(& intel_dvo_devices) + (unsigned long )i; tmp___0 = intel_gmbus_is_valid_pin(dev_priv, dvo->gpio); if ((int )tmp___0) { gpio = (int )dvo->gpio; } else if ((int )dvo->type == 1) { gpio = 1; } else { gpio = 5; } i2c = intel_gmbus_get_adapter(dev_priv, (unsigned int )gpio); intel_dvo->dev = *dvo; intel_gmbus_force_bit(i2c, 1); pipe = 0; goto ldv_48293; ldv_48292: dpll[(int )pipe] = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((int )pipe == 0 ? 
(unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), dpll[(int )pipe] | 1073741824U, 1); pipe = (enum pipe )((int )pipe + 1); ldv_48293: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_48292; } else { } dvoinit = (*((dvo->dev_ops)->init))(& intel_dvo->dev, i2c); pipe = 0; goto ldv_48302; ldv_48301: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )pipe == 0 ? (unsigned int )dev_priv->info.display_mmio_offset + 24596U : ((int )pipe == 1 ? (unsigned int )dev_priv->info.display_mmio_offset + 24600U : (unsigned int )dev_priv->info.display_mmio_offset + 24624U)), dpll[(int )pipe], 1); pipe = (enum pipe )((int )pipe + 1); ldv_48302: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > (int )pipe) { goto ldv_48301; } else { } intel_gmbus_force_bit(i2c, 0); if (! 
dvoinit) { goto ldv_48304; } else { } intel_encoder->type = 2; intel_encoder->crtc_mask = 3; switch (dvo->type) { case 2: intel_encoder->cloneable = 6U; drm_connector_init(dev, connector, & intel_dvo_connector_funcs, 2); encoder_type = 2; goto ldv_48306; case 1: intel_encoder->cloneable = 0U; drm_connector_init(dev, connector, & intel_dvo_connector_funcs, 7); encoder_type = 3; goto ldv_48306; } ldv_48306: drm_connector_helper_add(connector, & intel_dvo_connector_helper_funcs); connector->display_info.subpixel_order = 1; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; intel_connector_attach_encoder(intel_connector, intel_encoder); if ((int )dvo->type == 1) { intel_dvo->panel_fixed_mode = intel_dvo_get_current_mode(connector); intel_dvo->panel_wants_dither = 1; } else { } drm_connector_register(connector); return; ldv_48304: i = i + 1; ldv_48309: ; if ((unsigned int )i <= 6U) { goto ldv_48308; } else { } drm_encoder_cleanup(& intel_encoder->base); kfree((void const *)intel_dvo); kfree((void const *)intel_connector); return; } } extern int ldv_probe_41(void) ; extern int ldv_probe_39(void) ; void ldv_initialize_drm_connector_funcs_41(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_dvo_connector_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_initialize_drm_connector_helper_funcs_40(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_dvo_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_main_exported_39(void) { struct drm_encoder *ldvarg370 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg370 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_39 == 2) { intel_dvo_enc_destroy(ldvarg370); ldv_state_variable_39 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48326; case 1: ; if (ldv_state_variable_39 == 1) { ldv_probe_39(); ldv_state_variable_39 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48326; default: 
ldv_stop(); } ldv_48326: ; return; } } void ldv_main_exported_40(void) { struct drm_display_mode *ldvarg484 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg484 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_40 == 1) { intel_dvo_get_modes(intel_dvo_connector_helper_funcs_group0); ldv_state_variable_40 = 1; } else { } goto ldv_48334; case 1: ; if (ldv_state_variable_40 == 1) { intel_dvo_mode_valid(intel_dvo_connector_helper_funcs_group0, ldvarg484); ldv_state_variable_40 = 1; } else { } goto ldv_48334; case 2: ; if (ldv_state_variable_40 == 1) { intel_best_encoder(intel_dvo_connector_helper_funcs_group0); ldv_state_variable_40 = 1; } else { } goto ldv_48334; default: ldv_stop(); } ldv_48334: ; return; } } void ldv_main_exported_41(void) { bool ldvarg381 ; struct drm_connector_state *ldvarg378 ; void *tmp ; uint32_t ldvarg379 ; int ldvarg383 ; uint32_t ldvarg380 ; struct drm_property *ldvarg377 ; void *tmp___0 ; struct drm_connector_state *ldvarg382 ; void *tmp___1 ; uint64_t *ldvarg376 ; void *tmp___2 ; int tmp___3 ; { tmp = ldv_init_zalloc(32UL); ldvarg378 = (struct drm_connector_state *)tmp; tmp___0 = ldv_init_zalloc(104UL); ldvarg377 = (struct drm_property *)tmp___0; tmp___1 = ldv_init_zalloc(32UL); ldvarg382 = (struct drm_connector_state *)tmp___1; tmp___2 = ldv_init_zalloc(8UL); ldvarg376 = (uint64_t *)tmp___2; ldv_memset((void *)(& ldvarg381), 0, 1UL); ldv_memset((void *)(& ldvarg379), 0, 4UL); ldv_memset((void *)(& ldvarg383), 0, 4UL); ldv_memset((void *)(& ldvarg380), 0, 4UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_41 == 2) { intel_dvo_dpms(intel_dvo_connector_funcs_group0, ldvarg383); ldv_state_variable_41 = 2; } else { } if (ldv_state_variable_41 == 1) { intel_dvo_dpms(intel_dvo_connector_funcs_group0, ldvarg383); ldv_state_variable_41 = 1; } else { } goto ldv_48350; case 1: ; if (ldv_state_variable_41 == 2) { 
drm_atomic_helper_connector_destroy_state(intel_dvo_connector_funcs_group0, ldvarg382); ldv_state_variable_41 = 2; } else { } if (ldv_state_variable_41 == 1) { drm_atomic_helper_connector_destroy_state(intel_dvo_connector_funcs_group0, ldvarg382); ldv_state_variable_41 = 1; } else { } goto ldv_48350; case 2: ; if (ldv_state_variable_41 == 2) { drm_atomic_helper_connector_duplicate_state(intel_dvo_connector_funcs_group0); ldv_state_variable_41 = 2; } else { } if (ldv_state_variable_41 == 1) { drm_atomic_helper_connector_duplicate_state(intel_dvo_connector_funcs_group0); ldv_state_variable_41 = 1; } else { } goto ldv_48350; case 3: ; if (ldv_state_variable_41 == 2) { intel_dvo_detect(intel_dvo_connector_funcs_group0, (int )ldvarg381); ldv_state_variable_41 = 2; } else { } if (ldv_state_variable_41 == 1) { intel_dvo_detect(intel_dvo_connector_funcs_group0, (int )ldvarg381); ldv_state_variable_41 = 1; } else { } goto ldv_48350; case 4: ; if (ldv_state_variable_41 == 2) { intel_dvo_destroy(intel_dvo_connector_funcs_group0); ldv_state_variable_41 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48350; case 5: ; if (ldv_state_variable_41 == 2) { drm_helper_probe_single_connector_modes(intel_dvo_connector_funcs_group0, ldvarg380, ldvarg379); ldv_state_variable_41 = 2; } else { } if (ldv_state_variable_41 == 1) { drm_helper_probe_single_connector_modes(intel_dvo_connector_funcs_group0, ldvarg380, ldvarg379); ldv_state_variable_41 = 1; } else { } goto ldv_48350; case 6: ; if (ldv_state_variable_41 == 2) { intel_connector_atomic_get_property(intel_dvo_connector_funcs_group0, (struct drm_connector_state const *)ldvarg378, ldvarg377, ldvarg376); ldv_state_variable_41 = 2; } else { } if (ldv_state_variable_41 == 1) { intel_connector_atomic_get_property(intel_dvo_connector_funcs_group0, (struct drm_connector_state const *)ldvarg378, ldvarg377, ldvarg376); ldv_state_variable_41 = 1; } else { } goto ldv_48350; case 7: ; if (ldv_state_variable_41 == 1) { ldv_probe_41(); 
ldv_state_variable_41 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48350; default: ldv_stop(); } ldv_48350: ; return; } } bool ldv_queue_work_on_951(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_952(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_953(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_954(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_955(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_965(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_967(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct 
work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_966(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_969(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_968(struct workqueue_struct *ldv_func_arg1 ) ; extern int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe * , char const * , char const * ) ; extern ssize_t hdmi_infoframe_pack(union hdmi_infoframe * , void * , size_t ) ; extern int drm_mode_create_aspect_ratio_property(struct drm_device * ) ; extern bool drm_detect_hdmi_monitor(struct edid * ) ; extern bool drm_rgb_quant_range_selectable(struct edid * ) ; extern int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe * , struct drm_display_mode const * ) ; extern int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe * , struct drm_display_mode const * ) ; __inline static int vlv_dport_to_channel___0(struct intel_digital_port *dport ) { { switch ((unsigned int )dport->port) { case 1U: ; case 3U: ; return (0); case 2U: ; return (1); default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/inst/current/envs/linux-4.2-rc1.tar.xz/linux-4.2-rc1/drivers/gpu/drm/i915/intel_drv.h"), "i" (776), "i" (12UL)); ldv_47424: ; goto ldv_47424; } } } __inline static struct intel_digital_port *hdmi_to_dig_port(struct intel_hdmi *intel_hdmi ) { struct intel_hdmi const *__mptr ; { __mptr = (struct intel_hdmi const *)intel_hdmi; return ((struct intel_digital_port *)__mptr + 0xffffffffffffedb8UL); } } static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi ) { struct intel_digital_port *tmp ; { tmp = hdmi_to_dig_port(intel_hdmi); return (tmp->base.base.dev); } } static void 
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *dev_priv ; uint32_t enabled_bits ; struct drm_i915_private *__p ; int __ret_warn_on ; uint32_t tmp___0 ; long tmp___1 ; { tmp = intel_hdmi_to_dev(intel_hdmi); dev = tmp; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); enabled_bits = 2147483648U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 1); __ret_warn_on = (tmp___0 & enabled_bits) != 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c", 56, "HDMI port enabled, expecting disabled\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder ) { struct intel_digital_port *intel_dig_port ; struct drm_encoder const *__mptr ; { __mptr = (struct drm_encoder const *)encoder; intel_dig_port = (struct intel_digital_port *)__mptr; return (& intel_dig_port->hdmi); } } static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector ) { struct intel_encoder *tmp ; struct intel_hdmi *tmp___0 ; { tmp = intel_attached_encoder(connector); tmp___0 = enc_to_intel_hdmi(& tmp->base); return (tmp___0); } } static u32 g4x_infoframe_index(enum hdmi_infoframe_type type ) { long tmp ; { switch ((unsigned int )type) { case 130U: ; return (0U); case 131U: ; return (1572864U); case 129U: ; return (524288U); default: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("g4x_infoframe_index", "unknown info frame type %d\n", (unsigned int )type); } else { } return (0U); } } } static u32 g4x_infoframe_enable(enum hdmi_infoframe_type 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: remainder of g4x_infoframe_enable() and all of
 * hsw_infoframe_enable() — both map infoframe type codes 130U/131U/129U to
 * per-platform DIP enable bits (G4x: 2097152U/16777216U/4194304U; HSW:
 * 4096U/16777216U-analogue 1U/256U), logging a debug message and returning 0
 * for unknown types; hsw_infoframe_data_reg() — computes the HSW DIP data
 * register address from the per-transcoder offset table
 * (trans_offsets[cpu_transcoder] - trans_offsets[0]) plus
 * display_mmio_offset plus a per-type constant (393760U/393888U/393824U);
 * and the start of g4x_write_infoframe()'s local declarations.
 */
type ) { long tmp ; { switch ((unsigned int )type) { case 130U: ; return (2097152U); case 131U: ; return (16777216U); case 129U: ; return (4194304U); default: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("g4x_infoframe_enable", "unknown info frame type %d\n", (unsigned int )type); } else { } return (0U); } } } static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type ) { long tmp ; { switch ((unsigned int )type) { case 130U: ; return (4096U); case 131U: ; return (1U); case 129U: ; return (256U); default: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_infoframe_enable", "unknown info frame type %d\n", (unsigned int )type); } else { } return (0U); } } } static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type , enum transcoder cpu_transcoder , struct drm_i915_private *dev_priv ) { long tmp ; { switch ((unsigned int )type) { case 130U: ; return (((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393760U); case 131U: ; return (((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393888U); case 129U: ; return (((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393824U); default: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("hsw_infoframe_data_reg", "unknown info frame type %d\n", (unsigned int )type); } else { } return (0U); } } } static void g4x_write_infoframe(struct drm_encoder *encoder , enum hdmi_infoframe_type type , void const *frame , ssize_t len ) { uint32_t const *data ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; uint32_t tmp ; int i ; int 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: body of g4x_write_infoframe() — reads VIDEO_DIP_CTL (397680),
 * WARNs "Writing DIP with CTL reg disabled" when bit 31 is clear
 * ((int)val >= 0), masks with 4293394416U, ORs in the frame index, clears
 * the frame type's enable bit, then writes the payload 4 bytes at a time to
 * the data register (397688) and zero-pads up to 32 bytes before
 * re-enabling the type and setting the frequency bits (clear 4294770687U
 * complement, OR 65536U), finishing with a posting read. The goto/label
 * pairs (ldv_48327..ldv_48331) are CIL's encoding of the original loops.
 * Also: start of g4x_infoframe_enabled()'s declarations.
 */
__ret_warn_on ; long tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; { data = (uint32_t const *)frame; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397680L, 1); val = tmp; __ret_warn_on = (int )val >= 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c", 143, "Writing DIP with CTL reg disabled\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); val = val & 4293394416U; tmp___1 = g4x_infoframe_index(type); val = tmp___1 | val; tmp___2 = g4x_infoframe_enable(type); val = ~ tmp___2 & val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397680L, val, 1); __asm__ volatile ("": : : "memory"); i = 0; goto ldv_48328; ldv_48327: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397688L, *data, 1); data = data + 1; i = i + 4; ldv_48328: ; if ((ssize_t )i < len) { goto ldv_48327; } else { } goto ldv_48331; ldv_48330: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397688L, 0U, 1); i = i + 4; ldv_48331: ; if (i <= 31) { goto ldv_48330; } else { } __asm__ volatile ("": : : "memory"); tmp___3 = g4x_infoframe_enable(type); val = tmp___3 | val; val = val & 4294770687U; val = val | 65536U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397680L, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397680L, 0); return; } } static bool g4x_infoframe_enabled(struct drm_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; u32 val ; uint32_t tmp___0 ; { dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; tmp___0 = 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: tail of g4x_infoframe_enabled() — true iff the DIP port-select
 * field (mask 1610612736U, bits 30:29) matches this digital port's
 * (port << 29) AND the enable bit 31 is set; then ibx_write_infoframe() —
 * the same WARN/index/disable/payload-write sequence as the G4x variant but
 * against the per-pipe DIP control register (pipe*4096 + 918016, data at
 * pipe*4096 + 918024), ending mid-way through the zero-padding loop.
 */
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397680L, 1); val = tmp___0; if ((unsigned int )intel_dig_port->port << 29 == (val & 1610612736U)) { return ((val & 2147483648U) != 0U); } else { } return (0); } } static void ibx_write_infoframe(struct drm_encoder *encoder , enum hdmi_infoframe_type type , void const *frame , ssize_t len ) { uint32_t const *data ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int i ; int reg ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; { data = (uint32_t const *)frame; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; reg = (int )intel_crtc->pipe * 4096 + 918016; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; __ret_warn_on = (int )val >= 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c", 194, "Writing DIP with CTL reg disabled\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); val = val & 4293394416U; tmp___1 = g4x_infoframe_index(type); val = tmp___1 | val; tmp___2 = g4x_infoframe_enable(type); val = ~ tmp___2 & val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); __asm__ volatile ("": : : "memory"); i = 0; goto ldv_48358; ldv_48357: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 918024), *data, 1); data = data + 1; i = i + 4; ldv_48358: ; if ((ssize_t )i < len) { goto ldv_48357; } else { } goto ldv_48361; ldv_48360: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: tail of ibx_write_infoframe() — finish zero-padding to 32
 * bytes, re-enable the frame type, set frequency bits (65536U), posting
 * read; ibx_infoframe_enabled() — port-select (bits 30:29) match plus
 * enable bit 31 on the per-pipe DIP control register (pipe*4096 + 918016);
 * then the start of cpt_write_infoframe() up through its "CTL reg disabled"
 * WARN setup.
 */
918024), 0U, 1); i = i + 4; ldv_48361: ; if (i <= 31) { goto ldv_48360; } else { } __asm__ volatile ("": : : "memory"); tmp___3 = g4x_infoframe_enable(type); val = tmp___3 | val; val = val & 4294770687U; val = val | 65536U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } static bool ibx_infoframe_enabled(struct drm_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; int reg ; u32 val ; uint32_t tmp___0 ; { dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; reg = (int )intel_crtc->pipe * 4096 + 918016; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp___0; if ((unsigned int )intel_dig_port->port << 29 == (val & 1610612736U)) { return ((val & 2147483648U) != 0U); } else { } return (0); } } static void cpt_write_infoframe(struct drm_encoder *encoder , enum hdmi_infoframe_type type , void const *frame , ssize_t len ) { uint32_t const *data ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int i ; int reg ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; { data = (uint32_t const *)frame; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; reg = (int )intel_crtc->pipe * 4096 + 918016; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; __ret_warn_on = (int )val >= 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: body of cpt_write_infoframe() — same per-pipe DIP sequence as
 * ibx, except the type-130U frame (presumably AVI — confirm) is NOT
 * disabled before the payload write (the "if ((unsigned int )type != 130U)"
 * guard); cpt_infoframe_enabled() — on CPT only the enable bit 31 is
 * tested, there is no port-select field; then the start of
 * vlv_write_infoframe()'s signature.
 */
(tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c", 247, "Writing DIP with CTL reg disabled\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); val = val & 4293394416U; tmp___1 = g4x_infoframe_index(type); val = tmp___1 | val; if ((unsigned int )type != 130U) { tmp___2 = g4x_infoframe_enable(type); val = ~ tmp___2 & val; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); __asm__ volatile ("": : : "memory"); i = 0; goto ldv_48392; ldv_48391: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 918024), *data, 1); data = data + 1; i = i + 4; ldv_48392: ; if ((ssize_t )i < len) { goto ldv_48391; } else { } goto ldv_48395; ldv_48394: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((int )intel_crtc->pipe * 4096 + 918024), 0U, 1); i = i + 4; ldv_48395: ; if (i <= 31) { goto ldv_48394; } else { } __asm__ volatile ("": : : "memory"); tmp___3 = g4x_infoframe_enable(type); val = tmp___3 | val; val = val & 4294770687U; val = val | 65536U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } static bool cpt_infoframe_enabled(struct drm_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int reg ; u32 val ; uint32_t tmp ; { dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; reg = (int )intel_crtc->pipe * 4096 + 918016; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; return ((val & 2147483648U) != 0U); } } static void vlv_write_infoframe(struct drm_encoder 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: vlv_write_infoframe() — the VLV DIP control register is
 * selected by pipe (pipe 0 -> 1966592, pipe 1 -> 1970544, else 1970672;
 * the matching data registers are 1966600/1970548/1970676), followed by
 * the same WARN-if-disabled / index / disable-type / payload-write
 * sequence as the other platform variants, ending mid zero-pad loop.
 */
*encoder , enum hdmi_infoframe_type type , void const *frame , ssize_t len ) { uint32_t const *data ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; int i ; int reg ; u32 val ; uint32_t tmp ; int __ret_warn_on ; long tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; { data = (uint32_t const *)frame; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; reg = (int )intel_crtc->pipe != 0 ? ((int )intel_crtc->pipe == 1 ? 1970544 : 1970672) : 1966592; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp; __ret_warn_on = (int )val >= 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c", 299, "Writing DIP with CTL reg disabled\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); val = val & 4293394416U; tmp___1 = g4x_infoframe_index(type); val = tmp___1 | val; tmp___2 = g4x_infoframe_enable(type); val = ~ tmp___2 & val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); __asm__ volatile ("": : : "memory"); i = 0; goto ldv_48425; ldv_48424: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )intel_crtc->pipe != 0 ? ((int )intel_crtc->pipe == 1 ? 1970548L : 1970676L) : 1966600L, *data, 1); data = data + 1; i = i + 4; ldv_48425: ; if ((ssize_t )i < len) { goto ldv_48424; } else { } goto ldv_48428; ldv_48427: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (int )intel_crtc->pipe != 0 ? ((int )intel_crtc->pipe == 1 ? 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: tail of vlv_write_infoframe() — finish zero-padding to 32
 * bytes, re-enable the type, set frequency bits (65536U), posting read;
 * vlv_infoframe_enabled() — port-select (mask 1610612736U) match plus
 * enable bit 31 on the pipe-selected VLV DIP register; then the start of
 * hsw_write_infoframe(), whose control register is computed from the
 * per-transcoder offset table + display_mmio_offset + 393728U.
 */
1970548L : 1970676L) : 1966600L, 0U, 1); i = i + 4; ldv_48428: ; if (i <= 31) { goto ldv_48427; } else { } __asm__ volatile ("": : : "memory"); tmp___3 = g4x_infoframe_enable(type); val = tmp___3 | val; val = val & 4294770687U; val = val | 65536U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } } static bool vlv_infoframe_enabled(struct drm_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; int reg ; u32 val ; uint32_t tmp___0 ; { dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; reg = (int )intel_crtc->pipe != 0 ? ((int )intel_crtc->pipe == 1 ? 1970544 : 1970672) : 1966592; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp___0; if ((unsigned int )intel_dig_port->port << 29 == (val & 1610612736U)) { return ((val & 2147483648U) != 0U); } else { } return (0); } } static void hsw_write_infoframe(struct drm_encoder *encoder , enum hdmi_infoframe_type type , void const *frame , ssize_t len ) { uint32_t const *data ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u32 ctl_reg ; u32 data_reg ; int i ; u32 val ; uint32_t tmp ; u32 tmp___0 ; u32 tmp___1 ; { data = (uint32_t const *)frame; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; ctl_reg = ((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )(intel_crtc->config)->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: body of hsw_write_infoframe() — returns early if
 * hsw_infoframe_data_reg() yields 0 (unknown type), clears the type's
 * enable bit, writes the payload at data_reg + i (4 bytes per iteration),
 * zero-pads to 32 bytes, then re-enables the type with a posting read;
 * hsw_infoframe_enabled() — true if any bit of mask 4353U (0x1101, the
 * union of the three hsw_infoframe_enable() values 4096U|256U|1U) is set
 * in the per-transcoder control register; then the start of
 * intel_write_infoframe()'s declarations.
 */
)dev_priv->info.display_mmio_offset) + 393728U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ctl_reg, 1); val = tmp; data_reg = hsw_infoframe_data_reg(type, (intel_crtc->config)->cpu_transcoder, dev_priv); if (data_reg == 0U) { return; } else { } tmp___0 = hsw_infoframe_enable(type); val = ~ tmp___0 & val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ctl_reg, val, 1); __asm__ volatile ("": : : "memory"); i = 0; goto ldv_48458; ldv_48457: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(data_reg + (u32 )i), *data, 1); data = data + 1; i = i + 4; ldv_48458: ; if ((ssize_t )i < len) { goto ldv_48457; } else { } goto ldv_48461; ldv_48460: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(data_reg + (u32 )i), 0U, 1); i = i + 4; ldv_48461: ; if (i <= 31) { goto ldv_48460; } else { } __asm__ volatile ("": : : "memory"); tmp___1 = hsw_infoframe_enable(type); val = tmp___1 | val; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ctl_reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ctl_reg, 0); return; } } static bool hsw_infoframe_enabled(struct drm_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u32 ctl_reg ; u32 val ; uint32_t tmp ; { dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; ctl_reg = ((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )(intel_crtc->config)->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393728U; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ctl_reg, 1); val = tmp; return ((val & 4353U) != 0U); } } static void intel_write_infoframe(struct drm_encoder *encoder , union hdmi_infoframe *frame ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; uint8_t buffer[32U] ; 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: intel_write_infoframe() — packs the frame into buffer[1..31]
 * via hdmi_infoframe_pack(), then shifts bytes 1..3 down to 0..2 and zeroes
 * byte 3 with len+1 (rearranging the packed header for the hardware's DIP
 * layout — confirm against upstream intel_hdmi.c) before dispatching to the
 * platform write_infoframe hook; intel_hdmi_set_avi_infoframe() — builds an
 * AVI infoframe from the adjusted mode, choosing limited (1) vs full (2)
 * quantization range when rgb_quant_range_selectable, logging on failure;
 * intel_hdmi_set_spd_infoframe() — "Intel"/"Integrated gfx" SPD frame with
 * sdi = 9; intel_hdmi_set_hdmi_infoframe() — HDMI vendor infoframe from the
 * display mode, silently skipped on error.
 */
ssize_t len ; { tmp = enc_to_intel_hdmi(encoder); intel_hdmi = tmp; len = hdmi_infoframe_pack(frame, (void *)(& buffer) + 1U, 31UL); if (len < 0L) { return; } else { } buffer[0] = buffer[1]; buffer[1] = buffer[2]; buffer[2] = buffer[3]; buffer[3] = 0U; len = len + 1L; (*(intel_hdmi->write_infoframe))(encoder, frame->any.type, (void const *)(& buffer), len); return; } } static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder , struct drm_display_mode *adjusted_mode ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; union hdmi_infoframe frame ; int ret ; { tmp = enc_to_intel_hdmi(encoder); intel_hdmi = tmp; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; ret = drm_hdmi_avi_infoframe_from_display_mode(& frame.avi, (struct drm_display_mode const *)adjusted_mode); if (ret < 0) { drm_err("couldn\'t fill AVI infoframe\n"); return; } else { } if ((int )intel_hdmi->rgb_quant_range_selectable) { if ((int )(intel_crtc->config)->limited_color_range) { frame.avi.quantization_range = 1; } else { frame.avi.quantization_range = 2; } } else { } intel_write_infoframe(encoder, & frame); return; } } static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder ) { union hdmi_infoframe frame ; int ret ; { ret = hdmi_spd_infoframe_init(& frame.spd, "Intel", "Integrated gfx"); if (ret < 0) { drm_err("couldn\'t fill SPD infoframe\n"); return; } else { } frame.spd.sdi = 9; intel_write_infoframe(encoder, & frame); return; } } static void intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder , struct drm_display_mode *adjusted_mode ) { union hdmi_infoframe frame ; int ret ; { ret = drm_hdmi_vendor_infoframe_from_display_mode(& frame.vendor.hdmi, (struct drm_display_mode const *)adjusted_mode); if (ret < 0) { return; } else { } intel_write_infoframe(encoder, & frame); return; } } static void 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: g4x_set_infoframes() — asserts the HDMI port is disabled, sets
 * frequency bits (65536U), then: on disable, clears bit 31 only if it was
 * set; on enable, if the DIP port-select field (mask 1610612736U) does not
 * match this port it first switches the DIP off if live, then re-selects
 * the port, sets bit 31, clears mask-complement bits (& 4290772991U), posts
 * the write, and emits the AVI, SPD and HDMI-vendor infoframes; then the
 * start of ibx_set_infoframes()'s declarations.
 */
g4x_set_infoframes(struct drm_encoder *encoder , bool enable , struct drm_display_mode *adjusted_mode ) { struct drm_i915_private *dev_priv ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_hdmi *intel_hdmi ; u32 reg ; u32 val ; uint32_t tmp___0 ; u32 port ; { dev_priv = (struct drm_i915_private *)(encoder->dev)->dev_private; tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; intel_hdmi = & intel_dig_port->hdmi; reg = 397680U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp___0; port = (unsigned int )intel_dig_port->port << 29; assert_hdmi_port_disabled(intel_hdmi); val = val | 65536U; if (! enable) { if ((int )val >= 0) { return; } else { } val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } else { } if ((val & 1610612736U) != port) { if ((int )val < 0) { val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); } else { } val = val & 2684354559U; val = val | port; } else { } val = val | 2147483648U; val = val & 4290772991U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); return; } } static void ibx_set_infoframes(struct drm_encoder *encoder , bool enable , struct drm_display_mode *adjusted_mode ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_hdmi *intel_hdmi ; u32 reg ; u32 val ; uint32_t tmp___0 ; u32 port ; { dev_priv = (struct drm_i915_private *)(encoder->dev)->dev_private; __mptr 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: body of ibx_set_infoframes() — the same port-select/enable
 * sequence as the g4x variant but on the per-pipe DIP register
 * (pipe*4096 + 918016) and with a different final clear mask
 * (& 4248829951U), followed by the AVI/SPD/vendor infoframe writes; then
 * the start of cpt_set_infoframes()'s declarations and register read.
 */
= (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; intel_hdmi = & intel_dig_port->hdmi; reg = (u32 )((int )intel_crtc->pipe * 4096 + 918016); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp___0; port = (unsigned int )intel_dig_port->port << 29; assert_hdmi_port_disabled(intel_hdmi); val = val | 65536U; if (! enable) { if ((int )val >= 0) { return; } else { } val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } else { } if ((val & 1610612736U) != port) { if ((int )val < 0) { val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); } else { } val = val & 2684354559U; val = val | port; } else { } val = val | 2147483648U; val = val & 4248829951U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); return; } } static void cpt_set_infoframes(struct drm_encoder *encoder , bool enable , struct drm_display_mode *adjusted_mode ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; u32 reg ; u32 val ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_intel_hdmi(encoder); intel_hdmi = tmp; reg = (u32 )((int )intel_crtc->pipe * 4096 + 918016); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp___0; 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: body of cpt_set_infoframes() — CPT has no port-select field:
 * disable clears bit 31 plus extra bits (& 2145386495U); enable ORs
 * 2149580800U (bit 31 + an always-on frame enable bit) and ANDs
 * 4248829951U before writing the AVI/SPD/vendor infoframes; then the start
 * of vlv_set_infoframes() through its pipe-selected register read,
 * port computation and the "if (!" of the !enable test.
 */
assert_hdmi_port_disabled(intel_hdmi); val = val | 65536U; if (! enable) { if ((int )val >= 0) { return; } else { } val = val & 2145386495U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } else { } val = val | 2149580800U; val = val & 4248829951U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); return; } } static void vlv_set_infoframes(struct drm_encoder *encoder , bool enable , struct drm_display_mode *adjusted_mode ) { struct drm_i915_private *dev_priv ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp___0 ; u32 reg ; u32 val ; uint32_t tmp___1 ; u32 port ; { dev_priv = (struct drm_i915_private *)(encoder->dev)->dev_private; tmp = enc_to_dig_port(encoder); intel_dig_port = tmp; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = enc_to_intel_hdmi(encoder); intel_hdmi = tmp___0; reg = (int )intel_crtc->pipe != 0 ? ((int )intel_crtc->pipe == 1 ? 1970544U : 1970672U) : 1966592U; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp___1; port = (unsigned int )intel_dig_port->port << 29; assert_hdmi_port_disabled(intel_hdmi); val = val | 65536U; if (! 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: body of vlv_set_infoframes() — same disable / port-reselect /
 * enable sequence as the g4x variant on the pipe-selected VLV DIP register,
 * with final clear mask 4246732799U, followed by the AVI/SPD/vendor
 * infoframe writes; then the start of hsw_set_infoframes() — computes the
 * per-transcoder control register (trans_offsets delta +
 * display_mmio_offset + 393728U), reads it, asserts the port is disabled,
 * and begins the "if (!" of the !enable test.
 */
enable) { if ((int )val >= 0) { return; } else { } val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } else { } if ((val & 1610612736U) != port) { if ((int )val < 0) { val = val & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); } else { } val = val & 2684354559U; val = val | port; } else { } val = val | 2147483648U; val = val & 4246732799U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); return; } } static void hsw_set_infoframes(struct drm_encoder *encoder , bool enable , struct drm_display_mode *adjusted_mode ) { struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; u32 reg ; u32 val ; uint32_t tmp___0 ; { dev_priv = (struct drm_i915_private *)(encoder->dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_intel_hdmi(encoder); intel_hdmi = tmp; reg = ((unsigned int )(dev_priv->info.trans_offsets[(unsigned int )(intel_crtc->config)->cpu_transcoder] - dev_priv->info.trans_offsets[0]) + (unsigned int )dev_priv->info.display_mmio_offset) + 393728U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 1); val = tmp___0; assert_hdmi_port_disabled(intel_hdmi); if (! 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: tail of hsw_set_infoframes() — disable writes 0 to the
 * per-transcoder control register; enable clears mask-complement bits
 * (& 4293852911U) and writes the AVI/SPD/vendor infoframes (HSW has no
 * port-select field); then intel_hdmi_prepare() — builds the SDVO/HDMI
 * control word from 2048U: colour-range bit only on non-PCH
 * (pch_type == 0), VSYNC/HSYNC polarity from adjusted_mode->flags (4U/1),
 * 12bpc-style depth bits (201326592U) when pipe_bpp > 24, HDMI-sink enable
 * (512U), and a pipe-select field whose shift depends on pch_type/gen
 * (the branch continues on the next physical line).
 */
enable) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, 0U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); return; } else { } val = val & 4293852911U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )reg, 0); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); return; } } static void intel_hdmi_prepare(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct drm_display_mode *adjusted_mode ; u32 hdmi_val ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; tmp = enc_to_intel_hdmi(& encoder->base); intel_hdmi = tmp; adjusted_mode = & (crtc->config)->base.adjusted_mode; hdmi_val = 2048U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 0U) { hdmi_val = intel_hdmi->color_range | hdmi_val; } else { } if ((adjusted_mode->flags & 4U) != 0U) { hdmi_val = hdmi_val | 16U; } else { } if ((int )adjusted_mode->flags & 1) { hdmi_val = hdmi_val | 8U; } else { } if ((crtc->config)->pipe_bpp > 24) { hdmi_val = hdmi_val | 201326592U; } else { hdmi_val = hdmi_val; } if ((int )(crtc->config)->has_hdmi_sink) { hdmi_val = hdmi_val | 512U; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type == 2U) { hdmi_val = (u32 )((int )crtc->pipe << 29) | hdmi_val; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { __p___1 = 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: tail of intel_hdmi_prepare() — pipe-select shift resolution
 * (29 for pch_type 2; 24 for gen 8 on the platform whose flag byte at
 * offset +45UL is set — presumably the CHV/BSW path, confirm; else 30),
 * then writes hdmi_reg and posts it with a readback;
 * intel_hdmi_get_hw_state() — returns false when the port's power domain
 * is off or bit 31 of hdmi_reg is clear, otherwise decodes *pipe from the
 * matching bit-field (mask 1610612736U>>29, 50331648U>>24, or
 * 1073741824U>>30 by the same platform split) and returns true; then the
 * start of intel_hdmi_get_config()'s split signature.
 */
to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 8U) { hdmi_val = (u32 )((int )crtc->pipe << 24) | hdmi_val; } else { hdmi_val = (u32 )((int )crtc->pipe << 30) | hdmi_val; } } else { hdmi_val = (u32 )((int )crtc->pipe << 30) | hdmi_val; } } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, hdmi_val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); return; } } static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; enum intel_display_power_domain power_domain ; u32 tmp___0 ; bool tmp___1 ; int tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_intel_hdmi(& encoder->base); intel_hdmi = tmp; power_domain = intel_display_port_power_domain(encoder); tmp___1 = intel_display_power_is_enabled(dev_priv, power_domain); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 1); if ((int )tmp___0 >= 0) { return (0); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type == 2U) { *pipe = (enum pipe )((tmp___0 & 1610612736U) >> 29); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { *pipe = (enum pipe )((tmp___0 & 50331648U) >> 24); } else { *pipe = (enum pipe )((tmp___0 & 1073741824U) >> 30); } } else { *pipe = (enum pipe )((tmp___0 & 1073741824U) >> 30); } } return (1); } } static void intel_hdmi_get_config(struct 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: intel_hdmi_get_config() — decodes hardware state back into
 * pipe_config from the hdmi_reg readout: sync polarity flags (bits 8U/16U),
 * has_hdmi_sink (512U), has_infoframe via the platform infoframe_enabled
 * hook, has_audio (64U), and limited_color_range (256U, non-PCH only);
 * the dot clock is port_clock * 2 / 3 when the depth field
 * (mask 469762048U) equals 201326592U (the 12bpc encoding set by
 * intel_hdmi_prepare()), cross-checked via
 * ironlake_check_encoder_dotclock() on PCH platforms; then the start of
 * intel_enable_hdmi()'s declarations.
 */
intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 tmp___0 ; u32 flags ; int dotclock ; bool tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { tmp = enc_to_intel_hdmi(& encoder->base); intel_hdmi = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; flags = 0U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 1); if ((tmp___0 & 8U) != 0U) { flags = flags | 1U; } else { flags = flags | 2U; } if ((tmp___0 & 16U) != 0U) { flags = flags | 4U; } else { flags = flags | 8U; } if ((tmp___0 & 512U) != 0U) { pipe_config->has_hdmi_sink = 1; } else { } tmp___1 = (*(intel_hdmi->infoframe_enabled))(& encoder->base); if ((int )tmp___1) { pipe_config->has_infoframe = 1; } else { } if ((tmp___0 & 64U) != 0U) { pipe_config->has_audio = 1; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 0U && (tmp___0 & 256U) != 0U) { pipe_config->limited_color_range = 1; } else { } pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | flags; if ((tmp___0 & 469762048U) == 201326592U) { dotclock = (pipe_config->port_clock * 2) / 3; } else { dotclock = pipe_config->port_clock; } __p___0 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___0->pch_type != 0U) { ironlake_check_encoder_dotclock((struct intel_crtc_state const *)pipe_config, dotclock); } else { } pipe_config->base.adjusted_mode.crtc_clock = dotclock; return; } } static void intel_enable_hdmi(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; u32 temp ; u32 enable_bits ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct 
/*
 * CIL-flattened generated code; tokens byte-identical, comment only.
 * This line: body of intel_enable_hdmi() — builds enable_bits (bit 31,
 * plus audio bit 64U when the crtc config has audio, plus pipe << 30 on
 * pch_type 1 / IBX), then on any PCH platform (pch_type != 0) first writes
 * the register with bit 31 cleared, ORs in the enable bits, writes, and
 * writes the enabled value once more with posting reads in between
 * (presumably the documented PCH HDMI double-write requirement — confirm
 * against upstream intel_hdmi.c); ends at the start of the audio path's
 * WARN setup, which continues on the next physical line.
 */
drm_i915_private *__p___1 ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_intel_hdmi(& encoder->base); intel_hdmi = tmp; enable_bits = 2147483648U; if ((int )(intel_crtc->config)->has_audio) { enable_bits = enable_bits | 64U; } else { } temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { enable_bits = (u32 )((int )intel_crtc->pipe << 30) | enable_bits; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, temp & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); } else { } temp = temp | enable_bits; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); } else { } if ((int )(intel_crtc->config)->has_audio) { __ret_warn_on = ! 
/* Continuation of intel_enable_hdmi(): WARN_ON(audio && !hdmi_sink),
 * optional DRM debug print, then enable the audio codec. */
(intel_crtc->config)->has_hdmi_sink; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c", 858, "WARN_ON(!intel_crtc->config->has_hdmi_sink)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_enable_hdmi", "Enabling HDMI audio on pipe %c\n", (int )intel_crtc->pipe + 65); } else { } intel_audio_codec_enable(encoder); } else { } return; } }
/* vlv_enable_hdmi() - intentionally empty: on VLV the port enable is done
 * from the pre_enable hook (vlv_hdmi_pre_enable() below calls
 * intel_enable_hdmi() directly). */
static void vlv_enable_hdmi(struct intel_encoder *encoder ) { { return; } }
/* intel_disable_hdmi() - clear the enable and audio bits (mask 2147483583U
 * = ~(bit31|bit6)) from the port register. On IBX (pch_type == 1U) driving
 * pipe B (crtc->pipe == 1), the port is briefly re-enabled with bit 30
 * cleared and written twice before the final disable — NOTE(review): looks
 * like the transcoder-select restore workaround; confirm upstream. */
static void intel_disable_hdmi(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; u32 temp ; struct drm_i915_private *__p ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = enc_to_intel_hdmi(& encoder->base); intel_hdmi = tmp; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 1); temp = temp & 2147483583U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U && (int )crtc->pipe == 1) { temp = temp & 3221225471U; temp = temp | 2147483648U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); temp = temp & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_hdmi->hdmi_reg, temp, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_hdmi->hdmi_reg, 0); } else { } return; } }
/* g4x_disable_hdmi() - disable audio codec first (if active), then the port. */
static void g4x_disable_hdmi(struct intel_encoder *encoder ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; { __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; if ((int )(crtc->config)->has_audio) { intel_audio_codec_disable(encoder); } else { } intel_disable_hdmi(encoder); return; } }
/* pch_disable_hdmi() - on PCH platforms only the audio codec is disabled
 * here; the port itself is turned off in pch_post_disable_hdmi(). */
static void pch_disable_hdmi(struct intel_encoder *encoder ) { struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; { __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; if ((int )(crtc->config)->has_audio) { intel_audio_codec_disable(encoder); } else { } return; } }
/* pch_post_disable_hdmi() - second phase of the PCH disable sequence. */
static void pch_post_disable_hdmi(struct intel_encoder *encoder ) { { intel_disable_hdmi(encoder); return; } }
/* hdmi_portclock_limit() - max TMDS clock in kHz for this port:
 * 165000 when limited to DVI (respect_dvi_limit && !has_hdmi_sink) or when
 * the platform flag at byte offset 44 is set; 300000 for the flag at offset
 * 45 or gen > 7; otherwise 225000. NOTE(review): the byte-offset flag tests
 * are CIL-flattened IS_<platform> checks — confirm which platforms against
 * the original source. (Body continues on the next chunk line.) */
static int hdmi_portclock_limit(struct intel_hdmi *hdmi , bool respect_dvi_limit ) { struct drm_device *dev ; struct drm_device *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { tmp = intel_hdmi_to_dev(hdmi); dev = tmp; if ((int )respect_dvi_limit && !
/* Continuation of hdmi_portclock_limit(): platform-dependent clock caps. */
hdmi->has_hdmi_sink) { return (165000); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { return (165000); } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { return (300000); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 7U) { return (300000); } else { return (225000); } } } } } }
/* intel_hdmi_mode_valid() - connector .mode_valid hook. Doubles the clock
 * for double-clocked modes (flag 4096U), then rejects modes above the port
 * clock limit (returns 15), below 20 MHz (returns 16), or with the
 * doublescan flag 32U (returns 8). NOTE(review): numeric returns are
 * enum drm_mode_status values (likely MODE_CLOCK_HIGH / MODE_CLOCK_LOW /
 * MODE_NO_DBLESCAN) — confirm against drm headers. */
static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { int clock ; struct intel_hdmi *tmp ; int tmp___0 ; { clock = mode->clock; if ((mode->flags & 4096U) != 0U) { clock = clock * 2; } else { } tmp = intel_attached_hdmi(connector); tmp___0 = hdmi_portclock_limit(tmp, 1); if (tmp___0 < clock) { return (15); } else { } if (clock <= 19999) { return (16); } else { } if ((mode->flags & 32U) != 0U) { return (8); } else { } return (0); } }
/* hdmi_12bpc_possible() - true iff every connector in the atomic state that
 * targets this CRTC drives an HDMI encoder (type 6U). Returns false
 * outright on gen <= 4 or when the platform flag at byte offset 45 is set.
 * The goto/label pairs (ldv_487xx) are a CIL-flattened for-loop over
 * state->connectors. */
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state ) { struct drm_device *dev ; struct drm_atomic_state *state ; struct intel_encoder *encoder ; struct drm_connector *connector ; struct drm_connector_state *connector_state ; int count ; int count_hdmi ; int i ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_encoder const *__mptr ; { dev = (crtc_state->base.crtc)->dev; count = 0; count_hdmi = 0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { return (0); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return (0); } else { } } state = crtc_state->base.state; i = 0; goto ldv_48769; ldv_48768: ; if ((unsigned long )connector != (unsigned long )((struct drm_connector *)0)) { if ((unsigned long )connector_state->crtc != (unsigned long )crtc_state->base.crtc) { goto ldv_48765; } else { } __mptr = (struct drm_encoder const *)connector_state->best_encoder; encoder = (struct intel_encoder *)__mptr; count_hdmi = ((unsigned int )encoder->type == 6U) + count_hdmi; count = count + 1; } else { } ldv_48765: i = i + 1; ldv_48769: ; if (state->num_connector > i) { connector = *(state->connectors + (unsigned long )i); connector_state = *(state->connector_states + (unsigned long )i); goto ldv_48768; } else { } return ((bool )(count_hdmi > 0 && count_hdmi == count)); } }
/* intel_hdmi_compute_config() - encoder .compute_config hook: propagates
 * sink/infoframe/audio capability into pipe_config, auto-selects limited
 * color range for CEA modes (drm_match_cea_mode > 1), doubles the pixel
 * multiplier for double-clocked modes, and validates the port clock.
 * (Body continues on the next chunk line.) */
bool intel_hdmi_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct drm_device *dev ; struct drm_display_mode *adjusted_mode ; int clock_12bpc ; int portclock_limit ; int tmp___0 ; int desired_bpp ; u8 tmp___1 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; long tmp___3 ; bool tmp___4 ; int tmp___5 ; long tmp___6 ; long tmp___7 ; { tmp = enc_to_intel_hdmi(& encoder->base); intel_hdmi = tmp; dev = encoder->base.dev; adjusted_mode = & pipe_config->base.adjusted_mode;
/* 12bpc needs 1.5x the 8bpc TMDS clock. */
clock_12bpc = (pipe_config->base.adjusted_mode.crtc_clock * 3) / 2; tmp___0 = hdmi_portclock_limit(intel_hdmi, 0); portclock_limit = tmp___0; pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink; if ((int )pipe_config->has_hdmi_sink) { pipe_config->has_infoframe = 1; } else { } if ((int )intel_hdmi->color_range_auto) { if ((int )pipe_config->has_hdmi_sink) { tmp___1 = drm_match_cea_mode((struct drm_display_mode const *)adjusted_mode); if ((unsigned int )tmp___1 > 1U) { intel_hdmi->color_range = 256U; } else { intel_hdmi->color_range = 0U; } } else { intel_hdmi->color_range = 0U; } } else { } if ((adjusted_mode->flags & 4096U) != 0U) { pipe_config->pixel_multiplier = 2U; } else { } if (intel_hdmi->color_range != 0U) { pipe_config->limited_color_range = 1; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 +
/* Continuation of intel_hdmi_compute_config(): PCH encoder selection,
 * audio, bpp selection and final clock validation. */
46UL) == 0U) { pipe_config->has_pch_encoder = 1; } else { } } else { } if ((int )pipe_config->has_hdmi_sink && (int )intel_hdmi->has_audio) { pipe_config->has_audio = 1; } else { }
/* NOTE(review): tmp___5 (12bpc feasibility: pipe_bpp > 24, HDMI sink, clock
 * fits, hdmi_12bpc_possible()) is computed but never read afterwards, and
 * the "picking bpc to 8" debug print runs unconditionally. Upstream uses
 * this result to choose 36bpp; this looks like a CIL/LDV slicing artifact —
 * verify against the original intel_hdmi.c before assuming a bug. */
if ((pipe_config->pipe_bpp > 24 && (int )pipe_config->has_hdmi_sink) && clock_12bpc <= portclock_limit) { tmp___4 = hdmi_12bpc_possible(pipe_config); if ((int )tmp___4) { tmp___5 = 1; } else { tmp___5 = 0; } } else { tmp___5 = 0; } tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_hdmi_compute_config", "picking bpc to 8 for HDMI output\n"); } else { } desired_bpp = 24; if (! pipe_config->bw_constrained) { tmp___6 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_hdmi_compute_config", "forcing pipe bpc to %i for HDMI\n", desired_bpp); } else { } pipe_config->pipe_bpp = desired_bpp; } else { } if (adjusted_mode->crtc_clock > portclock_limit) { tmp___7 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_hdmi_compute_config", "too high HDMI clock, rejecting mode\n"); } else { } return (0); } else { } return (1); } }
/* intel_hdmi_unset_edid() - drop cached detection state: clears the
 * sink/audio/quant-range flags and frees the cached EDID. */
static void intel_hdmi_unset_edid(struct drm_connector *connector ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct drm_connector const *__mptr ; struct drm_connector const *__mptr___0 ; { tmp = intel_attached_hdmi(connector); intel_hdmi = tmp; intel_hdmi->has_hdmi_sink = 0; intel_hdmi->has_audio = 0; intel_hdmi->rgb_quant_range_selectable = 0; __mptr = (struct drm_connector const *)connector; kfree((void const *)((struct intel_connector *)__mptr)->detect_edid); __mptr___0 = (struct drm_connector const *)connector; ((struct intel_connector *)__mptr___0)->detect_edid = (struct edid *)0; return; } }
/* intel_hdmi_set_edid() - read the EDID over GMBUS (holding the port power
 * domain), cache it on the connector, and derive sink capabilities from it
 * (digital input is signalled by the sign bit of edid->input). force_audio
 * overrides: 1 forces audio on, 0 means auto, -2 forces DVI (sink cleared).
 * Returns true when a digital sink was found. */
static bool intel_hdmi_set_edid(struct drm_connector *connector ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp___0 ; struct intel_encoder *intel_encoder ; struct intel_digital_port *tmp___1 ; enum intel_display_power_domain power_domain ; struct edid *edid ; bool connected ; struct i2c_adapter *tmp___2 ; struct drm_connector const *__mptr ; { tmp = to_i915((struct drm_device const *)connector->dev); dev_priv = tmp; tmp___0 = intel_attached_hdmi(connector); intel_hdmi = tmp___0; tmp___1 = hdmi_to_dig_port(intel_hdmi); intel_encoder = & tmp___1->base; connected = 0; power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); tmp___2 = intel_gmbus_get_adapter(dev_priv, (unsigned int )intel_hdmi->ddc_bus); edid = drm_get_edid(connector, tmp___2); intel_display_power_put(dev_priv, power_domain); __mptr = (struct drm_connector const *)connector; ((struct intel_connector *)__mptr)->detect_edid = edid; if ((unsigned long )edid != (unsigned long )((struct edid *)0) && (int )((signed char )edid->input) < 0) { intel_hdmi->rgb_quant_range_selectable = drm_rgb_quant_range_selectable(edid); intel_hdmi->has_audio = drm_detect_monitor_audio(edid); if ((int )intel_hdmi->force_audio != 0) { intel_hdmi->has_audio = (int )intel_hdmi->force_audio == 1; } else { } if ((int )intel_hdmi->force_audio != -2) { intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); } else { } connected = 1; } else { } return (connected); } }
/* intel_hdmi_detect() - connector .detect hook: invalidate then re-read the
 * EDID; on success re-types the encoder to HDMI (6) and reports connected
 * (1), otherwise disconnected (2). (Tail continues on the next chunk line.) */
static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector , bool force ) { enum drm_connector_status status ; long tmp ; struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp___0 ; struct intel_digital_port *tmp___1 ; bool tmp___2 ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_hdmi_detect", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } intel_hdmi_unset_edid(connector); tmp___2 = intel_hdmi_set_edid(connector); if ((int )tmp___2) { tmp___0 = intel_attached_hdmi(connector); intel_hdmi = tmp___0; tmp___1 = hdmi_to_dig_port(intel_hdmi);
/* Continuation of intel_hdmi_detect(): mark encoder as HDMI and report
 * connector_status_connected (1) / disconnected (2). */
tmp___1->base.type = 6; status = 1; } else { status = 2; } return (status); } }
/* intel_hdmi_force() - connector .force hook: re-probe the EDID only when
 * the connector is being forced to a connected state. */
static void intel_hdmi_force(struct drm_connector *connector ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; long tmp___0 ; struct intel_digital_port *tmp___1 ; { tmp = intel_attached_hdmi(connector); intel_hdmi = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_hdmi_force", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } intel_hdmi_unset_edid(connector); if ((unsigned int )connector->status != 1U) { return; } else { } intel_hdmi_set_edid(connector); tmp___1 = hdmi_to_dig_port(intel_hdmi); tmp___1->base.type = 6; return; } }
/* intel_hdmi_get_modes() - add modes from the EDID cached by detect();
 * returns 0 when no EDID is cached. */
static int intel_hdmi_get_modes(struct drm_connector *connector ) { struct edid *edid ; struct drm_connector const *__mptr ; int tmp ; { __mptr = (struct drm_connector const *)connector; edid = ((struct intel_connector *)__mptr)->detect_edid; if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { return (0); } else { } tmp = intel_connector_update_modes(connector, edid); return (tmp); } }
/* intel_hdmi_detect_audio() - audio capability from the cached EDID of a
 * digital sink; false when no (digital) EDID is cached. */
static bool intel_hdmi_detect_audio(struct drm_connector *connector ) { bool has_audio ; struct edid *edid ; struct drm_connector const *__mptr ; { has_audio = 0; __mptr = (struct drm_connector const *)connector; edid = ((struct intel_connector *)__mptr)->detect_edid; if ((unsigned long )edid != (unsigned long )((struct edid *)0) && (int )((signed char )edid->input) < 0) { has_audio = drm_detect_monitor_audio(edid); } else { } return (has_audio); } }
/* intel_hdmi_set_property() - connector .set_property hook for three
 * properties: force_audio (auto/on/off/DVI), broadcast_rgb
 * (auto/full/limited: color_range 0U vs 256U), and aspect ratio. On any
 * accepted change with an active CRTC the mode is restored so the new
 * setting takes effect. Returns -22 (-EINVAL) for unknown property/value.
 * (Body continues on the next chunk line.) */
static int intel_hdmi_set_property(struct drm_connector *connector , struct drm_property *property , uint64_t val ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct intel_digital_port *intel_dig_port ; struct intel_digital_port *tmp___0 ; struct drm_i915_private *dev_priv ; int ret ; enum hdmi_force_audio i ; bool has_audio ; bool old_auto ; uint32_t old_range ; { tmp = intel_attached_hdmi(connector); intel_hdmi = tmp; tmp___0 = hdmi_to_dig_port(intel_hdmi); intel_dig_port = tmp___0; dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; ret = drm_object_property_set_value(& connector->base, property, val); if (ret != 0) { return (ret); } else { } if ((unsigned long )dev_priv->force_audio_property == (unsigned long )property) { i = (enum hdmi_force_audio )val; if ((int )intel_hdmi->force_audio == (int )i) { return (0); } else { } intel_hdmi->force_audio = i; if ((int )i == 0) { has_audio = intel_hdmi_detect_audio(connector); } else { has_audio = (int )i == 1; } if ((int )i == -2) { intel_hdmi->has_hdmi_sink = 0; } else { } intel_hdmi->has_audio = has_audio; goto done; } else { } if ((unsigned long )dev_priv->broadcast_rgb_property == (unsigned long )property) { old_auto = intel_hdmi->color_range_auto; old_range = intel_hdmi->color_range; switch (val) { case 0ULL: intel_hdmi->color_range_auto = 1; goto ldv_48853; case 1ULL: intel_hdmi->color_range_auto = 0; intel_hdmi->color_range = 0U; goto ldv_48853; case 2ULL: intel_hdmi->color_range_auto = 0; intel_hdmi->color_range = 256U; goto ldv_48853; default: ; return (-22); } ldv_48853: ; if ((int )intel_hdmi->color_range_auto == (int )old_auto && intel_hdmi->color_range == old_range) { return (0); } else { } goto done; } else { } if ((unsigned long )(connector->dev)->mode_config.aspect_ratio_property == (unsigned long )property) { switch (val) { case 0ULL: intel_hdmi->aspect_ratio = 0; goto ldv_48858; case 1ULL: intel_hdmi->aspect_ratio = 1; goto ldv_48858; case 2ULL: intel_hdmi->aspect_ratio = 2; goto ldv_48858; default: ; return (-22); } ldv_48858: ; goto done; } else { } return (-22); done: ; if ((unsigned long )intel_dig_port->base.base.crtc != (unsigned long )((struct drm_crtc *)0)) { intel_crtc_restore_mode(intel_dig_port->base.base.crtc); } else { } return (0); } }
/* intel_hdmi_pre_enable() - generic pre-enable: prepare the port and push
 * the infoframes. (Declarations continue on the next chunk line.) */
static void intel_hdmi_pre_enable(struct intel_encoder *encoder ) { struct intel_hdmi *intel_hdmi ; struct intel_hdmi *tmp ; struct
/* Continuation of intel_hdmi_pre_enable(). */
intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *adjusted_mode ; { tmp = enc_to_intel_hdmi(& encoder->base); intel_hdmi = tmp; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; adjusted_mode = & (intel_crtc->config)->base.adjusted_mode; intel_hdmi_prepare(encoder); (*(intel_hdmi->set_infoframes))(& encoder->base, (int )(intel_crtc->config)->has_hdmi_sink, adjusted_mode); return; } }
/* vlv_hdmi_pre_enable() - Valleyview pre-enable: program the DPIO PHY
 * (swing/de-emphasis and lane setup) under sb_lock, push infoframes,
 * enable the port and wait for it to come ready. NOTE(review): the magic
 * DPIO offsets/values are CIL-expanded register macros — confirm against
 * i915 DPIO definitions before reasoning about individual writes. Note
 * "val = 0U;" discards the value just read, matching upstream behavior. */
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct intel_hdmi *intel_hdmi ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *adjusted_mode ; enum dpio_channel port ; int tmp___0 ; int pipe ; u32 val ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; intel_hdmi = & dport->hdmi; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; adjusted_mode = & (intel_crtc->config)->base.adjusted_mode; tmp___0 = vlv_dport_to_channel___0(dport); port = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 9216U + 544U)); val = 0U; if (pipe != 0) { val = val | 2097152U; } else { val = val & 4292870143U; } val = val | 1048772U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33312U), val); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33428U), 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33424U), 723803999U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33416U), 1433974842U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33420U), 209199168U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 9216U + 1680U), 723810424U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33324U), 196608U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33316U), 8192U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33428U), 2147483648U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33336U), 7733272U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33372U), 4196488U); mutex_unlock(& dev_priv->sb_lock); (*(intel_hdmi->set_infoframes))(& encoder->base, (int )(intel_crtc->config)->has_hdmi_sink, adjusted_mode); intel_enable_hdmi(encoder); vlv_wait_port_ready(dev_priv, dport, 0U); return; } }
/* vlv_hdmi_pre_pll_enable() - program the DPIO PHY before the PLL is
 * enabled: prepare the port, then write lane/clock setup registers under
 * sb_lock. (Body continues on the next chunk line.) */
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel port ; int tmp___0 ; int pipe ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = vlv_dport_to_channel___0(dport); port = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; intel_hdmi_prepare(encoder); mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )(((unsigned int )port + 65U) * 512U), 65664U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33284U), 6291552U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33328U), 7671552U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33452U), 5376U); vlv_dpio_write(dev_priv, (enum pipe )pipe,
/* Continuation of vlv_hdmi_pre_pll_enable(): remaining DPIO writes. */
(int )((unsigned int )port * 512U + 33464U), 1077936128U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33316U), 8192U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33428U), 2147483648U); mutex_unlock(& dev_priv->sb_lock); return; } }
/* chv_hdmi_pre_pll_enable() - Cherryview pre-PLL PHY setup: select the data
 * lanes for the channel (different register/field for pipe B), then set the
 * pipe-select bits in both per-lane PCS registers and a clock register.
 * NOTE(review): DPIO offsets/masks are CIL-expanded macros — confirm
 * against i915 CHV DPIO definitions. */
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel ch ; int tmp___0 ; enum pipe pipe ; u32 val ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = vlv_dport_to_channel___0(dport); ch = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; intel_hdmi_prepare(encoder); mutex_lock_nested(& dev_priv->sb_lock, 0U); if ((int )pipe != 1) { val = vlv_dpio_read(dev_priv, pipe, 33044); val = val & 4279238655U; if ((unsigned int )ch == 0U) { val = val | 12582912U; } else { } if ((unsigned int )ch == 1U) { val = val | 3145728U; } else { } vlv_dpio_write(dev_priv, pipe, 33044, val); } else { val = vlv_dpio_read(dev_priv, pipe, 32900); val = val & 4293001215U; if ((unsigned int )ch == 0U) { val = val | 393216U; } else { } if ((unsigned int )ch == 1U) { val = val | 1572864U; } else { } vlv_dpio_write(dev_priv, pipe, 32900, val); } val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 544U)); val = val | 1048576U; if ((int )pipe != 1) { val = val & 4292870143U; } else { val = val | 2097152U; } vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 544U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1056U)); val = val | 1048576U; if ((int )pipe != 1) { val = val & 4292870143U; } else { val = val | 2097152U; } vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1056U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 4294967116U + 33100U)); if ((int )pipe != 1) { val = val & 4294959103U; } else { val = val | 8192U; } vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 4294967116U + 33100U), val); mutex_unlock(& dev_priv->sb_lock); return; } }
/* vlv_hdmi_post_disable() - reset the VLV PHY lane registers for this
 * channel after the port is disabled. */
static void vlv_hdmi_post_disable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel port ; int tmp___0 ; int pipe ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = vlv_dport_to_channel___0(dport); port = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; mutex_lock_nested(& dev_priv->sb_lock, 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )(((unsigned int )port + 65U) * 512U), 0U); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )port * 512U + 33284U), 14680160U); mutex_unlock(& dev_priv->sb_lock); return; } }
/* chv_hdmi_post_disable() - CHV PHY teardown: assert "reset lane stagger"
 * and drop the PCS enable field in both per-lane register pairs.
 * (Body continues on the next chunk line.) */
static void chv_hdmi_post_disable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; enum dpio_channel ch ; int tmp___0 ; enum pipe pipe ; u32 val ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp___0 = vlv_dport_to_channel___0(dport); ch = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; mutex_lock_nested(& dev_priv->sb_lock, 0U); val =
/* Continuation of chv_hdmi_post_disable(): per-lane PCS register writes. */
vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 516U)); val = val | 8388608U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 516U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1028U)); val = val | 8388608U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1028U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 512U)); val = val & 4294901631U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 512U), val); val = vlv_dpio_read(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1024U)); val = val & 4294901631U; vlv_dpio_write(dev_priv, pipe, (int )((unsigned int )ch * 9216U + 1024U), val); mutex_unlock(& dev_priv->sb_lock); return; } }
/* chv_hdmi_pre_enable() - Cherryview pre-enable PHY programming: allow
 * lane-stagger tracking, take per-lane PCS out of reset, compute a
 * port_clock-dependent stagger value, set per-lane swing/de-emphasis,
 * then push infoframes, enable the port and wait for ready.
 * NOTE(review): DPIO offsets/values are CIL-expanded macros — confirm
 * against i915 CHV PHY definitions. */
static void chv_hdmi_pre_enable(struct intel_encoder *encoder ) { struct intel_digital_port *dport ; struct intel_digital_port *tmp ; struct intel_hdmi *intel_hdmi ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *adjusted_mode ; enum dpio_channel ch ; int tmp___0 ; int pipe ; int data ; int i ; int stagger ; u32 val ; { tmp = enc_to_dig_port(& encoder->base); dport = tmp; intel_hdmi = & dport->hdmi; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; adjusted_mode = & (intel_crtc->config)->base.adjusted_mode; tmp___0 = vlv_dport_to_channel___0(dport); ch = (enum dpio_channel )tmp___0; pipe = intel_crtc->pipe; mutex_lock_nested(& dev_priv->sb_lock, 0U); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U)); val = val & 4294967287U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U)); val = val & 4294967287U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 516U)); val = val | 8388608U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 516U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1028U)); val = val | 8388608U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1028U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 512U)); val = val | 65664U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 512U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1024U)); val = val | 65664U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1024U), val);
/* CIL-flattened for-loop over the 4 lanes: lane 1 gets data=0, others 1. */
i = 0; goto ldv_48947; ldv_48946: data = i != 1; vlv_dpio_write(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ?
/* Continuation of chv_hdmi_pre_enable(): per-lane loops and stagger setup. */
9216 : 0) + i * 512) + 184, (u32 )(data << 30)); i = i + 1; ldv_48947: ; if (i <= 3) { goto ldv_48946; } else { }
/* Stagger value scales with port clock (thresholds in kHz). */
if ((intel_crtc->config)->port_clock > 270000) { stagger = 24; } else if ((intel_crtc->config)->port_clock > 135000) { stagger = 13; } else if ((intel_crtc->config)->port_clock > 67500) { stagger = 7; } else if ((intel_crtc->config)->port_clock > 33750) { stagger = 4; } else { stagger = 2; } val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U)); val = val | 520093696U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 556U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U)); val = val | 520093696U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1068U), val); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 560U), (u32 )(stagger | 401216)); vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1072U), (u32 )(stagger | 5709632));
/* NOTE(review): the "val = val;" statements below are CIL artifacts of
 * macro arms that expanded to nothing (e.g. |0 field values) — harmless. */
val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 552U)); val = val & 1073741823U; val = val & 4042326015U; val = val; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 552U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1064U)); val = val & 1073741823U; val = val & 4042326015U; val = val; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1064U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 548U)); val = val & 4294902783U; val = val; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 548U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1060U)); val = val & 4294902783U; val = val; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1060U), val);
/* Three more 4-lane loops (offsets +144, +136, +140 per lane). */
i = 0; goto ldv_48950; ldv_48949: val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 144); val = val & 16777215U; val = val | 2147483648U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 144, val); i = i + 1; ldv_48950: ; if (i <= 3) { goto ldv_48949; } else { } i = 0; goto ldv_48953; ldv_48952: val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 136); val = val & 4278255615U; val = val | 6684672U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 136, val); i = i + 1; ldv_48953: ; if (i <= 3) { goto ldv_48952; } else { } i = 0; goto ldv_48956; ldv_48955: val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 140); val = val & 4160749567U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (((unsigned int )ch != 0U ? 9216 : 0) + i * 512) + 140, val); i = i + 1; ldv_48956: ; if (i <= 3) { goto ldv_48955; } else { } val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 552U)); val = val | 3221225472U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 552U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1064U)); val = val | 3221225472U; vlv_dpio_write(dev_priv, (enum pipe )pipe, (int )((unsigned int )ch * 9216U + 1064U), val); val = vlv_dpio_read(dev_priv, (enum pipe )pipe, 33144); val = val | 8U; vlv_dpio_write(dev_priv, (enum pipe )pipe, 33144, val); mutex_unlock(& dev_priv->sb_lock); (*(intel_hdmi->set_infoframes))(& encoder->base, (int )(intel_crtc->config)->has_hdmi_sink, adjusted_mode); intel_enable_hdmi(encoder); vlv_wait_port_ready(dev_priv, dport, 0U); return; } }
/* intel_hdmi_destroy() - connector .destroy hook: free the cached EDID,
 * clean up and free the connector. (Tail continues on the next chunk line.) */
static void intel_hdmi_destroy(struct drm_connector *connector ) { struct drm_connector const *__mptr ; { __mptr = (struct drm_connector const *)connector; kfree((void const *)((struct intel_connector
/* Continuation of intel_hdmi_destroy(). */
*)__mptr)->detect_edid); drm_connector_cleanup(connector); kfree((void const *)connector); return; } }
/* Connector/encoder vtables wiring the hooks defined above. */
static struct drm_connector_funcs const intel_hdmi_connector_funcs = {& intel_connector_dpms, 0, 0, 0, & intel_hdmi_detect, & drm_helper_probe_single_connector_modes, & intel_hdmi_set_property, & intel_hdmi_destroy, & intel_hdmi_force, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property}; static struct drm_connector_helper_funcs const intel_hdmi_connector_helper_funcs = {& intel_hdmi_get_modes, & intel_hdmi_mode_valid, & intel_best_encoder}; static struct drm_encoder_funcs const intel_hdmi_enc_funcs = {0, & intel_encoder_destroy};
/* intel_attach_aspect_ratio_property() - attach the DRM aspect-ratio
 * property (default 0 = none) if it can be created. */
static void intel_attach_aspect_ratio_property(struct drm_connector *connector ) { int tmp ; { tmp = drm_mode_create_aspect_ratio_property(connector->dev); if (tmp == 0) { drm_object_attach_property(& connector->base, (connector->dev)->mode_config.aspect_ratio_property, 0ULL); } else { } return; } }
/* intel_hdmi_add_properties() - attach force-audio, broadcast-RGB (auto by
 * default) and aspect-ratio properties to a freshly created connector. */
static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi , struct drm_connector *connector ) { { intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_hdmi->color_range_auto = 1; intel_attach_aspect_ratio_property(connector); intel_hdmi->aspect_ratio = 0; return; } }
/* intel_hdmi_init_connector() - register the HDMI connector: per-port DDC
 * bus and HPD pin selection, then platform-specific infoframe callbacks.
 * INCOMPLETE in this chunk — the function continues past the last visible
 * line; do not edit in isolation. */
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port , struct intel_connector *intel_connector ) { struct drm_connector *connector ; struct intel_hdmi *intel_hdmi ; struct intel_encoder *intel_encoder ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum port port ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int __ret_warn_on ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; int tmp ; long tmp___0 ; long tmp___1 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; struct drm_i915_private *__p___11 ; u32 temp ; uint32_t tmp___2 ; struct drm_i915_private *__p___12 ; struct drm_i915_private *__p___13 ; { connector = & intel_connector->base; intel_hdmi = & intel_dig_port->hdmi; intel_encoder = & intel_dig_port->base; dev = intel_encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; port = intel_dig_port->port; drm_connector_init(dev, connector, & intel_hdmi_connector_funcs, 11); drm_connector_helper_add(connector, & intel_hdmi_connector_helper_funcs); connector->interlace_allowed = 1; connector->doublescan_allowed = 0; connector->stereo_allowed = 1;
/* Per-port DDC bus / HPD pin selection (gen 9 uses different GMBUS pins). */
switch ((unsigned int )port) { case 1U: __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { intel_hdmi->ddc_bus = 1; } else { intel_hdmi->ddc_bus = 5; } } else { intel_hdmi->ddc_bus = 5; } intel_encoder->hpd_pin = 4; goto ldv_48996; case 2U: __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) == 0U) { __p___2 = dev_priv; if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { intel_hdmi->ddc_bus = 2; } else { intel_hdmi->ddc_bus = 4; } } else { intel_hdmi->ddc_bus = 4; } intel_encoder->hpd_pin = 5; goto ldv_48996; case 3U: __p___5 = dev_priv; if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = dev_priv; if ((unsigned int )((unsigned char )__p___6->info.gen) == 9U) { tmp = 1; } else { tmp = 0; } } else { tmp = 0; } __ret_warn_on = tmp; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c", 1730, "WARN_ON(IS_BROXTON(dev_priv))"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { intel_hdmi->ddc_bus = 0; } else { __p___3 = dev_priv; if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = dev_priv; if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { intel_hdmi->ddc_bus = 3; } else { intel_hdmi->ddc_bus = 6; } } else { intel_hdmi->ddc_bus = 6; } } intel_encoder->hpd_pin = 6; goto ldv_48996; case 0U: intel_encoder->hpd_pin = 0; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_hdmi.c"), "i" (1742), "i" (12UL)); ldv_49039: ; goto ldv_49039; } ldv_48996:
/* Platform dispatch for the infoframe callback set (VLV / G4x / HSW / IBX;
 * chain truncated at the chunk boundary). */
__p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___10 + 45UL) != 0U) { intel_hdmi->write_infoframe = & vlv_write_infoframe; intel_hdmi->set_infoframes = & vlv_set_infoframes; intel_hdmi->infoframe_enabled = & vlv_infoframe_enabled; } else { __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 44UL) != 0U) { intel_hdmi->write_infoframe = & g4x_write_infoframe; intel_hdmi->set_infoframes = & g4x_set_infoframes; intel_hdmi->infoframe_enabled = & g4x_infoframe_enabled; } else { __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 46UL) != 0U) { intel_hdmi->write_infoframe = & hsw_write_infoframe; intel_hdmi->set_infoframes = & hsw_set_infoframes; intel_hdmi->infoframe_enabled = & hsw_infoframe_enabled; } else { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___7->pch_type == 1U) { intel_hdmi->write_infoframe = & ibx_write_infoframe; intel_hdmi->set_infoframes = & ibx_set_infoframes;
intel_hdmi->infoframe_enabled = & ibx_infoframe_enabled; } else { intel_hdmi->write_infoframe = & cpt_write_infoframe; intel_hdmi->set_infoframes = & cpt_set_infoframes; intel_hdmi->infoframe_enabled = & cpt_infoframe_enabled; } } } } __p___11 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___11 + 46UL) != 0U) { intel_connector->get_hw_state = & intel_ddi_connector_get_hw_state; } else { intel_connector->get_hw_state = & intel_connector_get_hw_state; } intel_connector->unregister = & intel_connector_unregister; intel_hdmi_add_properties(intel_hdmi, connector); intel_connector_attach_encoder(intel_connector, intel_encoder); drm_connector_register(connector); __p___12 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___12 + 44UL) != 0U) { __p___13 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___13->info.device_id) != 10818U) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 85352L, 1); temp = tmp___2; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 85352L, (temp & 4294967280U) | 13U, 1); } else { } } else { } return; } } void intel_hdmi_init(struct drm_device *dev , int hdmi_reg , enum port port ) { struct intel_digital_port *intel_dig_port ; struct intel_encoder *intel_encoder ; struct intel_connector *intel_connector ; void *tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; { tmp = kzalloc(4744UL, 208U); intel_dig_port = (struct intel_digital_port *)tmp; if ((unsigned long )intel_dig_port == (unsigned long )((struct intel_digital_port *)0)) { return; } else { } intel_connector = intel_connector_alloc(); if ((unsigned long )intel_connector == (unsigned long )((struct intel_connector *)0)) { kfree((void const *)intel_dig_port); return; } else { } 
intel_encoder = & intel_dig_port->base; drm_encoder_init(dev, & intel_encoder->base, & intel_hdmi_enc_funcs, 2); intel_encoder->compute_config = & intel_hdmi_compute_config; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { intel_encoder->disable = & pch_disable_hdmi; intel_encoder->post_disable = & pch_post_disable_hdmi; } else { intel_encoder->disable = & g4x_disable_hdmi; } intel_encoder->get_hw_state = & intel_hdmi_get_hw_state; intel_encoder->get_config = & intel_hdmi_get_config; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 8U) { intel_encoder->pre_pll_enable = & chv_hdmi_pre_pll_enable; intel_encoder->pre_enable = & chv_hdmi_pre_enable; intel_encoder->enable = & vlv_enable_hdmi; intel_encoder->post_disable = & chv_hdmi_post_disable; } else { goto _L; } } else { _L: /* CIL Label */ __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { intel_encoder->pre_pll_enable = & vlv_hdmi_pre_pll_enable; intel_encoder->pre_enable = & vlv_hdmi_pre_enable; intel_encoder->enable = & vlv_enable_hdmi; intel_encoder->post_disable = & vlv_hdmi_post_disable; } else { intel_encoder->pre_enable = & intel_hdmi_pre_enable; intel_encoder->enable = & intel_enable_hdmi; } } intel_encoder->type = 6; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 8U) { if ((unsigned int )port == 3U) { intel_encoder->crtc_mask = 4; } else { intel_encoder->crtc_mask = 3; } } else { intel_encoder->crtc_mask = 7; } } else { intel_encoder->crtc_mask = 7; } intel_encoder->cloneable = 2U; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int 
)*((unsigned char *)__p___5 + 44UL) != 0U) { intel_encoder->cloneable = intel_encoder->cloneable | 64U; } else { } intel_dig_port->port = port; intel_dig_port->hdmi.hdmi_reg = (u32 )hdmi_reg; intel_dig_port->dp.output_reg = 0U; intel_hdmi_init_connector(intel_dig_port, intel_connector); return; } } extern int ldv_probe_38(void) ; extern int ldv_probe_36(void) ; void ldv_initialize_drm_connector_funcs_38(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(104UL); intel_hdmi_connector_funcs_group0 = (struct drm_property *)tmp; tmp___0 = ldv_init_zalloc(936UL); intel_hdmi_connector_funcs_group1 = (struct drm_connector *)tmp___0; return; } } void ldv_initialize_drm_connector_helper_funcs_37(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_hdmi_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_main_exported_38(void) { struct drm_connector_state *ldvarg562 ; void *tmp ; uint32_t ldvarg558 ; bool ldvarg561 ; int ldvarg563 ; uint64_t *ldvarg557 ; void *tmp___0 ; uint64_t ldvarg560 ; struct drm_connector_state *ldvarg556 ; void *tmp___1 ; uint32_t ldvarg559 ; int tmp___2 ; { tmp = ldv_init_zalloc(32UL); ldvarg562 = (struct drm_connector_state *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg557 = (uint64_t *)tmp___0; tmp___1 = ldv_init_zalloc(32UL); ldvarg556 = (struct drm_connector_state *)tmp___1; ldv_memset((void *)(& ldvarg558), 0, 4UL); ldv_memset((void *)(& ldvarg561), 0, 1UL); ldv_memset((void *)(& ldvarg563), 0, 4UL); ldv_memset((void *)(& ldvarg560), 0, 8UL); ldv_memset((void *)(& ldvarg559), 0, 4UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_38 == 2) { intel_connector_dpms(intel_hdmi_connector_funcs_group1, ldvarg563); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { intel_connector_dpms(intel_hdmi_connector_funcs_group1, ldvarg563); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 1: ; if (ldv_state_variable_38 == 2) { 
drm_atomic_helper_connector_destroy_state(intel_hdmi_connector_funcs_group1, ldvarg562); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { drm_atomic_helper_connector_destroy_state(intel_hdmi_connector_funcs_group1, ldvarg562); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 2: ; if (ldv_state_variable_38 == 2) { drm_atomic_helper_connector_duplicate_state(intel_hdmi_connector_funcs_group1); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { drm_atomic_helper_connector_duplicate_state(intel_hdmi_connector_funcs_group1); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 3: ; if (ldv_state_variable_38 == 2) { intel_hdmi_force(intel_hdmi_connector_funcs_group1); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { intel_hdmi_force(intel_hdmi_connector_funcs_group1); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 4: ; if (ldv_state_variable_38 == 2) { intel_hdmi_detect(intel_hdmi_connector_funcs_group1, (int )ldvarg561); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { intel_hdmi_detect(intel_hdmi_connector_funcs_group1, (int )ldvarg561); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 5: ; if (ldv_state_variable_38 == 2) { intel_hdmi_set_property(intel_hdmi_connector_funcs_group1, intel_hdmi_connector_funcs_group0, ldvarg560); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { intel_hdmi_set_property(intel_hdmi_connector_funcs_group1, intel_hdmi_connector_funcs_group0, ldvarg560); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 6: ; if (ldv_state_variable_38 == 2) { intel_hdmi_destroy(intel_hdmi_connector_funcs_group1); ldv_state_variable_38 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49155; case 7: ; if (ldv_state_variable_38 == 2) { drm_helper_probe_single_connector_modes(intel_hdmi_connector_funcs_group1, ldvarg559, ldvarg558); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { 
drm_helper_probe_single_connector_modes(intel_hdmi_connector_funcs_group1, ldvarg559, ldvarg558); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 8: ; if (ldv_state_variable_38 == 2) { intel_connector_atomic_get_property(intel_hdmi_connector_funcs_group1, (struct drm_connector_state const *)ldvarg556, intel_hdmi_connector_funcs_group0, ldvarg557); ldv_state_variable_38 = 2; } else { } if (ldv_state_variable_38 == 1) { intel_connector_atomic_get_property(intel_hdmi_connector_funcs_group1, (struct drm_connector_state const *)ldvarg556, intel_hdmi_connector_funcs_group0, ldvarg557); ldv_state_variable_38 = 1; } else { } goto ldv_49155; case 9: ; if (ldv_state_variable_38 == 1) { ldv_probe_38(); ldv_state_variable_38 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49155; default: ldv_stop(); } ldv_49155: ; return; } } void ldv_main_exported_36(void) { struct drm_encoder *ldvarg539 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg539 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_36 == 2) { intel_encoder_destroy(ldvarg539); ldv_state_variable_36 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49171; case 1: ; if (ldv_state_variable_36 == 1) { ldv_probe_36(); ldv_state_variable_36 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49171; default: ldv_stop(); } ldv_49171: ; return; } } void ldv_main_exported_37(void) { struct drm_display_mode *ldvarg232 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg232 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_37 == 1) { intel_hdmi_get_modes(intel_hdmi_connector_helper_funcs_group0); ldv_state_variable_37 = 1; } else { } goto ldv_49179; case 1: ; if (ldv_state_variable_37 == 1) { intel_hdmi_mode_valid(intel_hdmi_connector_helper_funcs_group0, ldvarg232); ldv_state_variable_37 = 1; } else { } goto ldv_49179; case 2: ; if (ldv_state_variable_37 == 1) { 
intel_best_encoder(intel_hdmi_connector_helper_funcs_group0); ldv_state_variable_37 = 1; } else { } goto ldv_49179; default: ldv_stop(); } ldv_49179: ; return; } } bool ldv_queue_work_on_965(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_966(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_967(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_968(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_969(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___25(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long 
__eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } extern unsigned long __usecs_to_jiffies(unsigned int const ) ; __inline static unsigned long usecs_to_jiffies(unsigned int const u ) { unsigned long tmp___1 ; { tmp___1 = __usecs_to_jiffies(u); return (tmp___1); } } bool ldv_queue_work_on_979(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_981(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_980(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_983(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_982(struct workqueue_struct *ldv_func_arg1 ) ; extern int i2c_add_adapter(struct i2c_adapter * ) ; extern void i2c_del_adapter(struct i2c_adapter * ) ; extern struct i2c_algorithm const i2c_bit_algo ; __inline static bool drm_can_sleep___16(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int 
pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39652; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39652; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39652; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39652; default: __bad_percpu_size(); } ldv_39652: pscr_ret__ = pfo_ret__; goto ldv_39658; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39662; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39662; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39662; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39662; default: __bad_percpu_size(); } ldv_39662: pscr_ret__ = pfo_ret_____0; goto ldv_39658; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39671; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39671; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39671; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39671; default: __bad_percpu_size(); } ldv_39671: pscr_ret__ = pfo_ret_____1; goto ldv_39658; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39680; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39680; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39680; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" 
(pfo_ret_____2): "m" (cpu_number)); goto ldv_39680; default: __bad_percpu_size(); } ldv_39680: pscr_ret__ = pfo_ret_____2; goto ldv_39658; default: __bad_size_call_parameter(); goto ldv_39658; } ldv_39658: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___25(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } int intel_setup_gmbus(struct drm_device *dev ) ; void intel_teardown_gmbus(struct drm_device *dev ) ; void intel_gmbus_set_speed(struct i2c_adapter *adapter , int speed ) ; static struct gmbus_pin const gmbus_pins[7U] = { {0, 0}, {"ssc", 20500}, {"vga", 20496}, {"panel", 20504}, {"dpc", 20508}, {"dpb", 20512}, {"dpd", 20516}}; static struct gmbus_pin const gmbus_pins_bdw[7U] = { {0, 0}, {0, 0}, {"vga", 20496}, {0, 0}, {"dpc", 20508}, {"dpb", 20512}, {"dpd", 20516}}; static struct gmbus_pin const gmbus_pins_skl[7U] = { {0, 0}, {0, 0}, {0, 0}, {0, 0}, {"dpc", 20508}, {"dpb", 20512}, {"dpd", 20516}}; static struct gmbus_pin const gmbus_pins_bxt[4U] = { {0, 0}, {"dpb", 806932}, {"dpc", 806936}, {"misc", 806940}}; static struct gmbus_pin const *get_gmbus_pin(struct drm_i915_private *dev_priv , unsigned int pin ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { __p___2 = dev_priv; if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = dev_priv; if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { return ((struct gmbus_pin const *)(& gmbus_pins_bxt) + (unsigned long )pin); } else { goto _L; } } else { _L: /* CIL Label */ __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { return ((struct gmbus_pin const *)(& gmbus_pins_skl) + (unsigned long )pin); } else { __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 
= dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { return ((struct gmbus_pin const *)(& gmbus_pins_bdw) + (unsigned long )pin); } else { return ((struct gmbus_pin const *)(& gmbus_pins) + (unsigned long )pin); } } else { return ((struct gmbus_pin const *)(& gmbus_pins) + (unsigned long )pin); } } } } } bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv , unsigned int pin ) { unsigned int size ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct gmbus_pin const *tmp ; int tmp___0 ; { __p___2 = dev_priv; if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = dev_priv; if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { size = 4U; } else { goto _L; } } else { _L: /* CIL Label */ __p___1 = dev_priv; if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { size = 7U; } else { __p = dev_priv; if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = dev_priv; if ((unsigned int )((unsigned char )__p___0->info.gen) == 8U) { size = 7U; } else { size = 7U; } } else { size = 7U; } } } if (pin < size) { tmp = get_gmbus_pin(dev_priv, pin); if ((int )tmp->reg != 0) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } return ((bool )tmp___0); } } __inline static struct intel_gmbus *to_intel_gmbus(struct i2c_adapter *i2c ) { struct i2c_adapter const *__mptr ; { __mptr = (struct i2c_adapter const *)i2c; return ((struct intel_gmbus *)__mptr); } } void intel_i2c_reset(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(dev_priv->gpio_mmio_base + 20736U), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(dev_priv->gpio_mmio_base + 20752U), 0U, 1); return; } } static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv 
/* NOTE(review): CIL-flattened chunk. All tokens below are byte-identical to the
 * original; only comments (and line breaks between tokens) were added. */
/* (tail of intel_i2c_quirk_set) Read-modify-write of the register at
 * display_mmio_offset + 25088U (0x6200), toggling bit 24 (16777216U ==
 * 0x01000000) on enable and clearing it (mask 4278190079U == ~0x01000000) on
 * disable. Presumably a clock-gating quirk needed while bit-banging the GPIO
 * I2C lines -- TODO confirm against the i915 register headers. The early
 * return keys off a byte at offset 44 of drm_i915_private, i.e. some
 * IS_<platform>() predicate whose identity cannot be told from this chunk. */
, bool enable ) { u32 val ; struct drm_i915_private *__p ; { __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )*((unsigned char *)__p + 44UL) == 0U) { return; } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), 1); if ((int )enable) { val = val | 16777216U; } else { val = val & 4278190079U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 25088U), val, 1); return; } }
/* get_reserved: returns the bits of the bus' GPIO register that must be
 * preserved across bit-bang writes (mask 8224U == 0x2020). The read is skipped
 * entirely when the PCI device id is 13687U (0x3577) or 9570U (0x2562) --
 * NOTE(review): these look like specific legacy-platform ids on which the
 * reserved bits must not be read back; verify against the id tables. */
static u32 get_reserved(struct intel_gmbus *bus ) { struct drm_i915_private *dev_priv ; struct drm_device *dev ; u32 reserved ; uint32_t tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev_priv = bus->dev_priv; dev = dev_priv->dev; reserved = 0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) != 13687U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) != 9570U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )bus->gpio_reg, 0); reserved = tmp & 8224U; } else { } } else { } return (reserved); } }
/* get_clock: i2c-algo-bit getscl callback. Writes the reserved bits with
 * bit 0 set, then with bit 0 clear, then samples bit 4 (16U == 0x10) of the
 * GPIO register as the clock-line level. */
static int get_clock(void *data ) { struct intel_gmbus *bus ; struct drm_i915_private *dev_priv ; u32 reserved ; u32 tmp ; uint32_t tmp___0 ; { bus = (struct intel_gmbus *)data; dev_priv = bus->dev_priv; tmp = get_reserved(bus); reserved = tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bus->gpio_reg, reserved | 1U, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bus->gpio_reg, reserved, 0); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )bus->gpio_reg, 0); return ((tmp___0 & 16U) != 0U); } }
/* get_data: i2c-algo-bit getsda callback, mirror image of get_clock for the
 * data line: primes with bit 8 (256U == 0x100), samples bit 12 (4096U ==
 * 0x1000). */
static int get_data(void *data ) { struct intel_gmbus *bus ; struct drm_i915_private *dev_priv ; u32 reserved ; u32 tmp ; uint32_t tmp___0 ; { bus = (struct intel_gmbus *)data; dev_priv = bus->dev_priv; tmp = get_reserved(bus); reserved = tmp; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bus->gpio_reg, reserved | 256U, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bus->gpio_reg, reserved, 0); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )bus->gpio_reg, 0); return ((tmp___0 & 4096U) != 0U); } }
/* set_clock: i2c-algo-bit setscl callback. Writes 1U (0x1) to release the
 * clock line (open-drain high) or 7U (0x7) to drive it low; the trailing
 * readl is a posting read to flush the MMIO write. */
static void set_clock(void *data , int state_high ) { struct intel_gmbus *bus ; struct drm_i915_private *dev_priv ; u32 reserved ; u32 tmp ; u32 clock_bits ; { bus = (struct intel_gmbus *)data; dev_priv = bus->dev_priv; tmp = get_reserved(bus); reserved = tmp; if (state_high != 0) { clock_bits = 1U; } else { clock_bits = 7U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bus->gpio_reg, reserved | clock_bits, 0); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )bus->gpio_reg, 0); return; } }
/* set_data: i2c-algo-bit setsda callback, same pattern as set_clock shifted
 * to the data-line bit field: 256U (0x100) releases, 1792U (0x700) drives
 * low. */
static void set_data(void *data , int state_high ) { struct intel_gmbus *bus ; struct drm_i915_private *dev_priv ; u32 reserved ; u32 tmp ; u32 data_bits ; { bus = (struct intel_gmbus *)data; dev_priv = bus->dev_priv; tmp = get_reserved(bus); reserved = tmp; if (state_high != 0) { data_bits = 256U; } else { data_bits = 1792U; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )bus->gpio_reg, reserved | data_bits, 0); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )bus->gpio_reg, 0); return; } }
/* intel_gpio_pre_xfer: called by i2c-algo-bit before a bit-banged transfer.
 * Resets the GMBUS controller, enables the quirk bit, releases both lines,
 * then delays; 42950UL == 10 * 4295, i.e. this looks like the CIL expansion
 * of udelay(10) -- confirm. Always returns 0 (success). */
static int intel_gpio_pre_xfer(struct i2c_adapter *adapter ) { struct intel_gmbus *bus ; struct i2c_adapter const *__mptr ; struct drm_i915_private *dev_priv ; { __mptr = (struct i2c_adapter const *)adapter; bus = (struct intel_gmbus *)__mptr; dev_priv = bus->dev_priv; intel_i2c_reset(dev_priv->dev); intel_i2c_quirk_set(dev_priv, 1); set_data((void *)bus, 1); set_clock((void *)bus, 1); __const_udelay(42950UL); return (0); } }
/* intel_gpio_post_xfer (head; body continues on the next source line):
 * releases both lines and clears the quirk bit after a transfer. */
static void intel_gpio_post_xfer(struct i2c_adapter *adapter ) { struct intel_gmbus *bus ; struct i2c_adapter const *__mptr ; struct drm_i915_private *dev_priv ; { __mptr = (struct i2c_adapter const *)adapter; bus = (struct 
intel_gmbus *)__mptr; dev_priv = bus->dev_priv; set_data((void *)bus, 1); set_clock((void *)bus, 1); intel_i2c_quirk_set(dev_priv, 0); return; } } static void intel_gpio_setup(struct intel_gmbus *bus , unsigned int pin ) { struct drm_i915_private *dev_priv ; struct i2c_algo_bit_data *algo ; struct gmbus_pin const *tmp ; unsigned long tmp___0 ; { dev_priv = bus->dev_priv; algo = & bus->bit_algo; tmp = get_gmbus_pin(dev_priv, pin); bus->gpio_reg = dev_priv->gpio_mmio_base + (uint32_t )tmp->reg; bus->adapter.algo_data = (void *)algo; algo->setsda = & set_data; algo->setscl = & set_clock; algo->getsda = & get_data; algo->getscl = & get_clock; algo->pre_xfer = & intel_gpio_pre_xfer; algo->post_xfer = & intel_gpio_post_xfer; algo->udelay = 10; tmp___0 = usecs_to_jiffies(2200U); algo->timeout = (int )tmp___0; algo->data = (void *)bus; return; } } static int gmbus_wait_hw_status(struct drm_i915_private *dev_priv , u32 gmbus2_status , u32 gmbus4_irq_en ) { int i ; int reg_offset ; u32 gmbus2 ; wait_queue_t wait ; struct task_struct *tmp ; struct drm_i915_private *__p ; unsigned long tmp___0 ; { reg_offset = (int )dev_priv->gpio_mmio_base; gmbus2 = 0U; tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { gmbus4_irq_en = 0U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20752), gmbus4_irq_en, 1); i = 0; goto ldv_48164; ldv_48163: prepare_to_wait(& dev_priv->gmbus_wait_queue, & wait, 2); gmbus2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(reg_offset + 20744), 0); if (((gmbus2_status | 1024U) & gmbus2) != 0U) { goto ldv_48162; } else { } schedule_timeout(1L); i = i + 1; ldv_48164: tmp___0 = msecs_to_jiffies_timeout(50U); if ((unsigned long )i < tmp___0) { goto ldv_48163; } else { } 
ldv_48162: finish_wait(& dev_priv->gmbus_wait_queue, & wait); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20752), 0U, 1); if ((gmbus2 & 1024U) != 0U) { return (-6); } else { } if ((gmbus2 & gmbus2_status) != 0U) { return (0); } else { } return (-110); } } static int gmbus_wait_idle(struct drm_i915_private *dev_priv ) { int ret ; int reg_offset ; unsigned long timeout__ ; unsigned long tmp ; int ret__ ; uint32_t tmp___0 ; bool tmp___1 ; uint32_t tmp___2 ; struct drm_i915_private *__p ; long __ret ; unsigned long tmp___3 ; wait_queue_t __wait ; long __ret___0 ; unsigned long tmp___4 ; long __int ; long tmp___5 ; bool __cond___0 ; uint32_t tmp___6 ; bool __cond___1 ; uint32_t tmp___7 ; { reg_offset = (int )dev_priv->gpio_mmio_base; __p = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 4U) { tmp = msecs_to_jiffies(10U); timeout__ = (tmp + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_48186; ldv_48185: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(reg_offset + 20744), 0); if ((tmp___0 & 512U) != 0U) { ret__ = -110; } else { } goto ldv_48184; } else { } tmp___1 = drm_can_sleep___16(); if ((int )tmp___1) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_48186: tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(reg_offset + 20744), 0); if ((tmp___2 & 512U) != 0U) { goto ldv_48185; } else { } ldv_48184: ; return (ret__); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20752), 4U, 1); tmp___3 = msecs_to_jiffies_timeout(10U); __ret = (long )tmp___3; __might_sleep("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_i2c.c", 312, 0); tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 
/* NOTE(review): CIL-flattened chunk. All tokens below are byte-identical to
 * the original; only comments (and line breaks between tokens) were added. */
/* (tail of gmbus_wait_idle) This is the CIL expansion of a wait_event-style
 * loop: poll GMBUS2 (gpio_mmio_base + 20744 == +0x5108) until bit 9 (512U)
 * clears, sleeping on dev_priv->gmbus_wait_queue with a ~10 ms timeout, then
 * disable the GMBUS4 interrupt enables (offset 20752 == +0x5110). Returns 0
 * on success, -110 (-ETIMEDOUT) if the controller never went idle. */
(off_t )(reg_offset + 20744), 0); __cond___1 = (tmp___7 & 512U) == 0U; if ((int )__cond___1 && __ret == 0L) { __ret = 1L; } else { } if (((int )__cond___1 || __ret == 0L) == 0) { tmp___4 = msecs_to_jiffies_timeout(10U); __ret___0 = (long )tmp___4; INIT_LIST_HEAD(& __wait.task_list); __wait.flags = 0U; ldv_48198: tmp___5 = prepare_to_wait_event(& dev_priv->gmbus_wait_queue, & __wait, 2); __int = tmp___5; tmp___6 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(reg_offset + 20744), 0); __cond___0 = (tmp___6 & 512U) == 0U; if ((int )__cond___0 && __ret___0 == 0L) { __ret___0 = 1L; } else { } if (((int )__cond___0 || __ret___0 == 0L) != 0) { goto ldv_48197; } else { } __ret___0 = schedule_timeout(__ret___0); goto ldv_48198; ldv_48197: finish_wait(& dev_priv->gmbus_wait_queue, & __wait); __ret = __ret___0; } else { } ret = (int )__ret; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20752), 0U, 1); if (ret != 0) { return (0); } else { return (-110); } } }
/* gmbus_xfer_read_chunk: hardware GMBUS read of up to one command's worth of
 * bytes. Programs GMBUS1 (offset 20740 == +0x5104) with the byte count in
 * bits 16+, the 7-bit slave address shifted left by 1 with the read bit
 * (... | 1107296257U == 0x42000001 -- NOTE(review): looks like
 * SW_RDY | CYCLE_WAIT | SLAVE_READ, confirm bit names), then for each ready
 * notification pulls one 32-bit word from GMBUS3 (offset 20748 == +0x510C)
 * and unpacks it little-endian, at most 4 bytes per word (loop <= 3U). */
static int gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv , unsigned short addr , u8 *buf , unsigned int len , u32 gmbus1_index ) { int reg_offset ; int ret ; u32 val ; u32 loop ; u8 *tmp ; { reg_offset = (int )dev_priv->gpio_mmio_base; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20740), (((len << 16) | gmbus1_index) | (u32 )((int )addr << 1)) | 1107296257U, 1); goto ldv_48215; ldv_48214: loop = 0U; ret = gmbus_wait_hw_status(dev_priv, 2048U, 1U); if (ret != 0) { return (ret); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(reg_offset + 20748), 1); ldv_48212: tmp = buf; buf = buf + 1; *tmp = (u8 )val; val = val >> 8; len = len - 1U; if (len != 0U) { loop = loop + 1U; if (loop <= 3U) { goto ldv_48212; } else { goto ldv_48213; } } else { } ldv_48213: ; ldv_48215: ; if (len != 0U) { goto ldv_48214; } else { } return (0); } }
/* gmbus_xfer_read: splits an i2c read message into chunks of at most 256U
 * bytes (the GMBUS1 byte-count field limit implied by len << 16 above) and
 * issues gmbus_xfer_read_chunk for each, advancing the buffer. Returns the
 * first chunk error, else 0. */
static int gmbus_xfer_read(struct drm_i915_private *dev_priv , struct i2c_msg *msg , u32 gmbus1_index ) { u8 *buf ; unsigned int rx_size ; unsigned int len ; int ret ; unsigned int _min1 ; unsigned int _min2 ; { buf = msg->buf; rx_size = (unsigned int )msg->len; ldv_48229: _min1 = rx_size; _min2 = 256U; len = _min1 < _min2 ? _min1 : _min2; ret = gmbus_xfer_read_chunk(dev_priv, (int )msg->addr, buf, len, gmbus1_index); if (ret != 0) { return (ret); } else { } rx_size = rx_size - len; buf = buf + (unsigned long )len; if (rx_size != 0U) { goto ldv_48229; } else { } return (0); } }
/* gmbus_xfer_write_chunk: write-direction counterpart. Packs the first (up
 * to) 4 bytes little-endian into GMBUS3, kicks off the cycle via GMBUS1 with
 * the write command word (... | 1107296256U == 0x42000000, i.e. the read
 * variant above without bit 0), then keeps refilling GMBUS3 one 32-bit word
 * at a time, waiting for hardware-ready (status bit 2048U) between words. */
static int gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv , unsigned short addr , u8 *buf , unsigned int len ) { int reg_offset ; unsigned int chunk_size ; u32 val ; u32 loop ; u8 *tmp ; u32 tmp___0 ; int ret ; u8 *tmp___1 ; { reg_offset = (int )dev_priv->gpio_mmio_base; chunk_size = len; loop = 0U; val = loop; goto ldv_48242; ldv_48241: tmp = buf; buf = buf + 1; tmp___0 = loop; loop = loop + 1U; val = (u32 )((int )*tmp << (int )(tmp___0 * 8U)) | val; len = len - 1U; ldv_48242: ; if (len != 0U && loop <= 3U) { goto ldv_48241; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20748), val, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20740), ((chunk_size << 16) | (unsigned int )((int )addr << 1)) | 1107296256U, 1); goto ldv_48248; ldv_48247: loop = 0U; val = loop; ldv_48245: tmp___1 = buf; buf = buf + 1; val = (u32 )((int )*tmp___1 << (int )(loop * 8U)) | val; len = len - 1U; if (len != 0U) { loop = loop + 1U; if (loop <= 3U) { goto ldv_48245; } else { goto ldv_48246; } } else { } ldv_48246: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20748), val, 1); ret = gmbus_wait_hw_status(dev_priv, 2048U, 1U); if (ret != 0) { return (ret); } else { } ldv_48248: ; if (len != 0U) { goto ldv_48247; } else { } return (0); } }
/* gmbus_xfer_write (head; body continues on the next source line): chunked
 * write loop, mirror of gmbus_xfer_read. */
static int gmbus_xfer_write(struct drm_i915_private *dev_priv , struct i2c_msg *msg ) { u8 *buf ; unsigned int tx_size ; unsigned int len ; int ret ; unsigned int _min1 ; unsigned int 
/* Tail of gmbus_xfer_write(): split msg->buf into chunks of at most 256
 * bytes (GMBUS length-field limit for one cycle) and hand each chunk to
 * gmbus_xfer_write_chunk(); bail out on the first chunk error. */
_min2 ; { buf = msg->buf; tx_size = (unsigned int )msg->len;
ldv_48261: /* per-chunk loop: len = min(tx_size, 256) */
_min1 = tx_size; _min2 = 256U; len = _min1 < _min2 ? _min1 : _min2;
ret = gmbus_xfer_write_chunk(dev_priv, (int )msg->addr, buf, len);
if (ret != 0) { return (ret); } else { }
buf = buf + (unsigned long )len; tx_size = tx_size - len;
if (tx_size != 0U) { goto ldv_48261; } else { }
return (0); } }
/* gmbus_is_index_read - true when msgs[i]/msgs[i+1] form an "indexed read"
 * pair: msgs[i] is a write (flags bit 0 clear; gmbus_xfer() below uses
 * flags & 1 to select the read path, so bit 0 distinguishes reads) of at
 * most 2 bytes, immediately followed by a read (flags bit 0 set).
 * gmbus_xfer() then consumes both messages in one gmbus_xfer_index_read(). */
static bool gmbus_is_index_read(struct i2c_msg *msgs , int i , int num ) { {
return ((bool )(((i + 1 < num && ((int )(msgs + (unsigned long )i)->flags & 1) == 0) && (unsigned int )(msgs + (unsigned long )i)->len <= 2U) && (int )(msgs + ((unsigned long )i + 1UL))->flags & 1)); } }
/* gmbus_xfer_index_read - perform the write+read pair detected by
 * gmbus_is_index_read() as a single hardware indexed-read cycle.
 * reg_offset + 20768 is gpio_mmio_base + 0x5120, which matches the GMBUS5
 * (2-byte index) register offset in i915 — NOTE(review): confirm against
 * the register spec. For a 2-byte index the value is programmed into
 * GMBUS5 with bit 31 ((-0x7FFFFFFF-1)) set, presumably the 2-byte-index
 * enable bit; for a 1-byte index it is folded into the GMBUS1 command word
 * instead (67108864 == 0x4000000, presumably the index-cycle select bit)
 * and passed down through gmbus_xfer_read(). GMBUS5 is cleared again after
 * the read so later plain cycles are unaffected. */
static int gmbus_xfer_index_read(struct drm_i915_private *dev_priv , struct i2c_msg *msgs ) { int reg_offset ; u32 gmbus1_index ; u32 gmbus5 ; int ret ; {
reg_offset = (int )dev_priv->gpio_mmio_base; gmbus1_index = 0U; gmbus5 = 0U;
if ((unsigned int )msgs->len == 2U) { gmbus5 = (u32 )(((long )((int )*(msgs->buf + 1UL)) | (-0x7FFFFFFF-1)) | (long )((int )*(msgs->buf) << 8)); } else { }
if ((unsigned int )msgs->len == 1U) { gmbus1_index = (u32 )(((int )*(msgs->buf) << 8) | 67108864); } else { }
if (gmbus5 != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20768), gmbus5, 1); } else { }
ret = gmbus_xfer_read(dev_priv, msgs + 1UL, gmbus1_index);
/* clear GMBUS5 so the 2-byte index does not leak into later transfers */
if (gmbus5 != 0U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20768), 0U, 1); } else { }
return (ret); } }
/* gmbus_xfer - i2c_algorithm.master_xfer entry point (installed in
 * gmbus_algorithm below). Head only on this line: takes the runtime PM
 * reference and, under gmbus_mutex, either falls back to bit-banging
 * (bus->force_bit) or drives the GMBUS hardware message loop.
 * 'try' is a plain identifier here (C, not C++). */
static int gmbus_xfer(struct i2c_adapter *adapter , struct i2c_msg *msgs , int num ) { struct intel_gmbus *bus ; struct i2c_adapter const *__mptr ; struct drm_i915_private *dev_priv ; int i ; int inc ; int try ; int reg_offset ; int ret ; bool tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; int tmp___6 ; {
__mptr = (struct i2c_adapter const *)adapter; bus = (struct intel_gmbus *)__mptr; dev_priv = bus->dev_priv; i = 0; try = 0; ret = 0; intel_aux_display_runtime_get(dev_priv);
mutex_lock_nested(& dev_priv->gmbus_mutex, 0U); if (bus->force_bit != 0U) { ret = (*(i2c_bit_algo.master_xfer))(adapter, msgs, num); goto out; } else { } reg_offset = (int )dev_priv->gpio_mmio_base; retry: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20736), bus->reg0, 1); goto ldv_48295; ldv_48294: inc = 1; tmp = gmbus_is_index_read(msgs, i, num); if ((int )tmp) { ret = gmbus_xfer_index_read(dev_priv, msgs + (unsigned long )i); inc = 2; } else if ((int )(msgs + (unsigned long )i)->flags & 1) { ret = gmbus_xfer_read(dev_priv, msgs + (unsigned long )i, 0U); } else { ret = gmbus_xfer_write(dev_priv, msgs + (unsigned long )i); } if (ret == -110) { goto timeout; } else { } if (ret == -6) { goto clear_err; } else { } ret = gmbus_wait_hw_status(dev_priv, 16384U, 2U); if (ret == -6) { goto clear_err; } else { } if (ret != 0) { goto timeout; } else { } i = i + inc; ldv_48295: ; if (i < num) { goto ldv_48294; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20740), 1207959552U, 1); tmp___1 = gmbus_wait_idle(dev_priv); if (tmp___1 != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("gmbus_xfer", "GMBUS [%s] timed out waiting for idle\n", (char *)(& adapter->name)); } else { } ret = -110; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20736), 0U, 1); ret = ret != 0 ? 
ret != 0 : i; goto out; clear_err: ret = -6; tmp___3 = gmbus_wait_idle(dev_priv); if (tmp___3 != 0) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("gmbus_xfer", "GMBUS [%s] timed out after NAK\n", (char *)(& adapter->name)); } else { } ret = -110; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20740), 2147483648U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20740), 0U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20736), 0U, 1); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("gmbus_xfer", "GMBUS [%s] NAK for addr: %04x %c(%d)\n", (char *)(& adapter->name), (int )(msgs + (unsigned long )i)->addr, (int )(msgs + (unsigned long )i)->flags & 1 ? 114 : 119, (int )(msgs + (unsigned long )i)->len); } else { } if (ret == -6 && i == 0) { tmp___6 = try; try = try + 1; if (tmp___6 == 0) { tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("gmbus_xfer", "GMBUS [%s] NAK on first message, retry\n", (char *)(& adapter->name)); } else { } goto retry; } else { } } else { } goto out; timeout: printk("\016[drm] GMBUS [%s] timed out, falling back to bit banging on pin %d\n", (char *)(& bus->adapter.name), bus->reg0 & 255U); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(reg_offset + 20736), 0U, 1); bus->force_bit = 1U; ret = (*(i2c_bit_algo.master_xfer))(adapter, msgs, num); out: mutex_unlock(& dev_priv->gmbus_mutex); intel_aux_display_runtime_put(dev_priv); return (ret); } } static u32 gmbus_func(struct i2c_adapter *adapter ) { u32 tmp ; { tmp = (*(i2c_bit_algo.functionality))(adapter); return (tmp & 268402697U); } } static struct i2c_algorithm const gmbus_algorithm = {& gmbus_xfer, 0, & gmbus_func, 0, 0}; int intel_setup_gmbus(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_gmbus *bus ; 
unsigned int pin ; int ret ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; bool tmp ; int tmp___0 ; struct gmbus_pin const *tmp___1 ; struct drm_i915_private *__p___2 ; bool tmp___2 ; int tmp___3 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type == 5U) { return (0); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 0U) { dev_priv->gpio_mmio_base = 786432U; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { dev_priv->gpio_mmio_base = 1572864U; } else { dev_priv->gpio_mmio_base = 0U; } } } __mutex_init(& dev_priv->gmbus_mutex, "&dev_priv->gmbus_mutex", & __key); __init_waitqueue_head(& dev_priv->gmbus_wait_queue, "&dev_priv->gmbus_wait_queue", & __key___0); pin = 0U; goto ldv_48340; ldv_48339: tmp = intel_gmbus_is_valid_pin(dev_priv, pin); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_48331; } else { } bus = (struct intel_gmbus *)(& dev_priv->gmbus) + (unsigned long )pin; bus->adapter.owner = & __this_module; bus->adapter.class = 8U; tmp___1 = get_gmbus_pin(dev_priv, pin); snprintf((char *)(& bus->adapter.name), 48UL, "i915 gmbus %s", tmp___1->name); bus->adapter.dev.parent = & (dev->pdev)->dev; bus->dev_priv = dev_priv; bus->adapter.algo = & gmbus_algorithm; bus->reg0 = pin; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 13687U) { bus->force_bit = 1U; } else { } intel_gpio_setup(bus, pin); ret = i2c_add_adapter(& bus->adapter); if (ret != 0) { goto err; } else { } ldv_48331: pin = pin + 1U; ldv_48340: ; if (pin <= 6U) { goto ldv_48339; } else { } intel_i2c_reset(dev_priv->dev); return (0); err: ; goto ldv_48342; ldv_48343: tmp___2 = 
/* Tail of intel_setup_gmbus()'s "err:" unwind: walk pins back down from
 * the one that failed, deleting every adapter that was registered.
 * NOTE(review): the loop body runs before "pin = pin - 1U" is re-tested,
 * and stops once pin reaches 0 — this assumes pin 0 is never a valid
 * registered pin (intel_gmbus_is_valid_pin() gates each step anyway);
 * confirm against the pin table. */
intel_gmbus_is_valid_pin(dev_priv, pin); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; }
if (tmp___3) { goto ldv_48342; } else { }
bus = (struct intel_gmbus *)(& dev_priv->gmbus) + (unsigned long )pin;
i2c_del_adapter(& bus->adapter);
ldv_48342: pin = pin - 1U;
if (pin != 0U) { goto ldv_48343; } else { }
return (ret); } }
/* intel_gmbus_get_adapter - look up the i2c_adapter for a GMBUS pin.
 * WARN_ON (expanded by CIL into the warn_slowpath_fmt call, original
 * source line 697) and return NULL for an invalid pin; otherwise return
 * the embedded adapter. Callers must therefore tolerate a NULL result. */
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv , unsigned int pin ) { int __ret_warn_on ; bool tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; {
tmp = intel_gmbus_is_valid_pin(dev_priv, pin); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; }
__ret_warn_on = tmp___0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_i2c.c", 697, "WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin))"); } else { }
tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L);
if (tmp___2 != 0L) { return ((struct i2c_adapter *)0); } else { }
return (& dev_priv->gmbus[pin].adapter); } }
/* intel_gmbus_set_speed - program the bus-rate field of the cached GMBUS0
 * value (bus->reg0). 4294966527U == 0xFFFFFCFF, i.e. bits 9:8 are cleared
 * before OR-ing in 'speed' — so 'speed' is expected to be a pre-shifted
 * rate-select value, not a raw index (NOTE(review): confirm bits 9:8 are
 * the GMBUS0 rate-select field). */
void intel_gmbus_set_speed(struct i2c_adapter *adapter , int speed ) { struct intel_gmbus *bus ; struct intel_gmbus *tmp ; {
tmp = to_intel_gmbus(adapter); bus = tmp;
bus->reg0 = (bus->reg0 & 4294966527U) | (u32 )speed;
return; } }
/* intel_gmbus_force_bit - reference-counted toggle of bit-banging mode:
 * force_bit is incremented when force_bit is true and decremented
 * (4294967295U == (u32)-1) when false, then the change is traced via the
 * drm_debug path. Head only on this line; the format-string tail
 * continues on the next line. */
void intel_gmbus_force_bit(struct i2c_adapter *adapter , bool force_bit ) { struct intel_gmbus *bus ; struct intel_gmbus *tmp ; long tmp___0 ; {
tmp = to_intel_gmbus(adapter); bus = tmp;
bus->force_bit = bus->force_bit + ((int )force_bit ? 1U : 4294967295U);
tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L);
if (tmp___0 != 0L) { drm_ut_debug_printk("intel_gmbus_force_bit", "%sabling bit-banging on %s. force bit now %d\n", (int )force_bit ?
(char *)"en" : (char *)"dis", (char *)(& adapter->name), bus->force_bit); } else { } return; } } void intel_teardown_gmbus(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_gmbus *bus ; unsigned int pin ; bool tmp ; int tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; pin = 0U; goto ldv_48372; ldv_48371: tmp = intel_gmbus_is_valid_pin(dev_priv, pin); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_48370; } else { } bus = (struct intel_gmbus *)(& dev_priv->gmbus) + (unsigned long )pin; i2c_del_adapter(& bus->adapter); ldv_48370: pin = pin + 1U; ldv_48372: ; if (pin <= 6U) { goto ldv_48371; } else { } return; } } void ldv_initialize_i2c_algorithm_35(void) { void *tmp ; { tmp = ldv_init_zalloc(1936UL); gmbus_algorithm_group0 = (struct i2c_adapter *)tmp; return; } } void ldv_main_exported_35(void) { struct i2c_msg *ldvarg74 ; void *tmp ; int ldvarg73 ; int tmp___0 ; { tmp = ldv_init_zalloc(16UL); ldvarg74 = (struct i2c_msg *)tmp; ldv_memset((void *)(& ldvarg73), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_35 == 1) { gmbus_xfer(gmbus_algorithm_group0, ldvarg74, ldvarg73); ldv_state_variable_35 = 1; } else { } goto ldv_48383; case 1: ; if (ldv_state_variable_35 == 1) { gmbus_func(gmbus_algorithm_group0); ldv_state_variable_35 = 1; } else { } goto ldv_48383; default: ldv_stop(); } ldv_48383: ; return; } } bool ldv_queue_work_on_979(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_980(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = 
queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_981(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_982(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_983(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static unsigned long arch_local_save_flags___26(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4860: ; goto ldv_4860; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& 
pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void *ERR_PTR(long error ) ; __inline static bool IS_ERR_OR_NULL(void const *ptr ) ; bool ldv_queue_work_on_993(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_995(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_994(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_997(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_996(struct workqueue_struct *ldv_func_arg1 ) ; extern int acpi_lid_notifier_register(struct notifier_block * ) ; extern int acpi_lid_notifier_unregister(struct notifier_block * ) ; extern int acpi_lid_open(void) ; __inline static bool drm_can_sleep___17(void) { int tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; int tmp___0 ; unsigned long _flags ; int tmp___1 ; { tmp = preempt_count(); if (tmp != 0) { return (0); } else { __vpp_verify = (void const *)0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_39749; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39749; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39749; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_39749; default: __bad_percpu_size(); } ldv_39749: pscr_ret__ = pfo_ret__; goto ldv_39755; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39759; case 2UL: __asm__ ("movw %%gs:%1,%0": 
"=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39759; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39759; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_39759; default: __bad_percpu_size(); } ldv_39759: pscr_ret__ = pfo_ret_____0; goto ldv_39755; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39768; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39768; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39768; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_39768; default: __bad_percpu_size(); } ldv_39768: pscr_ret__ = pfo_ret_____1; goto ldv_39755; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39777; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39777; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39777; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_39777; default: __bad_percpu_size(); } ldv_39777: pscr_ret__ = pfo_ret_____2; goto ldv_39755; default: __bad_size_call_parameter(); goto ldv_39755; } ldv_39755: tmp___0 = atomic_read((atomic_t const *)(& kgdb_active)); if (pscr_ret__ == tmp___0) { return (0); } else { _flags = arch_local_save_flags___26(); tmp___1 = arch_irqs_disabled_flags(_flags); if (tmp___1 != 0) { return (0); } else { } } } return (1); } } static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder ) { struct drm_encoder const *__mptr ; { __mptr = (struct drm_encoder const *)encoder; return ((struct intel_lvds_encoder *)__mptr); } } static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector ) { struct drm_connector const *__mptr ; { __mptr = 
(struct drm_connector const *)connector; return ((struct intel_lvds_connector *)__mptr); } } static bool intel_lvds_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_lvds_encoder *lvds_encoder ; struct intel_lvds_encoder *tmp ; enum intel_display_power_domain power_domain ; u32 tmp___0 ; bool tmp___1 ; int tmp___2 ; struct drm_i915_private *__p ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = to_lvds_encoder(& encoder->base); lvds_encoder = tmp; power_domain = intel_display_port_power_domain(encoder); tmp___1 = intel_display_power_is_enabled(dev_priv, power_domain); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 1); if ((int )tmp___0 >= 0) { return (0); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 2U) { *pipe = (enum pipe )((tmp___0 & 1610612736U) >> 29); } else { *pipe = (enum pipe )((tmp___0 & 1073741824U) >> 30); } return (1); } } static void intel_lvds_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 lvds_reg ; u32 tmp ; u32 flags ; int dotclock ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; flags = 0U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { lvds_reg = 921984U; } else { lvds_reg = 397696U; } tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_reg, 1); if ((tmp & 1048576U) != 0U) { flags = flags | 2U; } else { flags = flags | 1U; } if ((tmp & 2097152U) != 0U) { flags = flags | 8U; } else { flags = flags | 4U; } pipe_config->base.adjusted_mode.flags = 
pipe_config->base.adjusted_mode.flags | flags; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 3U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397872U), 1); pipe_config->gmch_pfit.control = pipe_config->gmch_pfit.control | (tmp & 8U); } else { } dotclock = pipe_config->port_clock; __p___1 = to_i915((struct drm_device const *)dev_priv->dev); if ((unsigned int )__p___1->pch_type != 0U) { ironlake_check_encoder_dotclock((struct intel_crtc_state const *)pipe_config, dotclock); } else { } pipe_config->base.adjusted_mode.crtc_clock = dotclock; return; } } static void intel_pre_enable_lvds(struct intel_encoder *encoder ) { struct intel_lvds_encoder *lvds_encoder ; struct intel_lvds_encoder *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode const *adjusted_mode ; int pipe ; u32 temp ; struct intel_shared_dpll *tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { tmp = to_lvds_encoder(& encoder->base); lvds_encoder = tmp; dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; adjusted_mode = (struct drm_display_mode const *)(& (crtc->config)->base.adjusted_mode); pipe = crtc->pipe; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { assert_fdi_rx_pll(dev_priv, (enum pipe )pipe, 0); tmp___0 = intel_crtc_to_shared_dpll(crtc); assert_shared_dpll(dev_priv, tmp___0, 0); } else { assert_pll(dev_priv, (enum pipe )pipe, 0); } temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 1); temp = temp | 2147484416U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { temp = 
temp & 2684354559U; temp = (u32 )(pipe << 29) | temp; } else if (pipe == 1) { temp = temp | 1073741824U; } else { temp = temp & 3221225471U; } temp = temp & 4294934527U; temp = (crtc->config)->gmch_pfit.lvds_border_bits | temp; if ((int )lvds_encoder->is_dual_link) { temp = temp | 60U; } else { temp = temp & 4294967235U; } temp = temp & 4294967103U; temp = lvds_encoder->a3_power | temp; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 4U) { if ((int )(crtc->config)->dither && (crtc->config)->pipe_bpp == 18) { temp = temp | 33554432U; } else { temp = temp & 4261412863U; } } else { } temp = temp & 4291821567U; if (((unsigned int )adjusted_mode->flags & 2U) != 0U) { temp = temp | 1048576U; } else { } if (((unsigned int )adjusted_mode->flags & 8U) != 0U) { temp = temp | 2097152U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )lvds_encoder->reg, temp, 1); return; } } static void intel_enable_lvds(struct intel_encoder *encoder ) { struct drm_device *dev ; struct intel_lvds_encoder *lvds_encoder ; struct intel_lvds_encoder *tmp ; struct intel_connector *intel_connector ; struct drm_i915_private *dev_priv ; u32 ctl_reg ; u32 stat_reg ; struct drm_i915_private *__p ; uint32_t tmp___0 ; uint32_t tmp___1 ; unsigned long timeout__ ; unsigned long tmp___2 ; int ret__ ; uint32_t tmp___3 ; bool tmp___4 ; uint32_t tmp___5 ; { dev = encoder->base.dev; tmp = to_lvds_encoder(& encoder->base); lvds_encoder = tmp; intel_connector = & (lvds_encoder->attached_connector)->base; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { ctl_reg = 815620U; stat_reg = 815616U; } else { ctl_reg = 397828U; stat_reg = 397824U; } tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )lvds_encoder->reg, tmp___0 | 2147483648U, 1); tmp___1 = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ctl_reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ctl_reg, tmp___1 | 1U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 0); tmp___2 = msecs_to_jiffies(1000U); timeout__ = (tmp___2 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52200; ldv_52199: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )stat_reg, 1); if ((int )tmp___3 >= 0) { ret__ = -110; } else { } goto ldv_52198; } else { } tmp___4 = drm_can_sleep___17(); if ((int )tmp___4) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52200: tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )stat_reg, 1); if ((int )tmp___5 >= 0) { goto ldv_52199; } else { } ldv_52198: ; if (ret__ != 0) { drm_err("timed out waiting for panel to power on\n"); } else { } intel_panel_enable_backlight(intel_connector); return; } } static void intel_disable_lvds(struct intel_encoder *encoder ) { struct drm_device *dev ; struct intel_lvds_encoder *lvds_encoder ; struct intel_lvds_encoder *tmp ; struct intel_connector *intel_connector ; struct drm_i915_private *dev_priv ; u32 ctl_reg ; u32 stat_reg ; struct drm_i915_private *__p ; uint32_t tmp___0 ; unsigned long timeout__ ; unsigned long tmp___1 ; int ret__ ; uint32_t tmp___2 ; bool tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; { dev = encoder->base.dev; tmp = to_lvds_encoder(& encoder->base); lvds_encoder = tmp; intel_connector = & (lvds_encoder->attached_connector)->base; dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { ctl_reg = 815620U; stat_reg = 815616U; } else { ctl_reg = 397828U; stat_reg = 397824U; } intel_panel_disable_backlight(intel_connector); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )ctl_reg, 1); 
(*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )ctl_reg, tmp___0 & 4294967294U, 1); tmp___1 = msecs_to_jiffies(1000U); timeout__ = (tmp___1 + (unsigned long )jiffies) + 1UL; ret__ = 0; goto ldv_52227; ldv_52226: ; if ((long )(timeout__ - (unsigned long )jiffies) < 0L) { tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )stat_reg, 1); if ((int )tmp___2 < 0) { ret__ = -110; } else { } goto ldv_52225; } else { } tmp___3 = drm_can_sleep___17(); if ((int )tmp___3) { usleep_range(1000UL, 2000UL); } else { cpu_relax(); } ldv_52227: tmp___4 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )stat_reg, 1); if ((int )tmp___4 < 0) { goto ldv_52226; } else { } ldv_52225: ; if (ret__ != 0) { drm_err("timed out waiting for panel to power off\n"); } else { } tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )lvds_encoder->reg, tmp___5 & 2147483647U, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 0); return; } } static enum drm_mode_status intel_lvds_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct drm_display_mode *fixed_mode ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; fixed_mode = intel_connector->panel.fixed_mode; if (mode->hdisplay > fixed_mode->hdisplay) { return (29); } else { } if (mode->vdisplay > fixed_mode->vdisplay) { return (29); } else { } return (0); } } static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct intel_lvds_encoder *lvds_encoder ; struct intel_lvds_encoder *tmp ; struct intel_connector *intel_connector ; struct drm_display_mode *adjusted_mode ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; unsigned int lvds_bpp 
; struct drm_i915_private *__p ; long tmp___0 ; struct drm_i915_private *__p___0 ; { dev = intel_encoder->base.dev; tmp = to_lvds_encoder(& intel_encoder->base); lvds_encoder = tmp; intel_connector = & (lvds_encoder->attached_connector)->base; adjusted_mode = & pipe_config->base.adjusted_mode; __mptr = (struct drm_crtc const *)pipe_config->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 3U && (int )intel_crtc->pipe == 0) { drm_err("Can\'t support LVDS on pipe A\n"); return (0); } else { } if (lvds_encoder->a3_power == 192U) { lvds_bpp = 24U; } else { lvds_bpp = 18U; } if ((unsigned int )pipe_config->pipe_bpp != lvds_bpp && ! pipe_config->bw_constrained) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_lvds_compute_config", "forcing display bpp (was %d) to LVDS (%d)\n", pipe_config->pipe_bpp, lvds_bpp); } else { } pipe_config->pipe_bpp = (int )lvds_bpp; } else { } intel_fixed_panel_mode((struct drm_display_mode const *)intel_connector->panel.fixed_mode, adjusted_mode); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 0U) { pipe_config->has_pch_encoder = 1; intel_pch_panel_fitting(intel_crtc, pipe_config, intel_connector->panel.fitting_mode); } else { intel_gmch_panel_fitting(intel_crtc, pipe_config, intel_connector->panel.fitting_mode); } return (1); } } static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector , bool force ) { struct drm_device *dev ; enum drm_connector_status status ; long tmp ; { dev = connector->dev; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_lvds_detect", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } status = intel_panel_detect(dev); if ((unsigned int )status != 3U) { return (status); } else { } return (1); } } static int 
/* intel_lvds_get_modes - connector .get_modes hook ("static int" return
 * type is on the previous line). If a cached EDID was probed for this
 * connector, hand it to drm_add_edid_modes() and return its mode count;
 * otherwise fall back to duplicating the panel's fixed mode (0 modes if
 * the duplicate allocation fails, else exactly 1). */
intel_lvds_get_modes(struct drm_connector *connector ) { struct intel_lvds_connector *lvds_connector ; struct intel_lvds_connector *tmp ; struct drm_device *dev ; struct drm_display_mode *mode ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; {
tmp = to_lvds_connector(connector); lvds_connector = tmp; dev = connector->dev;
tmp___1 = IS_ERR_OR_NULL((void const *)lvds_connector->base.edid);
if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; }
if (tmp___2) { tmp___0 = drm_add_edid_modes(connector, lvds_connector->base.edid); return (tmp___0); } else { }
mode = drm_mode_duplicate(dev, (struct drm_display_mode const *)lvds_connector->base.panel.fixed_mode);
if ((unsigned long )mode == (unsigned long )((struct drm_display_mode *)0)) { return (0); } else { }
drm_mode_probed_add(connector, mode);
return (1); } }
/* DMI quirk callback: log that the forced modeset on lid events is being
 * skipped for the matched machine; returning 1 makes dmi_check_system()
 * report a match. */
static int intel_no_modeset_on_lid_dmi_callback(struct dmi_system_id const *id ) { {
printk("\016[drm] Skipping forced modeset for %s\n", id->ident);
return (1); } }
/* Machines whose lid events must not trigger a forced modeset
 * (currently only the Toshiba Tecra A11); the trailing zeroed entry
 * terminates the table. */
static struct dmi_system_id const intel_no_modeset_on_lid[2U] = { {& intel_no_modeset_on_lid_dmi_callback, "Toshiba Tecra A11", {{4U, (unsigned char)0, {'T', 'O', 'S', 'H', 'I', 'B', 'A', '\000'}}, {5U, (unsigned char)0, {'T', 'E', 'C', 'R', 'A', ' ', 'A', '1', '1', '\000'}}}, 0}};
/* intel_lid_notify - ACPI lid notifier (head only on this line).
 * The "__mptr + 0xfffffffffffffbd0UL" is CIL's expansion of
 * container_of(nb, struct intel_lvds_connector, lid_notifier): a -1072
 * byte offset back to the enclosing connector. Bails out early when a
 * GPU switcheroo power-off is in progress (switch_power_state != 0), then
 * re-detects the connector and decides under modeset_restore_lock whether
 * a modeset restore is needed (body continues on the next line). */
static int intel_lid_notify(struct notifier_block *nb , unsigned long val , void *unused ) { struct intel_lvds_connector *lvds_connector ; struct notifier_block const *__mptr ; struct drm_connector *connector ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; int tmp ; int tmp___0 ; struct drm_i915_private *__p ; {
__mptr = (struct notifier_block const *)nb;
lvds_connector = (struct intel_lvds_connector *)__mptr + 0xfffffffffffffbd0UL;
connector = & lvds_connector->base.base; dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private;
if (dev->switch_power_state != 0) { return (1); } else { }
mutex_lock_nested(& dev_priv->modeset_restore_lock, 0U);
if ((unsigned int )dev_priv->modeset_restore == 2U) {
goto exit; } else { } connector->status = (*((connector->funcs)->detect))(connector, 0); tmp = dmi_check_system((struct dmi_system_id const *)(& intel_no_modeset_on_lid)); if (tmp != 0) { goto exit; } else { } tmp___0 = acpi_lid_open(); if (tmp___0 == 0) { dev_priv->modeset_restore = 0; goto exit; } else { } if ((unsigned int )dev_priv->modeset_restore == 1U) { goto exit; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 0U) { drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, 1); drm_modeset_unlock_all(dev); } else { } dev_priv->modeset_restore = 1; exit: mutex_unlock(& dev_priv->modeset_restore_lock); return (1); } } static void intel_lvds_destroy(struct drm_connector *connector ) { struct intel_lvds_connector *lvds_connector ; struct intel_lvds_connector *tmp ; bool tmp___0 ; int tmp___1 ; { tmp = to_lvds_connector(connector); lvds_connector = tmp; if ((unsigned long )lvds_connector->lid_notifier.notifier_call != (unsigned long )((int (*)(struct notifier_block * , unsigned long , void * ))0)) { acpi_lid_notifier_unregister(& lvds_connector->lid_notifier); } else { } tmp___0 = IS_ERR_OR_NULL((void const *)lvds_connector->base.edid); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { kfree((void const *)lvds_connector->base.edid); } else { } intel_panel_fini(& lvds_connector->base.panel); drm_connector_cleanup(connector); kfree((void const *)connector); return; } } static int intel_lvds_set_property(struct drm_connector *connector , struct drm_property *property , uint64_t value ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct drm_device *dev ; struct drm_crtc *crtc ; long tmp ; struct intel_encoder *tmp___0 ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; dev = connector->dev; if ((unsigned long )dev->mode_config.scaling_mode_property == (unsigned long )property) { if (value == 0ULL) { tmp = 
ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_lvds_set_property", "no scaling not supported\n"); } else { } return (-22); } else { } if ((uint64_t )intel_connector->panel.fitting_mode == value) { return (0); } else { } intel_connector->panel.fitting_mode = (int )value; tmp___0 = intel_attached_encoder(connector); crtc = tmp___0->base.crtc; if ((unsigned long )crtc != (unsigned long )((struct drm_crtc *)0) && (int )(crtc->state)->enable) { intel_crtc_restore_mode(crtc); } else { } } else { } return (0); } } static struct drm_connector_helper_funcs const intel_lvds_connector_helper_funcs = {& intel_lvds_get_modes, & intel_lvds_mode_valid, & intel_best_encoder}; static struct drm_connector_funcs const intel_lvds_connector_funcs = {& intel_connector_dpms, 0, 0, 0, & intel_lvds_detect, & drm_helper_probe_single_connector_modes, & intel_lvds_set_property, & intel_lvds_destroy, 0, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property}; static struct drm_encoder_funcs const intel_lvds_enc_funcs = {0, & intel_encoder_destroy}; static int intel_no_lvds_dmi_callback(struct dmi_system_id const *id ) { { printk("\016[drm] Skipping LVDS initialization for %s\n", id->ident); return (1); } } static struct dmi_system_id const intel_no_lvds[26U] = { {& intel_no_lvds_dmi_callback, "Apple Mac Mini (Core series)", {{4U, (unsigned char)0, {'A', 'p', 'p', 'l', 'e', '\000'}}, {5U, (unsigned char)0, {'M', 'a', 'c', 'm', 'i', 'n', 'i', '1', ',', '1', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Apple Mac Mini (Core 2 series)", {{4U, (unsigned char)0, {'A', 'p', 'p', 'l', 'e', '\000'}}, {5U, (unsigned char)0, {'M', 'a', 'c', 'm', 'i', 'n', 'i', '2', ',', '1', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "MSI IM-945GSE-A", {{4U, (unsigned char)0, {'M', 'S', 'I', '\000'}}, {5U, (unsigned char)0, {'A', '9', '8', '3', '0', 'I', 'M', 'S', '\000'}}}, 0}, {& 
/* intel_no_lvds[] continued: per-entry DMI matchers (slot numbers 4U/5U are
 * sys_vendor/product_name, 9U/10U board_vendor/board_name, 6U version --
 * assumed from the dmi_system_id layout; TODO confirm against dmi.h). */
intel_no_lvds_dmi_callback, "Dell Studio Hybrid", {{4U, (unsigned char)0, {'D', 'e', 'l', 'l', ' ', 'I', 'n', 'c', '.', '\000'}}, {5U, (unsigned char)0, {'S', 't', 'u', 'd', 'i', 'o', ' ', 'H', 'y', 'b', 'r', 'i', 'd', ' ', '1', '4', '0', 'g', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Dell OptiPlex FX170", {{4U, (unsigned char)0, {'D', 'e', 'l', 'l', ' ', 'I', 'n', 'c', '.', '\000'}}, {5U, (unsigned char)0, {'O', 'p', 't', 'i', 'P', 'l', 'e', 'x', ' ', 'F', 'X', '1', '7', '0', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "AOpen Mini PC", {{4U, (unsigned char)0, {'A', 'O', 'p', 'e', 'n', '\000'}}, {5U, (unsigned char)0, {'i', '9', '6', '5', 'G', 'M', 'x', '-', 'I', 'F', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "AOpen Mini PC MP915", {{9U, (unsigned char)0, {'A', 'O', 'p', 'e', 'n', '\000'}}, {10U, (unsigned char)0, {'i', '9', '1', '5', 'G', 'M', 'x', '-', 'F', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "AOpen i915GMm-HFS", {{9U, (unsigned char)0, {'A', 'O', 'p', 'e', 'n', '\000'}}, {10U, (unsigned char)0, {'i', '9', '1', '5', 'G', 'M', 'm', '-', 'H', 'F', 'S', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "AOpen i45GMx-I", {{9U, (unsigned char)0, {'A', 'O', 'p', 'e', 'n', '\000'}}, {10U, (unsigned char)0, {'i', '4', '5', 'G', 'M', 'x', '-', 'I', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Aopen i945GTt-VFA", {{6U, (unsigned char)0, {'A', 'O', '0', '0', '0', '0', '1', 'J', 'W', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Clientron U800", {{4U, (unsigned char)0, {'C', 'l', 'i', 'e', 'n', 't', 'r', 'o', 'n', '\000'}}, {5U, (unsigned char)0, {'U', '8', '0', '0', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Clientron E830", {{4U, (unsigned char)0, {'C', 'l', 'i', 'e', 'n', 't', 'r', 'o', 'n', '\000'}}, {5U, (unsigned char)0, {'E', '8', '3', '0', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Asus EeeBox PC EB1007", {{4U, (unsigned char)0, {'A', 'S', 'U', 'S', 'T', 'e', 'K', ' ', 'C', 'o', 'm', 'p', 'u', 't', 'e', 'r', ' ', 'I', 'N', 'C', '.',
'\000'}}, {5U, (unsigned char)0, {'E', 'B', '1', '0', '0', '7', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Asus AT5NM10T-I", {{9U, (unsigned char)0, {'A', 'S', 'U', 'S', 'T', 'e', 'K', ' ', 'C', 'o', 'm', 'p', 'u', 't', 'e', 'r', ' ', 'I', 'N', 'C', '.', '\000'}}, {10U, (unsigned char)0, {'A', 'T', '5', 'N', 'M', '1', '0', 'T', '-', 'I', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Hewlett-Packard HP t5740", {{9U, (unsigned char)0, {'H', 'e', 'w', 'l', 'e', 't', 't', '-', 'P', 'a', 'c', 'k', 'a', 'r', 'd', '\000'}}, {5U, (unsigned char)0, {' ', 't', '5', '7', '4', '0', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Hewlett-Packard t5745", {{9U, (unsigned char)0, {'H', 'e', 'w', 'l', 'e', 't', 't', '-', 'P', 'a', 'c', 'k', 'a', 'r', 'd', '\000'}}, {5U, (unsigned char)0, {'h', 'p', ' ', 't', '5', '7', '4', '5', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Hewlett-Packard st5747", {{9U, (unsigned char)0, {'H', 'e', 'w', 'l', 'e', 't', 't', '-', 'P', 'a', 'c', 'k', 'a', 'r', 'd', '\000'}}, {5U, (unsigned char)0, {'h', 'p', ' ', 's', 't', '5', '7', '4', '7', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "MSI Wind Box DC500", {{9U, (unsigned char)0, {'M', 'I', 'C', 'R', 'O', '-', 'S', 'T', 'A', 'R', ' ', 'I', 'N', 'T', 'E', 'R', 'N', 'A', 'T', 'I', 'O', 'N', 'A', 'L', ' ', 'C', 'O', '.', ',', ' ', 'L', 'T', 'D', '\000'}}, {10U, (unsigned char)0, {'M', 'S', '-', '7', '4', '6', '9', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Gigabyte GA-D525TUD", {{9U, (unsigned char)0, {'G', 'i', 'g', 'a', 'b', 'y', 't', 'e', ' ', 'T', 'e', 'c', 'h', 'n', 'o', 'l', 'o', 'g', 'y', ' ', 'C', 'o', '.', ',', ' ', 'L', 't', 'd', '.', '\000'}}, {10U, (unsigned char)0, {'D', '5', '2', '5', 'T', 'U', 'D', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Supermicro X7SPA-H", {{4U, (unsigned char)0, {'S', 'u', 'p', 'e', 'r', 'm', 'i', 'c', 'r', 'o', '\000'}}, {5U, (unsigned char)0, {'X', '7', 'S', 'P', 'A', '-', 'H', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Fujitsu Esprimo Q900",
{{4U, (unsigned char)0, {'F', 'U', 'J', 'I', 'T', 'S', 'U', '\000'}}, {5U, (unsigned char)0, {'E', 'S', 'P', 'R', 'I', 'M', 'O', ' ', 'Q', '9', '0', '0', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Intel D410PT", {{9U, (unsigned char)0, {'I', 'n', 't', 'e', 'l', '\000'}}, {10U, (unsigned char)0, {'D', '4', '1', '0', 'P', 'T', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Intel D425KT", {{9U, (unsigned char)0, {'I', 'n', 't', 'e', 'l', '\000'}}, {10U, 1U, {'D', '4', '2', '5', 'K', 'T', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Intel D510MO", {{9U, (unsigned char)0, {'I', 'n', 't', 'e', 'l', '\000'}}, {10U, 1U, {'D', '5', '1', '0', 'M', 'O', '\000'}}}, 0}, {& intel_no_lvds_dmi_callback, "Intel D525MW", {{9U, (unsigned char)0, {'I', 'n', 't', 'e', 'l', '\000'}}, {10U, 1U, {'D', '5', '2', '5', 'M', 'W', '\000'}}}, 0}};
/*
 * Scan the VBT child-device table for an LVDS entry.  Returns true (panel
 * assumed present) when the table is empty, or when a matching child
 * (device_type 4130U or 34U) has a non-zero addin_offset or an opregion VBT
 * exists; as a side effect *i2c_pin is updated when the child's i2c_pin is a
 * valid GMBUS pin.  The ldv_52327/8/9 labels are the CIL-lowered for-loop.
 */
static bool lvds_is_present_in_vbt(struct drm_device *dev , u8 *i2c_pin ) { struct drm_i915_private *dev_priv ; int i ; union child_device_config *uchild ; struct old_child_dev_config *child ; bool tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (dev_priv->vbt.child_dev_num == 0) { return (1); } else { } i = 0; goto ldv_52329; ldv_52328: uchild = dev_priv->vbt.child_dev + (unsigned long )i; child = & uchild->old; if ((unsigned int )child->device_type != 4130U && (unsigned int )child->device_type != 34U) { goto ldv_52327; } else { } tmp = intel_gmbus_is_valid_pin(dev_priv, (unsigned int )child->i2c_pin); if ((int )tmp) { *i2c_pin = child->i2c_pin; } else { } if ((unsigned int )child->addin_offset != 0U) { return (1); } else { } if ((unsigned long )dev_priv->opregion.vbt != (unsigned long )((void *)0)) { return (1); } else { } ldv_52327: i = i + 1; ldv_52329: ; if (dev_priv->vbt.child_dev_num > i) { goto ldv_52328; } else { } return (0); } }
/* DMI callback: log that dual-link LVDS is being forced for this machine. */
static int intel_dual_link_lvds_callback(struct dmi_system_id const *id ) { { printk("\016[drm] Forcing lvds to dual link mode on %s\n", id->ident); return (1); } }
/* Header of the dual-link quirk table; the initializer is on the next line. */
static struct
/* Quirk table: MacBook Pro models whose panels require dual-link LVDS. */
dmi_system_id const intel_dual_link_lvds[4U] = { {& intel_dual_link_lvds_callback, "Apple MacBook Pro 15\" (2010)", {{4U, (unsigned char)0, {'A', 'p', 'p', 'l', 'e', ' ', 'I', 'n', 'c', '.', '\000'}}, {5U, (unsigned char)0, {'M', 'a', 'c', 'B', 'o', 'o', 'k', 'P', 'r', 'o', '6', ',', '2', '\000'}}}, 0}, {& intel_dual_link_lvds_callback, "Apple MacBook Pro 15\" (2011)", {{4U, (unsigned char)0, {'A', 'p', 'p', 'l', 'e', ' ', 'I', 'n', 'c', '.', '\000'}}, {5U, (unsigned char)0, {'M', 'a', 'c', 'B', 'o', 'o', 'k', 'P', 'r', 'o', '8', ',', '2', '\000'}}}, 0}, {& intel_dual_link_lvds_callback, "Apple MacBook Pro 15\" (2012)", {{4U, (unsigned char)0, {'A', 'p', 'p', 'l', 'e', ' ', 'I', 'n', 'c', '.', '\000'}}, {5U, (unsigned char)0, {'M', 'a', 'c', 'B', 'o', 'o', 'k', 'P', 'r', 'o', '9', ',', '1', '\000'}}}, 0}};
/*
 * Walk dev->mode_config.encoder_list (CIL-expanded list_for_each_entry;
 * the +0xfffffffffffffff8UL is container_of at offset -8) looking for the
 * encoder of type 4U (LVDS) and return its cached is_dual_link flag;
 * false when no LVDS encoder exists.
 */
bool intel_is_dual_link_lvds(struct drm_device *dev ) { struct intel_encoder *encoder ; struct intel_lvds_encoder *lvds_encoder ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev->mode_config.encoder_list.next; encoder = (struct intel_encoder *)__mptr + 0xfffffffffffffff8UL; goto ldv_52345; ldv_52344: ; if ((unsigned int )encoder->type == 4U) { lvds_encoder = to_lvds_encoder(& encoder->base); return (lvds_encoder->is_dual_link); } else { } __mptr___0 = (struct list_head const *)encoder->base.head.next; encoder = (struct intel_encoder *)__mptr___0 + 0xfffffffffffffff8UL; ldv_52345: ; if ((unsigned long )(& encoder->base.head) != (unsigned long )(& dev->mode_config.encoder_list)) { goto ldv_52344; } else { } return (0); } }
/*
 * Decide single vs. dual link: the i915.lvds_channel_mode module parameter
 * wins when set (>0; ==2 means dual); a fixed-mode pixel clock above
 * 112999 kHz forces dual; then the DMI quirk table; finally the LVDS
 * register is read (falling back to the BIOS-saved VBT value when the
 * channel bits are all clear) and both channel-enable bits (0x30) checked.
 */
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder ) { struct drm_device *dev ; unsigned int val ; struct drm_i915_private *dev_priv ; int tmp ; { dev = lvds_encoder->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; if (i915.lvds_channel_mode > 0) { return (i915.lvds_channel_mode == 2); } else { } if
(((lvds_encoder->attached_connector)->base.panel.fixed_mode)->clock > 112999) { return (1); } else { } tmp = dmi_check_system((struct dmi_system_id const *)(& intel_dual_link_lvds)); if (tmp != 0) { return (1); } else { } val = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 1); if ((val & 3221225469U) == 0U) { val = dev_priv->vbt.bios_lvds_val; } else { } return ((val & 48U) == 48U); } }
/*
 * Platform gate for LVDS support: true on PCH types 1U/2U, or on gen <= 4
 * hardware when the mobile flag (byte at offset 44 of dev_priv -- CIL's
 * lowering of a bitfield test) is set and the device id is not 13687U
 * (0x3577).  Everything else has no LVDS port.
 */
static bool intel_lvds_supported(struct drm_device *dev ) { struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { return (1); } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type == 2U) { return (1); } else { } } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) <= 4U) { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) != 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___3->info.device_id) != 13687U) { return (1); } else { } } else { } } else { } return (0); } }
/* intel_lvds_init(): driver-load setup of the LVDS output.  This line holds
 * only the (very long) CIL declaration block; the body starts on the next line. */
void intel_lvds_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_lvds_encoder *lvds_encoder ; struct intel_encoder *intel_encoder ; struct intel_lvds_connector *lvds_connector ; struct intel_connector *intel_connector ; struct drm_connector *connector ; struct drm_encoder *encoder ; struct drm_display_mode *scan ; struct drm_display_mode *fixed_mode ; struct drm_display_mode *downclock_mode ; struct edid *edid ; struct drm_crtc *crtc ; u32 lvds ; int pipe ; u8 pin ; uint32_t tmp ; uint32_t tmp___0 ; struct drm_i915_private *__p ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; bool tmp___5 ; int tmp___6 ; uint32_t tmp___7 ; long tmp___8 ; struct
/* intel_lvds_init() continued: remaining declarations, then the body.
 * Early-out checks: MMIO write enabling the register at 815620L/397828L
 * (PCH vs. non-PCH), intel_lvds_supported(), the intel_no_lvds DMI
 * blacklist, VBT presence (default GMBUS pin 3U), and on PCH hardware
 * the LVDS-detected bit and the eDP-takes-precedence case. */
drm_i915_private *__p___0 ; void *tmp___9 ; void *tmp___10 ; int tmp___11 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct i2c_adapter *tmp___12 ; void *tmp___13 ; int tmp___14 ; void *tmp___15 ; bool tmp___16 ; struct list_head const *__mptr ; long tmp___17 ; long tmp___18 ; struct list_head const *__mptr___0 ; long tmp___19 ; struct drm_i915_private *__p___4 ; long tmp___20 ; long tmp___21 ; uint32_t tmp___22 ; long tmp___23 ; int tmp___24 ; long tmp___25 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; fixed_mode = (struct drm_display_mode *)0; downclock_mode = (struct drm_display_mode *)0; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 815620L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 815620L, tmp | 2882338816U, 1); } else { tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397828L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397828L, tmp___0 | 2882338816U, 1); } tmp___1 = intel_lvds_supported(dev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return; } else { } tmp___3 = dmi_check_system((struct dmi_system_id const *)(& intel_no_lvds)); if (tmp___3 != 0) { return; } else { } pin = 3U; tmp___5 = lvds_is_present_in_vbt(dev, & pin); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_lvds_init", "LVDS is not present in VBT\n"); } else { } return; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___0->pch_type != 0U) { tmp___7 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 921984L, 1); if ((tmp___7 & 2U) == 0U) { return; } else { } if ((int )dev_priv->vbt.edp_support) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) {
/* Allocation and wiring: kzalloc the encoder (240 bytes) and connector
 * (1096 bytes) wrappers with rollback on failure, initialize the DRM
 * connector/encoder, install the intel_encoder/intel_connector hooks,
 * attach them, set the encoder type (4 == LVDS) and per-platform crtc_mask. */
drm_ut_debug_printk("intel_lvds_init", "disable LVDS for eDP support\n"); } else { } return; } else { } tmp___9 = kzalloc(240UL, 208U); lvds_encoder = (struct intel_lvds_encoder *)tmp___9; if ((unsigned long )lvds_encoder == (unsigned long )((struct intel_lvds_encoder *)0)) { return; } else { } tmp___10 = kzalloc(1096UL, 208U); lvds_connector = (struct intel_lvds_connector *)tmp___10; if ((unsigned long )lvds_connector == (unsigned long )((struct intel_lvds_connector *)0)) { kfree((void const *)lvds_encoder); return; } else { } tmp___11 = intel_connector_init(& lvds_connector->base); if (tmp___11 < 0) { kfree((void const *)lvds_connector); kfree((void const *)lvds_encoder); return; } else { } lvds_encoder->attached_connector = lvds_connector; intel_encoder = & lvds_encoder->base; encoder = & intel_encoder->base; intel_connector = & lvds_connector->base; connector = & intel_connector->base; drm_connector_init(dev, & intel_connector->base, & intel_lvds_connector_funcs, 7); drm_encoder_init(dev, & intel_encoder->base, & intel_lvds_enc_funcs, 3); intel_encoder->enable = & intel_enable_lvds; intel_encoder->pre_enable = & intel_pre_enable_lvds; intel_encoder->compute_config = & intel_lvds_compute_config; intel_encoder->disable = & intel_disable_lvds; intel_encoder->get_hw_state = & intel_lvds_get_hw_state; intel_encoder->get_config = & intel_lvds_get_config; intel_connector->get_hw_state = & intel_connector_get_hw_state; intel_connector->unregister = & intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = 4; intel_encoder->cloneable = 0U; __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type != 0U) { intel_encoder->crtc_mask = 7; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) == 4U) { intel_encoder->crtc_mask = 3; } else { intel_encoder->crtc_mask = 2; } } drm_connector_helper_add(connector, &
/* Connector properties and EDID probe: attach the scaling-mode property
 * (default 3), pick the LVDS register per platform, then read the EDID over
 * GMBUS under the mode_config mutex.  A failed/absent EDID is cached as an
 * ERR_PTR (-22 invalid / -2 not found) and conservative vfreq/hfreq limits
 * are assumed. */
intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = 1; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___3->pch_type != 0U) { lvds_encoder->reg = 921984U; } else { lvds_encoder->reg = 397696U; } drm_mode_create_scaling_mode_property(dev); drm_object_attach_property(& connector->base, dev->mode_config.scaling_mode_property, 3ULL); intel_connector->panel.fitting_mode = 3; mutex_lock_nested(& dev->mode_config.mutex, 0U); tmp___12 = intel_gmbus_get_adapter(dev_priv, (unsigned int )pin); edid = drm_get_edid(connector, tmp___12); if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { tmp___14 = drm_add_edid_modes(connector, edid); if (tmp___14 != 0) { drm_mode_connector_update_edid_property(connector, (struct edid const *)edid); } else { kfree((void const *)edid); tmp___13 = ERR_PTR(-22L); edid = (struct edid *)tmp___13; } } else { tmp___15 = ERR_PTR(-2L); edid = (struct edid *)tmp___15; } lvds_connector->base.edid = edid; tmp___16 = IS_ERR_OR_NULL((void const *)edid); if ((int )tmp___16) { connector->display_info.min_vfreq = 0U; connector->display_info.max_vfreq = 200U; connector->display_info.min_hfreq = 0U; connector->display_info.max_hfreq = 200U; } else { } __mptr = (struct list_head const *)connector->probed_modes.next; scan = (struct drm_display_mode *)__mptr; goto ldv_52441; ldv_52440: ; if ((scan->type & 8U) != 0U) { tmp___17 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___17 != 0L) { drm_ut_debug_printk("intel_lvds_init", "using preferred mode from EDID: "); } else { } drm_mode_debug_printmodeline((struct drm_display_mode const *)scan); fixed_mode = drm_mode_duplicate(dev, (struct drm_display_mode const *)scan); if ((unsigned long )fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { downclock_mode = intel_find_panel_downclock(dev, fixed_mode, connector); if ((unsigned long )downclock_mode != (unsigned long
/* Fixed-mode selection fallbacks: preferred EDID mode (with optional
 * downclock when i915.lvds_downclock is set), then the VBT LFP mode, then --
 * non-PCH only -- the mode currently programmed by the BIOS on the active
 * pipe; otherwise fail. */
)((struct drm_display_mode *)0) && i915.lvds_downclock != 0U) { dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = downclock_mode->clock; tmp___18 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___18 != 0L) { drm_ut_debug_printk("intel_lvds_init", "LVDS downclock is found in EDID. Normal clock %dKhz, downclock %dKhz\n", fixed_mode->clock, dev_priv->lvds_downclock); } else { } } else { } goto out; } else { } } else { } __mptr___0 = (struct list_head const *)scan->head.next; scan = (struct drm_display_mode *)__mptr___0; ldv_52441: ; if ((unsigned long )(& scan->head) != (unsigned long )(& connector->probed_modes)) { goto ldv_52440; } else { } if ((unsigned long )dev_priv->vbt.lfp_lvds_vbt_mode != (unsigned long )((struct drm_display_mode *)0)) { tmp___19 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___19 != 0L) { drm_ut_debug_printk("intel_lvds_init", "using mode from VBT: "); } else { } drm_mode_debug_printmodeline((struct drm_display_mode const *)dev_priv->vbt.lfp_lvds_vbt_mode); fixed_mode = drm_mode_duplicate(dev, (struct drm_display_mode const *)dev_priv->vbt.lfp_lvds_vbt_mode); if ((unsigned long )fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { fixed_mode->type = fixed_mode->type | 8U; goto out; } else { } } else { } __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___4->pch_type != 0U) { goto failed; } else { } lvds = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397696L, 1); pipe = (lvds & 1073741824U) != 0U; crtc = intel_get_crtc_for_pipe(dev, pipe); if ((unsigned long )crtc != (unsigned long )((struct drm_crtc *)0) && (int )lvds < 0) { fixed_mode = intel_crtc_mode_get(dev, crtc); if ((unsigned long )fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { tmp___20 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___20 != 0L) { drm_ut_debug_printk("intel_lvds_init", "using current (BIOS) mode: "); } else { } drm_mode_debug_printmodeline((struct drm_display_mode
/* Success path ("out"): init the panel, compute dual-link, latch the A3
 * power bits, register the ACPI lid notifier (non-fatal on failure),
 * register the connector and set up backlight.  Failure path unwinds the
 * connector/encoder allocations.  LDV initializers below provide the
 * harness groups for the vtables. */
const *)fixed_mode); fixed_mode->type = fixed_mode->type | 8U; goto out; } else { } } else { } if ((unsigned long )fixed_mode == (unsigned long )((struct drm_display_mode *)0)) { goto failed; } else { } out: mutex_unlock(& dev->mode_config.mutex); intel_panel_init(& intel_connector->panel, fixed_mode, downclock_mode); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); tmp___21 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___21 != 0L) { drm_ut_debug_printk("intel_lvds_init", "detected %s-link lvds configuration\n", (int )lvds_encoder->is_dual_link ? (char *)"dual" : (char *)"single"); } else { } tmp___22 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )lvds_encoder->reg, 1); lvds_encoder->a3_power = tmp___22 & 192U; lvds_connector->lid_notifier.notifier_call = & intel_lid_notify; tmp___24 = acpi_lid_notifier_register(& lvds_connector->lid_notifier); if (tmp___24 != 0) { tmp___23 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___23 != 0L) { drm_ut_debug_printk("intel_lvds_init", "lid notifier registration failed\n"); } else { } lvds_connector->lid_notifier.notifier_call = (int (*)(struct notifier_block * , unsigned long , void * ))0; } else { } drm_connector_register(connector); intel_panel_setup_backlight(connector, -1); return; failed: mutex_unlock(& dev->mode_config.mutex); tmp___25 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___25 != 0L) { drm_ut_debug_printk("intel_lvds_init", "No LVDS modes found, disabling.\n"); } else { } drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); kfree((void const *)lvds_encoder); kfree((void const *)lvds_connector); return; } } extern int ldv_probe_33(void) ; extern int ldv_probe_32(void) ; void ldv_initialize_drm_connector_helper_funcs_34(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_lvds_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_initialize_drm_connector_funcs_33(void) { void *tmp ; void *tmp___0 ; {
/* LDV harness (auto-generated): allocates the argument group objects, then
 * ldv_main_exported_33() nondeterministically drives the
 * intel_lvds_connector_funcs vtable through a 2-state (probed/unprobed)
 * machine, tracking ref_cnt across probe/destroy. */
tmp = ldv_init_zalloc(104UL); intel_lvds_connector_funcs_group0 = (struct drm_property *)tmp; tmp___0 = ldv_init_zalloc(936UL); intel_lvds_connector_funcs_group1 = (struct drm_connector *)tmp___0; return; } } void ldv_main_exported_33(void) { struct drm_connector_state *ldvarg245 ; void *tmp ; uint64_t ldvarg248 ; bool ldvarg249 ; struct drm_connector_state *ldvarg250 ; void *tmp___0 ; int ldvarg251 ; uint64_t *ldvarg244 ; void *tmp___1 ; uint32_t ldvarg247 ; uint32_t ldvarg246 ; int tmp___2 ; { tmp = ldv_init_zalloc(32UL); ldvarg245 = (struct drm_connector_state *)tmp; tmp___0 = ldv_init_zalloc(32UL); ldvarg250 = (struct drm_connector_state *)tmp___0; tmp___1 = ldv_init_zalloc(8UL); ldvarg244 = (uint64_t *)tmp___1; ldv_memset((void *)(& ldvarg248), 0, 8UL); ldv_memset((void *)(& ldvarg249), 0, 1UL); ldv_memset((void *)(& ldvarg251), 0, 4UL); ldv_memset((void *)(& ldvarg247), 0, 4UL); ldv_memset((void *)(& ldvarg246), 0, 4UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_33 == 2) { intel_connector_dpms(intel_lvds_connector_funcs_group1, ldvarg251); ldv_state_variable_33 = 2; } else { } if (ldv_state_variable_33 == 1) { intel_connector_dpms(intel_lvds_connector_funcs_group1, ldvarg251); ldv_state_variable_33 = 1; } else { } goto ldv_52472; case 1: ; if (ldv_state_variable_33 == 2) { drm_atomic_helper_connector_destroy_state(intel_lvds_connector_funcs_group1, ldvarg250); ldv_state_variable_33 = 2; } else { } if (ldv_state_variable_33 == 1) { drm_atomic_helper_connector_destroy_state(intel_lvds_connector_funcs_group1, ldvarg250); ldv_state_variable_33 = 1; } else { } goto ldv_52472; case 2: ; if (ldv_state_variable_33 == 2) { drm_atomic_helper_connector_duplicate_state(intel_lvds_connector_funcs_group1); ldv_state_variable_33 = 2; } else { } if (ldv_state_variable_33 == 1) { drm_atomic_helper_connector_duplicate_state(intel_lvds_connector_funcs_group1); ldv_state_variable_33 = 1; } else { } goto ldv_52472; case 3: ; if
(ldv_state_variable_33 == 2) { intel_lvds_detect(intel_lvds_connector_funcs_group1, (int )ldvarg249); ldv_state_variable_33 = 2; } else { } if (ldv_state_variable_33 == 1) { intel_lvds_detect(intel_lvds_connector_funcs_group1, (int )ldvarg249); ldv_state_variable_33 = 1; } else { } goto ldv_52472; case 4: ; if (ldv_state_variable_33 == 2) { intel_lvds_set_property(intel_lvds_connector_funcs_group1, intel_lvds_connector_funcs_group0, ldvarg248); ldv_state_variable_33 = 2; } else { } if (ldv_state_variable_33 == 1) { intel_lvds_set_property(intel_lvds_connector_funcs_group1, intel_lvds_connector_funcs_group0, ldvarg248); ldv_state_variable_33 = 1; } else { } goto ldv_52472; case 5: ; if (ldv_state_variable_33 == 2) { intel_lvds_destroy(intel_lvds_connector_funcs_group1); ldv_state_variable_33 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_52472; case 6: ; if (ldv_state_variable_33 == 2) { drm_helper_probe_single_connector_modes(intel_lvds_connector_funcs_group1, ldvarg247, ldvarg246); ldv_state_variable_33 = 2; } else { } if (ldv_state_variable_33 == 1) { drm_helper_probe_single_connector_modes(intel_lvds_connector_funcs_group1, ldvarg247, ldvarg246); ldv_state_variable_33 = 1; } else { } goto ldv_52472; case 7: ; if (ldv_state_variable_33 == 2) { intel_connector_atomic_get_property(intel_lvds_connector_funcs_group1, (struct drm_connector_state const *)ldvarg245, intel_lvds_connector_funcs_group0, ldvarg244); ldv_state_variable_33 = 2; } else { } if (ldv_state_variable_33 == 1) { intel_connector_atomic_get_property(intel_lvds_connector_funcs_group1, (struct drm_connector_state const *)ldvarg245, intel_lvds_connector_funcs_group0, ldvarg244); ldv_state_variable_33 = 1; } else { } goto ldv_52472; case 8: ; if (ldv_state_variable_33 == 1) { ldv_probe_33(); ldv_state_variable_33 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_52472; default: ldv_stop(); } ldv_52472: ; return; } } void ldv_main_exported_32(void) { struct drm_encoder *ldvarg3 ; void *tmp ; int tmp___0 ; {
/* Harness driver for intel_lvds_enc_funcs (destroy) and for
 * intel_lvds_connector_helper_funcs (get_modes / mode_valid / best_encoder). */
tmp = ldv_init_zalloc(96UL); ldvarg3 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_32 == 2) { intel_encoder_destroy(ldvarg3); ldv_state_variable_32 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_52487; case 1: ; if (ldv_state_variable_32 == 1) { ldv_probe_32(); ldv_state_variable_32 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_52487; default: ldv_stop(); } ldv_52487: ; return; } } void ldv_main_exported_34(void) { struct drm_display_mode *ldvarg564 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg564 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_34 == 1) { intel_lvds_get_modes(intel_lvds_connector_helper_funcs_group0); ldv_state_variable_34 = 1; } else { } goto ldv_52495; case 1: ; if (ldv_state_variable_34 == 1) { intel_lvds_mode_valid(intel_lvds_connector_helper_funcs_group0, ldvarg564); ldv_state_variable_34 = 1; } else { } goto ldv_52495; case 2: ; if (ldv_state_variable_34 == 1) { intel_best_encoder(intel_lvds_connector_helper_funcs_group0); ldv_state_variable_34 = 1; } else { } goto ldv_52495; default: ldv_stop(); } ldv_52495: ; return; } } bool ldv_queue_work_on_993(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_994(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_995(int ldv_func_arg1 , struct
/* LDV workqueue interposers: forward to the real queue_* call and record the
 * work item in the harness model (activate_work_18 / call_and_disable_all_18).
 * Below them begin the intel_panel.c declarations and backlight externs. */
workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_996(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_997(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; bool ldv_queue_work_on_1007(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_1009(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_1008(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_1011(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_1010(struct workqueue_struct *ldv_func_arg1 ) ; extern struct backlight_device *backlight_device_register(char const * , struct device * , void * , struct backlight_ops const * , struct backlight_properties const * ) ; extern void backlight_device_unregister(struct backlight_device * ) ; __inline static void *bl_get_data(struct backlight_device *bl_dev ) { void *tmp ; { tmp = dev_get_drvdata((struct device const
/* Tail of bl_get_data(): returns the driver data attached to the backlight
 * device.  Then begins the flattened intel_panel.c. */
*)(& bl_dev->dev)); return (tmp); } } extern void drm_mode_destroy(struct drm_device * , struct drm_display_mode * ) ;
/* Copy the panel's fixed mode into the adjusted mode and fill in CRTC timings. */
void intel_fixed_panel_mode(struct drm_display_mode const *fixed_mode , struct drm_display_mode *adjusted_mode ) { { drm_mode_copy(adjusted_mode, fixed_mode); drm_mode_set_crtcinfo(adjusted_mode, 0); return; } }
/*
 * Search the connector's probed modes for one with identical h/v timings to
 * fixed_mode but a strictly lower pixel clock; return a duplicate of the
 * lowest-clock match, or NULL when none beats fixed_mode->clock.
 * ldv_47993/4 labels are the CIL-lowered list_for_each_entry loop.
 */
struct drm_display_mode *intel_find_panel_downclock(struct drm_device *dev , struct drm_display_mode *fixed_mode , struct drm_connector *connector ) { struct drm_display_mode *scan ; struct drm_display_mode *tmp_mode ; int temp_downclock ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct drm_display_mode *tmp ; { temp_downclock = fixed_mode->clock; tmp_mode = (struct drm_display_mode *)0; __mptr = (struct list_head const *)connector->probed_modes.next; scan = (struct drm_display_mode *)__mptr; goto ldv_47994; ldv_47993: ; if (((((((scan->hdisplay == fixed_mode->hdisplay && scan->hsync_start == fixed_mode->hsync_start) && scan->hsync_end == fixed_mode->hsync_end) && scan->htotal == fixed_mode->htotal) && scan->vdisplay == fixed_mode->vdisplay) && scan->vsync_start == fixed_mode->vsync_start) && scan->vsync_end == fixed_mode->vsync_end) && scan->vtotal == fixed_mode->vtotal) { if (scan->clock < temp_downclock) { temp_downclock = scan->clock; tmp_mode = scan; } else { } } else { } __mptr___0 = (struct list_head const *)scan->head.next; scan = (struct drm_display_mode *)__mptr___0; ldv_47994: ; if ((unsigned long )(& scan->head) != (unsigned long )(& connector->probed_modes)) { goto ldv_47993; } else { } if (fixed_mode->clock > temp_downclock) { tmp = drm_mode_duplicate(dev, (struct drm_display_mode const *)tmp_mode); return (tmp); } else { return ((struct drm_display_mode *)0); } } }
/*
 * PCH panel fitter setup: computes the pfit window (pos/size) for the pipe
 * source rectangle inside the adjusted mode.  Mode 2 centres unscaled,
 * mode 3 scales preserving aspect (pillarbox/letterbox, widths/heights
 * rounded up to even), mode 1 stretches full-screen; any other mode hits
 * the WARN path.  No-op when the source already matches the display size.
 */
void intel_pch_panel_fitting(struct intel_crtc *intel_crtc , struct intel_crtc_state *pipe_config , int fitting_mode ) { struct drm_display_mode *adjusted_mode ; int x ; int y ; int width ; int height ; u32 scaled_width ; u32
scaled_height ; int __ret_warn_on ; long tmp ; { adjusted_mode = & pipe_config->base.adjusted_mode; height = 0; width = height; y = width; x = y; if (adjusted_mode->hdisplay == pipe_config->pipe_src_w && adjusted_mode->vdisplay == pipe_config->pipe_src_h) { goto done; } else { } switch (fitting_mode) { case 2: width = pipe_config->pipe_src_w; height = pipe_config->pipe_src_h; x = ((adjusted_mode->hdisplay - width) + 1) / 2; y = ((adjusted_mode->vdisplay - height) + 1) / 2; goto ldv_48008; case 3: scaled_width = (u32 )(adjusted_mode->hdisplay * pipe_config->pipe_src_h); scaled_height = (u32 )(pipe_config->pipe_src_w * adjusted_mode->vdisplay); if (scaled_width > scaled_height) { width = (int )(scaled_height / (u32 )pipe_config->pipe_src_h); if (width & 1) { width = width + 1; } else { } x = ((adjusted_mode->hdisplay - width) + 1) / 2; y = 0; height = adjusted_mode->vdisplay; } else if (scaled_width < scaled_height) { height = (int )(scaled_width / (u32 )pipe_config->pipe_src_w); if (height & 1) { height = height + 1; } else { } y = ((adjusted_mode->vdisplay - height) + 1) / 2; x = 0; width = adjusted_mode->hdisplay; } else { y = 0; x = y; width = adjusted_mode->hdisplay; height = adjusted_mode->vdisplay; } goto ldv_48008; case 1: y = 0; x = y; width = adjusted_mode->hdisplay; height = adjusted_mode->vdisplay; goto ldv_48008; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 161, "bad panel fit mode: %d\n", fitting_mode); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } ldv_48008: ; done: pipe_config->pch_pfit.pos = (u32 )((x << 16) | y); pipe_config->pch_pfit.size = (u32 )((width << 16) | height); pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0U; return; } }
/*
 * CIL-preprocessed LDV verifier rendering of drivers/gpu/drm/i915/intel_panel.c
 * (see the path strings in the warn_slowpath_fmt() calls below).  The code is
 * machine-generated: tokens must stay exactly as emitted; comments only.
 */
/*
 * centre_horizontally - recompute the CRTC horizontal timings so a `width`-pixel
 * image is centred inside the adjusted mode.  The sync and blank widths are
 * preserved and shifted by the computed border; the border is rounded up to an
 * even pixel count via `border = (border & 1U) + border`.
 */
static void centre_horizontally(struct drm_display_mode *mode , int width ) { u32 border ; u32 sync_pos ; u32 blank_width ; u32 sync_width ; { sync_width = (u32 )(mode->crtc_hsync_end - mode->crtc_hsync_start); blank_width = (u32 )(mode->crtc_hblank_end - mode->crtc_hblank_start); sync_pos = ((blank_width - sync_width) + 1U) / 2U; border = (u32 )(((mode->hdisplay - width) + 1) / 2); border = (border & 1U) + border; mode->crtc_hdisplay = width; mode->crtc_hblank_start = (int )((u32 )width + border); mode->crtc_hblank_end = (int )((u32 )mode->crtc_hblank_start + blank_width); mode->crtc_hsync_start = (int )((u32 )mode->crtc_hblank_start + sync_pos); mode->crtc_hsync_end = (int )((u32 )mode->crtc_hsync_start + sync_width); return; } }
/*
 * centre_vertically - vertical counterpart of centre_horizontally.  Note there
 * is no even-rounding of the border here (matches the upstream source).
 */
static void centre_vertically(struct drm_display_mode *mode , int height ) { u32 border ; u32 sync_pos ; u32 blank_width ; u32 sync_width ; { sync_width = (u32 )(mode->crtc_vsync_end - mode->crtc_vsync_start); blank_width = (u32 )(mode->crtc_vblank_end - mode->crtc_vblank_start); sync_pos = ((blank_width - sync_width) + 1U) / 2U; border = (u32 )(((mode->vdisplay - height) + 1) / 2); mode->crtc_vdisplay = height; mode->crtc_vblank_start = (int )((u32 )height + border); mode->crtc_vblank_end = (int )((u32 )mode->crtc_vblank_start + blank_width); mode->crtc_vsync_start = (int )((u32 )mode->crtc_vblank_start + sync_pos); mode->crtc_vsync_end = (int )((u32 )mode->crtc_vsync_start + sync_width); return; } }
/*
 * panel_fitter_scaling - source/target ratio in 12.12 fixed point, rounded to
 * nearest (the "+ 2048U" is half of 4096).  Divides by `target`; callers only
 * invoke it when source != target (see i9xx_scale_aspect).
 */
__inline static u32 panel_fitter_scaling(u32 source , u32 target ) { u32 ratio ; { ratio = (source * 4096U) / target; return ((ratio * 4096U + 2048U) / 4096U); } }
/*
 * i965_scale_aspect - gen4+ aspect-preserving auto scaling.  Compares
 * hdisplay*pipe_src_h against pipe_src_w*vdisplay (cross-multiplication
 * avoids a division) to decide pillarbox vs letterbox.  Continues on the
 * next generated line.
 */
static void i965_scale_aspect(struct intel_crtc_state *pipe_config , u32 *pfit_control ) { struct drm_display_mode *adjusted_mode ; u32 scaled_width ; u32 scaled_height ; { adjusted_mode = & pipe_config->base.adjusted_mode; scaled_width = (u32 )(adjusted_mode->hdisplay * pipe_config->pipe_src_h); scaled_height = (u32 )(pipe_config->pipe_src_w * adjusted_mode->vdisplay); if
/* (continuation of i965_scale_aspect) 2281701376U = 0x88000000 (enable|pillarbox),
 * 2348810240U = 0x8C000000 (enable|letterbox), 2147483648U = 0x80000000
 * (enable, auto scaling) -- presumably PFIT_ENABLE plus scaling-mode bits. */
(scaled_width > scaled_height) { *pfit_control = *pfit_control | 2281701376U; } else if (scaled_width < scaled_height) { *pfit_control = *pfit_control | 2348810240U; } else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w) { *pfit_control = *pfit_control | 2147483648U; } else { } return; } }
/*
 * i9xx_scale_aspect - pre-i965 aspect-preserving scaling: centres the smaller
 * axis in the adjusted mode, enables the LVDS border (32768U = 0x8000) and,
 * when real scaling is needed, programs the 12.12 panel-fitter ratio into both
 * the horizontal (<<4) and vertical (<<20) fields.  2147484736U = 0x80000440
 * and 2147485280U = 0x800006E0 are enable + interpolation/auto-scale bit
 * combinations from the original macros.
 */
static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config , u32 *pfit_control , u32 *pfit_pgm_ratios , u32 *border ) { struct drm_display_mode *adjusted_mode ; u32 scaled_width ; u32 scaled_height ; u32 bits ; { adjusted_mode = & pipe_config->base.adjusted_mode; scaled_width = (u32 )(adjusted_mode->hdisplay * pipe_config->pipe_src_h); scaled_height = (u32 )(pipe_config->pipe_src_w * adjusted_mode->vdisplay); if (scaled_width > scaled_height) { centre_horizontally(adjusted_mode, (int )(scaled_height / (u32 )pipe_config->pipe_src_h)); *border = 32768U; if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) { bits = panel_fitter_scaling((u32 )pipe_config->pipe_src_h, (u32 )adjusted_mode->vdisplay); *pfit_pgm_ratios = *pfit_pgm_ratios | ((bits << 4) | (bits << 20)); *pfit_control = *pfit_control | 2147484736U; } else { } } else if (scaled_width < scaled_height) { centre_vertically(adjusted_mode, (int )(scaled_width / (u32 )pipe_config->pipe_src_w)); *border = 32768U; if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) { bits = panel_fitter_scaling((u32 )pipe_config->pipe_src_w, (u32 )adjusted_mode->hdisplay); *pfit_pgm_ratios = *pfit_pgm_ratios | ((bits << 4) | (bits << 20)); *pfit_control = *pfit_control | 2147484736U; } else { } } else { *pfit_control = *pfit_control | 2147485280U; } return; } }
/*
 * intel_gmch_panel_fitting - program the GMCH panel fitter state in
 * pipe_config for the requested fitting_mode (2 = centre, 3 = aspect,
 * 1 = fullscreen; WARN otherwise).  Declarations continue on the next line.
 */
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc , struct intel_crtc_state *pipe_config , int fitting_mode ) { struct drm_device *dev ; u32 pfit_control ; u32 pfit_pgm_ratios ; u32 border ; struct drm_display_mode *adjusted_mode ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int __ret_warn_on ; long tmp ; struct drm_i915_private *__p___1
/* (continuation of intel_gmch_panel_fitting)  Skips fitting entirely when the
 * adjusted mode already equals the pipe source size.  Case 3 dispatches to the
 * gen4+ (i965) or pre-gen4 (i9xx) aspect helper.  In case 1 the gen4+ branch
 * `pfit_control = pfit_control;` is the CIL rendering of OR-ing a zero-valued
 * auto-scaling macro -- a deliberate no-op, not a bug.  At `out:` the whole
 * fitter state is cleared unless the enable bit 31 is set (tested via the
 * signed compare `(int )pfit_control >= 0`); gen<=3 at 18bpp additionally sets
 * bit 3 (presumably the 8-to-6 dither enable -- TODO confirm against PRM). */
; struct drm_i915_private *__p___2 ; { dev = intel_crtc->base.dev; pfit_control = 0U; pfit_pgm_ratios = 0U; border = 0U; adjusted_mode = & pipe_config->base.adjusted_mode; if (adjusted_mode->hdisplay == pipe_config->pipe_src_w && adjusted_mode->vdisplay == pipe_config->pipe_src_h) { goto out; } else { } switch (fitting_mode) { case 2: centre_horizontally(adjusted_mode, pipe_config->pipe_src_w); centre_vertically(adjusted_mode, pipe_config->pipe_src_h); border = 32768U; goto ldv_48066; case 3: __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { i965_scale_aspect(pipe_config, & pfit_control); } else { i9xx_scale_aspect(pipe_config, & pfit_control, & pfit_pgm_ratios, & border); } goto ldv_48066; case 1: ; if (pipe_config->pipe_src_h != adjusted_mode->vdisplay || pipe_config->pipe_src_w != adjusted_mode->hdisplay) { pfit_control = pfit_control | 2147483648U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 3U) { pfit_control = pfit_control; } else { pfit_control = pfit_control | 1632U; } } else { } goto ldv_48066; default: __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 355, "bad panel fit mode: %d\n", fitting_mode); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } ldv_48066: __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 3U) { pfit_control = (u32 )((int )intel_crtc->pipe << 29) | pfit_control; } else { } out: ; if ((int )pfit_control >= 0) { pfit_control = 0U; pfit_pgm_ratios = 0U; } else { } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) <= 3U &&
pipe_config->pipe_bpp == 18) { pfit_control = pfit_control | 8U; } else { } pipe_config->gmch_pfit.control = pfit_control; pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; pipe_config->gmch_pfit.lvds_border_bits = border; return; } }
/*
 * intel_panel_detect - report panel connector status.  If the
 * i915.panel_ignore_lid module parameter is 0 and the ACPI opregion exposes a
 * lid state, bit 0 of that register selects connected (1) / disconnected (2);
 * otherwise the parameter forces connected (-2), disconnected (-1) or
 * unknown (3).
 */
enum drm_connector_status intel_panel_detect(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; unsigned int tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if (i915.panel_ignore_lid == 0 && (unsigned long )dev_priv->opregion.lid_state != (unsigned long )((u32 *)0U)) { tmp = ioread32((void *)dev_priv->opregion.lid_state); return ((int )tmp & 1 ? 1 : 2); } else { } switch (i915.panel_ignore_lid) { case -2: ; return (1); case -1: ; return (2); default: ; return (3); } } }
/*
 * scale - linearly map source_val from [source_min, source_max] onto
 * [target_min, target_max] with round-to-nearest, WARN-ing (but continuing)
 * on inverted ranges.  Continues on the next generated line.
 */
static uint32_t scale(uint32_t source_val , uint32_t source_min , uint32_t source_max , uint32_t target_min , uint32_t target_max ) { uint64_t target_val ; int __ret_warn_on ; long tmp ; int __ret_warn_on___0 ; long tmp___0 ; uint32_t _min1 ; uint32_t _max1 ; uint32_t _max2 ; uint32_t _min2 ; uint32_t __d ; unsigned long long _tmp ; uint32_t __base ; uint32_t __rem ; { __ret_warn_on = source_min > source_max; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 416, "WARN_ON(source_min > source_max)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = target_min > target_max; tmp___0 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 417, "WARN_ON(target_min > target_max)"); } else { }
/* (continuation of scale)  Clamps source_val into [source_min, source_max]
 * (expanded min/max macros), then does the rescale in 64-bit with "+ __d/2"
 * rounding.  NOTE(review): if source_min == source_max the divisor __base is
 * zero -- matches upstream, which only WARNs on inverted ranges. */
ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); _max1 = source_val; _max2 = source_min; _min1 = _max1 > _max2 ? _max1 : _max2; _min2 = source_max; source_val = _min1 < _min2 ? _min1 : _min2; __d = source_max - source_min; _tmp = (unsigned long long )(source_val - source_min) * (unsigned long long )(target_max - target_min) + (unsigned long long )(__d / 2U); __base = __d; __rem = (uint32_t )(_tmp % (unsigned long long )__base); _tmp = _tmp / (unsigned long long )__base; target_val = _tmp; target_val = (uint64_t )target_min + target_val; return ((uint32_t )target_val); } }
/* scale_user_to_hw - map a user brightness in [0, user_max] onto the hardware
 * range [backlight.min, backlight.max]. */
__inline static u32 scale_user_to_hw(struct intel_connector *connector , u32 user_level , u32 user_max ) { struct intel_panel *panel ; uint32_t tmp ; { panel = & connector->panel; tmp = scale(user_level, 0U, user_max, panel->backlight.min, panel->backlight.max); return (tmp); } }
/* clamp_user_to_hw - map onto [0, backlight.max] and then clamp into
 * [backlight.min, backlight.max] (expanded clamp macro); used by the ACPI
 * path so 0 stays distinguishable.  Continues on the next generated line. */
__inline static u32 clamp_user_to_hw(struct intel_connector *connector , u32 user_level , u32 user_max ) { struct intel_panel *panel ; u32 hw_level ; u32 _min1 ; u32 _max1 ; u32 _max2 ; u32 _min2 ; { panel = & connector->panel; hw_level = scale(user_level, 0U, user_max, 0U, panel->backlight.max); _max1 = hw_level; _max2 = panel->backlight.min; _min1 = _max1 > _max2 ? _max1 : _max2; _min2 = panel->backlight.max; hw_level = _min1 < _min2 ?
_min1 : _min2; return (hw_level); } }
/* scale_hw_to_user - inverse of scale_user_to_hw. */
__inline static u32 scale_hw_to_user(struct intel_connector *connector , u32 hw_level , u32 user_max ) { struct intel_panel *panel ; uint32_t tmp ; { panel = & connector->panel; tmp = scale(hw_level, panel->backlight.min, panel->backlight.max, 0U, user_max); return (tmp); } }
/*
 * intel_panel_compute_brightness - apply brightness inversion: forced off when
 * i915.invert_brightness < 0, forced on when > 0 or when the device quirk bit
 * (dev_priv->quirks & 4UL) is set; WARNs if backlight.max is still 0.
 */
static u32 intel_panel_compute_brightness(struct intel_connector *connector , u32 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; int __ret_warn_on ; long tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; __ret_warn_on = panel->backlight.max == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 471, "WARN_ON(panel->backlight.max == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if (i915.invert_brightness < 0) { return (val); } else { } if (i915.invert_brightness > 0 || (dev_priv->quirks & 4UL) != 0UL) { return (panel->backlight.max - val); } else { } return (val); } }
/* bdw_get_backlight - low 16 bits of MMIO register 819796 (0xC8254, likely
 * the PCH backlight duty-cycle register). */
static u32 bdw_get_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819796L, 1); return (tmp & 65535U); } }
/* pch_get_backlight - low 16 bits of MMIO register 295508 (0x48254, likely
 * the CPU backlight PWM register on PCH platforms). */
static u32 pch_get_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295508L, 1); return (tmp & 65535U); } }
/* i9xx_get_backlight - continues on the next generated line. */
static u32 i9xx_get_backlight(struct intel_connector *connector ) { struct
/* (continuation of i9xx_get_backlight)  Reads the low 16 bits of the PWM
 * register at display_mmio_offset + 397908U (0x61254); gen<=3 keeps the duty
 * cycle in bits 15:1, hence the ">> 1".  In combination mode the legacy
 * brightness PCI config byte at 0xF4 (LBPC) multiplies the PWM value. */
drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 val ; uint32_t tmp ; struct drm_i915_private *__p ; u8 lbpc ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), 1); val = tmp & 65535U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) <= 3U) { val = val >> 1; } else { } if ((int )panel->backlight.combination_mode) { pci_read_config_byte((struct pci_dev const *)dev->pdev, 244, & lbpc); val = (u32 )lbpc * val; } else { } return (val); } }
/*
 * _vlv_get_backlight - per-pipe PWM read for VLV/CHV (pipe stride 256 bytes);
 * WARNs and returns 0 unless pipe is A (0) or B (1).
 */
static u32 _vlv_get_backlight(struct drm_device *dev , enum pipe pipe ) { struct drm_i915_private *dev_priv ; int __ret_warn_on ; long tmp ; long tmp___0 ; uint32_t tmp___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __ret_warn_on = (int )pipe != 0 && (int )pipe != 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 525, "WARN_ON(pipe != PIPE_A && pipe != PIPE_B)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (0U); } else { } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397908U), 1); return (tmp___1 & 65535U); } }
/* vlv_get_backlight - resolve the connector's pipe, then delegate. */
static u32 vlv_get_backlight(struct intel_connector *connector ) { struct drm_device *dev ; enum pipe pipe ; enum pipe tmp ; u32 tmp___0 ; { dev = connector->base.dev; tmp = intel_get_pipe_from_connector(connector); pipe = tmp; tmp___0 = _vlv_get_backlight(dev, pipe); return (tmp___0); } }
/* bxt_get_backlight - full 32-bit duty cycle from register 819800 (0xC8258). */
static u32 bxt_get_backlight(struct
intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819800L, 1); return (tmp); } }
/*
 * intel_panel_get_backlight - read the current level through the per-platform
 * vtable under backlight_lock, un-applying any brightness inversion; 0 when
 * the backlight is not enabled.
 */
static u32 intel_panel_get_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 val ; long tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; val = 0U; mutex_lock_nested(& dev_priv->backlight_lock, 0U); if ((int )panel->backlight.enabled) { val = (*(dev_priv->display.get_backlight))(connector); val = intel_panel_compute_brightness(connector, val); } else { } mutex_unlock(& dev_priv->backlight_lock); tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_panel_get_backlight", "get backlight PWM = %d\n", val); } else { } return (val); } }
/* bdw_set_backlight - read-modify-write the low 16 bits of 0xC8254. */
static void bdw_set_backlight(struct intel_connector *connector , u32 level ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 val ; uint32_t tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819796L, 1); val = tmp & 4294901760U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819796L, val | level, 1); return; } }
/* pch_set_backlight - read-modify-write the low 16 bits of 0x48254. */
static void pch_set_backlight(struct intel_connector *connector , u32 level ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 tmp ; uint32_t tmp___0 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295508L, 1); tmp = tmp___0 & 4294901760U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 295508L, tmp | level, 1); return; } }
/* i9xx_set_backlight - continues on the next generated line. */
static void i9xx_set_backlight(struct intel_connector *connector , u32 level ) { struct drm_device
/* (continuation of i9xx_set_backlight)  In combination mode the level is split
 * into a coarse LBPC byte (written to PCI config 0xF4) and a residual PWM
 * value.  Gen4 uses the full 16-bit field (mask 0xFFFF); older gens keep the
 * duty cycle in bits 15:1 (level << 1, mask 0xFFFE). */
*dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 tmp ; u32 mask ; int __ret_warn_on ; long tmp___0 ; u8 lbpc ; struct drm_i915_private *__p ; uint32_t tmp___1 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; __ret_warn_on = panel->backlight.max == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 592, "WARN_ON(panel->backlight.max == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((int )panel->backlight.combination_mode) { lbpc = (unsigned int )((u8 )((level * 254U) / panel->backlight.max)) + 1U; level = level / (u32 )lbpc; pci_write_config_byte((struct pci_dev const *)dev->pdev, 244, (int )lbpc); } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 4U) { mask = 65535U; } else { level = level << 1; mask = 65534U; } tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), 1); tmp = tmp___1 & ~ mask; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), tmp | level, 1); return; } }
/* vlv_set_backlight - per-pipe (A/B only, WARN otherwise) read-modify-write
 * of the low 16 PWM bits; continues on the next generated line. */
static void vlv_set_backlight(struct intel_connector *connector , u32 level ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; enum pipe tmp ; u32 tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; uint32_t tmp___3 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_get_pipe_from_connector(connector); pipe = tmp; __ret_warn_on = (int )pipe != 0 && (int )pipe != 1; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) {
warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 620, "WARN_ON(pipe != PIPE_A && pipe != PIPE_B)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return; } else { } tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397908U), 1); tmp___0 = tmp___3 & 4294901760U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397908U), tmp___0 | level, 1); return; } }
/* bxt_set_backlight - write the 32-bit duty cycle straight to 0xC8258. */
static void bxt_set_backlight(struct intel_connector *connector , u32 level ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819800L, level, 1); return; } }
/*
 * intel_panel_actually_set_backlight - apply inversion, then write through the
 * per-platform set_backlight hook.  Caller holds backlight_lock (see callers).
 */
static void intel_panel_actually_set_backlight(struct intel_connector *connector , u32 level ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; long tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_panel_actually_set_backlight", "set backlight PWM = %d\n", level); } else { } level = intel_panel_compute_brightness(connector, level); (*(dev_priv->display.set_backlight))(connector, level); return; } }
/* intel_panel_set_backlight - continues on the next generated line. */
static void intel_panel_set_backlight(struct intel_connector *connector , u32 user_level , u32 user_max ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 hw_level ; int __ret_warn_on ; long tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; if
/* (continuation of intel_panel_set_backlight)  Scales the user level into the
 * hardware range under backlight_lock, records it, and writes it out only if
 * the backlight is currently enabled. */
(! panel->backlight.present) { return; } else { } mutex_lock_nested(& dev_priv->backlight_lock, 0U); __ret_warn_on = panel->backlight.max == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 661, "WARN_ON(panel->backlight.max == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); hw_level = scale_user_to_hw(connector, user_level, user_max); panel->backlight.level = hw_level; if ((int )panel->backlight.enabled) { intel_panel_actually_set_backlight(connector, hw_level); } else { } mutex_unlock(& dev_priv->backlight_lock); return; } }
/*
 * intel_panel_set_backlight_acpi - ACPI opregion entry point: clamps (rather
 * than scales) into the hardware range, mirrors the level into the backlight
 * class device's props.brightness, and bails out when the connector has no
 * pipe (-1).  Continues on the next generated line.
 */
void intel_panel_set_backlight_acpi(struct intel_connector *connector , u32 user_level , u32 user_max ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; enum pipe pipe ; enum pipe tmp ; u32 hw_level ; int __ret_warn_on ; long tmp___0 ; u32 tmp___1 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; tmp = intel_get_pipe_from_connector(connector); pipe = tmp; if (!
panel->backlight.present || (int )pipe == -1) { return; } else { } mutex_lock_nested(& dev_priv->backlight_lock, 0U); __ret_warn_on = panel->backlight.max == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 695, "WARN_ON(panel->backlight.max == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); hw_level = clamp_user_to_hw(connector, user_level, user_max); panel->backlight.level = hw_level; if ((unsigned long )panel->backlight.device != (unsigned long )((struct backlight_device *)0)) { tmp___1 = scale_hw_to_user(connector, panel->backlight.level, (u32 )(panel->backlight.device)->props.max_brightness); (panel->backlight.device)->props.brightness = (int )tmp___1; } else { } if ((int )panel->backlight.enabled) { intel_panel_actually_set_backlight(connector, hw_level); } else { } mutex_unlock(& dev_priv->backlight_lock); return; } }
/*
 * pch_disable_backlight - zero the duty cycle, then clear the enable bit 31
 * (mask 2147483647U = 0x7FFFFFFF) in both the CPU (0x48250) and PCH (0xC8250)
 * PWM control registers.
 */
static void pch_disable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_panel_actually_set_backlight(connector, 0U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295504L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 295504L, tmp & 2147483647U, 1); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, tmp & 2147483647U, 1); return; } }
/* i9xx_disable_backlight - gen2/3 has no separate enable bit: just zero the
 * duty cycle. */
static void i9xx_disable_backlight(struct intel_connector *connector ) { { intel_panel_actually_set_backlight(connector, 0U); return; } }
/* i965_disable_backlight - continues on the next generated line. */
static void i965_disable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 tmp ; { dev =
/* (continuation of i965_disable_backlight)  Zero the level, then clear enable
 * bit 31 of the control register at display_mmio_offset + 397904U (0x61250). */
connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_panel_actually_set_backlight(connector, 0U); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), tmp & 2147483647U, 1); return; } }
/* vlv_disable_backlight - per-pipe variant (pipe A/B only, WARN otherwise). */
static void vlv_disable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; enum pipe pipe ; enum pipe tmp ; u32 tmp___0 ; int __ret_warn_on ; long tmp___1 ; long tmp___2 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = intel_get_pipe_from_connector(connector); pipe = tmp; __ret_warn_on = (int )pipe != 0 && (int )pipe != 1; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 751, "WARN_ON(pipe != PIPE_A && pipe != PIPE_B)"); } else { } tmp___2 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___2 != 0L) { return; } else { } intel_panel_actually_set_backlight(connector, 0U); tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), tmp___0 & 2147483647U, 1); return; } }
/* bxt_disable_backlight - zero the level, then clear enable bit 31 of 0xC8250. */
static void bxt_disable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_panel_actually_set_backlight(connector, 0U); tmp
= (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, tmp & 2147483647U, 1); return; } }
/*
 * intel_panel_disable_backlight - public disable path.  Skips the hardware
 * poke during a vga switcheroo transition (switch_power_state == 2), marks
 * the backlight class device powered down (props.power = 4), clears the
 * enabled flag and calls the per-platform hook under backlight_lock.
 */
void intel_panel_disable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; long tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; if (! panel->backlight.present) { return; } else { } if (dev->switch_power_state == 2) { tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_panel_disable_backlight", "Skipping backlight disable on vga switch\n"); } else { } return; } else { } mutex_lock_nested(& dev_priv->backlight_lock, 0U); if ((unsigned long )panel->backlight.device != (unsigned long )((struct backlight_device *)0)) { (panel->backlight.device)->props.power = 4; } else { } panel->backlight.enabled = 0; (*(dev_priv->display.disable_backlight))(connector); mutex_unlock(& dev_priv->backlight_lock); return; } }
/*
 * bdw_enable_backlight - program period (backlight.max << 16) and polarity,
 * then set the enable bit with a posting read in between.  If already enabled
 * ((int)pch_ctl1 < 0, i.e. bit 31 set) it is first disabled.  Continues on
 * the next generated line.
 */
static void bdw_enable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 pch_ctl1 ; u32 pch_ctl2 ; long tmp ; struct drm_i915_private *__p ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; pch_ctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); if ((int )pch_ctl1 < 0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("bdw_enable_backlight", "pch backlight already enabled\n"); } else { } pch_ctl1 = pch_ctl1 & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pch_ctl1, 1); } else { } pch_ctl2 = panel->backlight.max << 16; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819796L, pch_ctl2, 1); pch_ctl1 = 0U; if ((int )panel->backlight.active_low_pwm) { pch_ctl1 = pch_ctl1 |
/* (continuation of bdw_enable_backlight)  536870912U = bit 29 (active-low PWM
 * polarity); pch_type == 3 additionally sets bit 30 (1073741824U, presumably
 * a PWM override for that PCH generation -- TODO confirm against PRM). */
536870912U; } else { } __p = dev_priv; if ((unsigned int )__p->pch_type == 3U) { pch_ctl1 = pch_ctl1 | 1073741824U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pch_ctl1, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pch_ctl1 | 2147483648U, 1); intel_panel_actually_set_backlight(connector, panel->backlight.level); return; } }
/*
 * pch_enable_backlight - enable both the CPU-side (0x48250) and PCH-side
 * (0xC8250) PWM controls.  Either side that is already enabled (bit 31 set,
 * tested via the signed compare) is first disabled.  The CPU control encodes
 * the transcoder in bits 31:29; value 3 (EDP) maps to 1610612736U =
 * 0x60000000.  Enable writes are separated by posting reads.  Continues on
 * the next generated line.
 */
static void pch_enable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; enum pipe pipe ; enum pipe tmp ; enum transcoder cpu_transcoder ; enum transcoder tmp___0 ; u32 cpu_ctl2 ; u32 pch_ctl1 ; u32 pch_ctl2 ; long tmp___1 ; long tmp___2 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; tmp = intel_get_pipe_from_connector(connector); pipe = tmp; tmp___0 = intel_pipe_to_cpu_transcoder(dev_priv, pipe); cpu_transcoder = tmp___0; cpu_ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295504L, 1); if ((int )cpu_ctl2 < 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("pch_enable_backlight", "cpu backlight already enabled\n"); } else { } cpu_ctl2 = cpu_ctl2 & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 295504L, cpu_ctl2, 1); } else { } pch_ctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); if ((int )pch_ctl1 < 0) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("pch_enable_backlight", "pch backlight already enabled\n"); } else { } pch_ctl1 = pch_ctl1 & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pch_ctl1, 1); } else { } if ((unsigned int )cpu_transcoder == 3U) { cpu_ctl2 = 1610612736U; } else { cpu_ctl2 = (unsigned int )cpu_transcoder << 29; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 295504L, cpu_ctl2, 1);
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295504L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 295504L, cpu_ctl2 | 2147483648U, 1); intel_panel_actually_set_backlight(connector, panel->backlight.level); pch_ctl2 = panel->backlight.max << 16; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819796L, pch_ctl2, 1); pch_ctl1 = 0U; if ((int )panel->backlight.active_low_pwm) { pch_ctl1 = pch_ctl1 | 536870912U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pch_ctl1, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pch_ctl1 | 2147483648U, 1); return; } }
/*
 * i9xx_enable_backlight - gen2/3 enable: period goes in bits 31:17
 * (freq << 17), combination mode divides the period by 255 (LBPC granularity)
 * and sets bit 16; a platform flag plus active_low_pwm sets polarity bit 0.
 * Continues on the next generated line.
 */
static void i9xx_enable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 ctl ; u32 freq ; long tmp ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), 1); if ((ctl & 65534U) != 0U) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i9xx_enable_backlight", "backlight already enabled\n"); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), 0U, 1); } else { } freq = panel->backlight.max; if ((int )panel->backlight.combination_mode) { freq = freq / 255U; } else { } ctl = freq << 17; if ((int )panel->backlight.combination_mode) { ctl = ctl | 65536U; } else { } __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U && (int )panel->backlight.active_low_pwm) { ctl = ctl | 1U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U),
ctl, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), 0); intel_panel_actually_set_backlight(connector, panel->backlight.level); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 2U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397920U), 2147483648U, 1); } else { } return; } } static void i965_enable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; enum pipe pipe ; enum pipe tmp ; u32 ctl ; u32 ctl2 ; u32 freq ; long tmp___0 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; tmp = intel_get_pipe_from_connector(connector); pipe = tmp; ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), 1); if ((int )ctl2 < 0) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("i965_enable_backlight", "backlight already enabled\n"); } else { } ctl2 = ctl2 & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), ctl2, 1); } else { } freq = panel->backlight.max; if ((int )panel->backlight.combination_mode) { freq = freq / 255U; } else { } ctl = freq << 16; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), ctl, 1); ctl2 = (u32 )((int )pipe << 29); if ((int )panel->backlight.combination_mode) { ctl2 = ctl2 | 1073741824U; } else { } if ((int )panel->backlight.active_low_pwm) { ctl2 = ctl2 | 268435456U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), ctl2, 1); 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), ctl2 | 2147483648U, 1); intel_panel_actually_set_backlight(connector, panel->backlight.level); return; } }
/* VLV backlight enable: per-pipe registers at display_mmio_offset +
 * pipe*256 + 397904/397908. WARNs (and bails) unless pipe is A (0) or B (1);
 * if the enable bit (bit 31) is already set, debug-log and clear it; program
 * max duty into the high 16 bits, restore brightness, set bit 28 for
 * active-low PWM, then re-enable with bit 31 (completed on the next line). */
static void vlv_enable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; enum pipe pipe ; enum pipe tmp ; u32 ctl ; u32 ctl2 ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; tmp = intel_get_pipe_from_connector(connector); pipe = tmp; __ret_warn_on = (int )pipe != 0 && (int )pipe != 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 962, "WARN_ON(pipe != PIPE_A && pipe != PIPE_B)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return; } else { } ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), 1); if ((int )ctl2 < 0) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("vlv_enable_backlight", "backlight already enabled\n"); } else { } ctl2 = ctl2 & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), ctl2, 1); } else { } ctl = panel->backlight.max << 16; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int 
)dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397908U), ctl, 1); intel_panel_actually_set_backlight(connector, panel->backlight.level); ctl2 = 0U; if ((int )panel->backlight.active_low_pwm) { ctl2 = ctl2 | 268435456U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), ctl2, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), ctl2 | 2147483648U, 1); return; } }
/* BXT backlight enable: control register at 819792 (0xC8250); if bit 31 is
 * set, debug-log and clear it; write max duty to 819796 (0xC8254) directly
 * (no <<16 here, unlike the other paths), restore brightness, set bit 29
 * for active-low PWM, then enable via bit 31 with a posting read between. */
static void bxt_enable_backlight(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 pwm_ctl ; long tmp ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; pwm_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); if ((int )pwm_ctl < 0) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("bxt_enable_backlight", "backlight already enabled\n"); } else { } pwm_ctl = pwm_ctl & 2147483647U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pwm_ctl, 1); } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819796L, panel->backlight.max, 1); intel_panel_actually_set_backlight(connector, panel->backlight.level); pwm_ctl = 0U; if ((int )panel->backlight.active_low_pwm) { pwm_ctl = pwm_ctl | 536870912U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pwm_ctl, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 819792L, pwm_ctl | 2147483648U, 1); return; } }
/* Public entry point: validates state, then dispatches to the
 * platform-specific enable hook installed by intel_panel_init_backlight_funcs
 * (body continues on the next line, under dev_priv->backlight_lock). */
void intel_panel_enable_backlight(struct intel_connector *connector ) { struct 
drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; enum pipe pipe ; enum pipe tmp ; long tmp___0 ; int __ret_warn_on ; long tmp___1 ; u32 tmp___2 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; tmp = intel_get_pipe_from_connector(connector); pipe = tmp;
/* No-op when no backlight was detected during setup. */
if (! panel->backlight.present) { return; } else { } tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_panel_enable_backlight", "pipe %c\n", (int )pipe + 65); } else { } mutex_lock_nested(& dev_priv->backlight_lock, 0U); __ret_warn_on = panel->backlight.max == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 1027, "WARN_ON(panel->backlight.max == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L);
/* If the stored level is at or below the minimum, reset it to max and mirror
 * the value into the backlight class device (scaled to user range). */
if (panel->backlight.level <= panel->backlight.min) { panel->backlight.level = panel->backlight.max; if ((unsigned long )panel->backlight.device != (unsigned long )((struct backlight_device *)0)) { tmp___2 = scale_hw_to_user(connector, panel->backlight.level, (u32 )(panel->backlight.device)->props.max_brightness); (panel->backlight.device)->props.brightness = (int )tmp___2; } else { } } else { } (*(dev_priv->display.enable_backlight))(connector); panel->backlight.enabled = 1; if ((unsigned long )panel->backlight.device != (unsigned long )((struct backlight_device *)0)) { (panel->backlight.device)->props.power = 0; } else { } mutex_unlock(& dev_priv->backlight_lock); return; } }
/* backlight_device update_status callback: pushes the sysfs brightness value
 * down to the hardware (body continues on the next line, under the
 * connection_mutex modeset lock). */
static int intel_backlight_device_update_status(struct backlight_device *bd ) { struct intel_connector *connector ; void *tmp ; struct intel_panel *panel ; struct drm_device *dev ; long tmp___0 ; bool enable ; { tmp = 
bl_get_data(bd); connector = (struct intel_connector *)tmp; panel = & connector->panel; dev = connector->base.dev; drm_modeset_lock(& dev->mode_config.connection_mutex, (struct drm_modeset_acquire_ctx *)0); tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_backlight_device_update_status", "updating intel_backlight, brightness=%d/%d\n", bd->props.brightness, bd->props.max_brightness); } else { } intel_panel_set_backlight(connector, (u32 )bd->props.brightness, (u32 )bd->props.max_brightness);
/* If enabled and a backlight_power hook exists, request panel power on only
 * when power==0 (FB_BLANK_UNBLANK) and brightness is non-zero; if disabled,
 * report power state 4 (presumably FB_BLANK_POWERDOWN -- confirm). */
if ((int )panel->backlight.enabled) { if ((unsigned long )panel->backlight_power != (unsigned long )((void (*)(struct intel_connector * , bool ))0)) { enable = (bool )(bd->props.power == 0 && bd->props.brightness != 0); (*(panel->backlight_power))(connector, (int )enable); } else { } } else { bd->props.power = 4; } drm_modeset_unlock(& dev->mode_config.connection_mutex); return (0); } }
/* backlight_device get_brightness callback: reads the hardware level (with a
 * runtime-PM reference and the connection_mutex held) and scales it to the
 * user-visible 0..max_brightness range. */
static int intel_backlight_device_get_brightness(struct backlight_device *bd ) { struct intel_connector *connector ; void *tmp ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 hw_level ; int ret ; u32 tmp___0 ; { tmp = bl_get_data(bd); connector = (struct intel_connector *)tmp; dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; intel_runtime_pm_get(dev_priv); drm_modeset_lock(& dev->mode_config.connection_mutex, (struct drm_modeset_acquire_ctx *)0); hw_level = intel_panel_get_backlight(connector); tmp___0 = scale_hw_to_user(connector, hw_level, (u32 )bd->props.max_brightness); ret = (int )tmp___0; drm_modeset_unlock(& dev->mode_config.connection_mutex); intel_runtime_pm_put(dev_priv); return (ret); } }
/* Ops table wiring the two callbacks above into the backlight class. */
static struct backlight_ops const intel_backlight_device_ops = {0U, & intel_backlight_device_update_status, & intel_backlight_device_get_brightness, 0};
/* Registers an "intel_backlight" class device for this connector (body
 * continues on the next line). */
static int intel_backlight_device_register(struct intel_connector *connector ) { struct intel_panel *panel ; struct backlight_properties 
props ; int __ret_warn_on ; long tmp ; long tmp___0 ; int __ret_warn_on___0 ; long tmp___1 ; u32 tmp___2 ; long tmp___3 ; bool tmp___4 ; long tmp___5 ; { panel = & connector->panel;
/* WARN and return -ENODEV (-19) if a device is already registered. */
__ret_warn_on = (unsigned long )panel->backlight.device != (unsigned long )((struct backlight_device *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 1109, "WARN_ON(panel->backlight.device)"); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { return (-19); } else { } if (! panel->backlight.present) { return (0); } else { } __ret_warn_on___0 = panel->backlight.max == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 1115, "WARN_ON(panel->backlight.max == 0)"); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L);
/* Fill backlight_properties (24 bytes zeroed): type 1 (presumably
 * BACKLIGHT_RAW -- confirm), max = hw max, brightness scaled to user range,
 * power 0 when enabled else 4. */
memset((void *)(& props), 0, 24UL); props.type = 1; props.max_brightness = (int )panel->backlight.max; tmp___2 = scale_hw_to_user(connector, panel->backlight.level, (u32 )props.max_brightness); props.brightness = (int )tmp___2; if ((int )panel->backlight.enabled) { props.power = 0; } else { props.power = 4; } panel->backlight.device = backlight_device_register("intel_backlight", connector->base.kdev, (void *)connector, & intel_backlight_device_ops, (struct backlight_properties const *)(& props));
/* On IS_ERR result: log, null the pointer (completed on the next line). */
tmp___4 = IS_ERR((void const *)panel->backlight.device); if ((int )tmp___4) { tmp___3 = PTR_ERR((void const *)panel->backlight.device); drm_err("Failed to register backlight: %ld\n", tmp___3); panel->backlight.device = 
(struct backlight_device *)0; return (-19); } else { } tmp___5 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_backlight_device_register", "Connector %s backlight sysfs interface registered\n", connector->base.name); } else { } return (0); } }
/* Unregisters and forgets the connector's backlight class device, if any. */
static void intel_backlight_device_unregister(struct intel_connector *connector ) { struct intel_panel *panel ; { panel = & connector->panel; if ((unsigned long )panel->backlight.device != (unsigned long )((struct backlight_device *)0)) { backlight_device_unregister(panel->backlight.device); panel->backlight.device = (struct backlight_device *)0; } else { } return; } }
/* Computes the minimum backlight level from the VBT: clamps the VBT
 * min_brightness into [0, 64] (out of 255) -- the clamp is the expanded
 * min(max(x, 0), 64) below -- then rescales it from the 0..255 VBT range to
 * the 0..panel->backlight.max hardware range via scale(). */
static u32 get_backlight_min_vbt(struct intel_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; int min ; int __ret_warn_on ; long tmp ; int __min1 ; int __max1 ; int __max2 ; int __min2 ; long tmp___0 ; uint32_t tmp___1 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; __ret_warn_on = panel->backlight.max == 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 1189, "WARN_ON(panel->backlight.max == 0)"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __max1 = (int )dev_priv->vbt.backlight.min_brightness; __max2 = 0; __min1 = __max1 > __max2 ? __max1 : __max2; __min2 = 64; min = __min1 < __min2 ? 
__min1 : __min2; if ((int )dev_priv->vbt.backlight.min_brightness != min) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("get_backlight_min_vbt", "clamping VBT min backlight %d/255 to %d/255\n", (int )dev_priv->vbt.backlight.min_brightness, min); } else { } } else { } tmp___1 = scale((uint32_t )min, 0U, 255U, 0U, panel->backlight.max); return (tmp___1); } }
/* BDW backlight probe: reads polarity (bit 29 of 0xC8250) and max duty (high
 * 16 bits of 0xC8254); returns -ENODEV (-19) when max is 0; records min from
 * VBT, the current level, and enabled = (ctl1 bit 31 set && level != 0). */
static int bdw_setup_backlight(struct intel_connector *connector , enum pipe unused ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 pch_ctl1 ; u32 pch_ctl2 ; u32 val ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; pch_ctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); panel->backlight.active_low_pwm = (pch_ctl1 & 536870912U) != 0U; pch_ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819796L, 1); panel->backlight.max = pch_ctl2 >> 16; if (panel->backlight.max == 0U) { return (-19); } else { } panel->backlight.min = get_backlight_min_vbt(connector); val = bdw_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); panel->backlight.enabled = (bool )((int )pch_ctl1 < 0 && panel->backlight.level != 0U); return (0); } }
/* PCH backlight probe: same PCH registers as BDW, but "enabled" additionally
 * requires bit 31 of the CPU control register at 295504 (0x48250). */
static int pch_setup_backlight(struct intel_connector *connector , enum pipe unused ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 cpu_ctl2 ; u32 pch_ctl1 ; u32 pch_ctl2 ; u32 val ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; pch_ctl1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); panel->backlight.active_low_pwm = (pch_ctl1 & 536870912U) != 0U; pch_ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819796L, 1); panel->backlight.max = pch_ctl2 >> 16; if (panel->backlight.max == 0U) { return (-19); } else { } panel->backlight.min 
= get_backlight_min_vbt(connector); val = pch_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); cpu_ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 295504L, 1); panel->backlight.enabled = (bool )(((int )cpu_ctl2 < 0 && (int )pch_ctl1 < 0) && panel->backlight.level != 0U); return (0); } }
/* i9xx backlight probe: decodes combination mode (bit 16) for gen2, for
 * device id 9618 (0x2592), or when the platform flag at byte 44 is set;
 * active-low polarity (bit 0) only on the byte-44 platforms; max duty comes
 * from bits 17+ and is multiplied by 255 in combination mode. */
static int i9xx_setup_backlight(struct intel_connector *connector , enum pipe unused ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 ctl ; u32 val ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), 1); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 2U) { panel->backlight.combination_mode = (ctl & 65536U) != 0U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 9618U) { panel->backlight.combination_mode = (ctl & 65536U) != 0U; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { panel->backlight.combination_mode = (ctl & 65536U) != 0U; } else { } } } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 44UL) != 0U) { panel->backlight.active_low_pwm = (ctl & 1U) != 0U; } else { } panel->backlight.max = ctl >> 17; if ((int )panel->backlight.combination_mode) { panel->backlight.max = panel->backlight.max * 255U; } else { } if (panel->backlight.max == 0U) { return (-19); } else { } panel->backlight.min = get_backlight_min_vbt(connector); val = i9xx_get_backlight(connector); panel->backlight.level = 
intel_panel_compute_brightness(connector, val); panel->backlight.enabled = panel->backlight.level != 0U; return (0); } }
/* i965 backlight probe: combination mode from bit 30 and polarity from bit
 * 28 of the control register at 0x61250; max duty from the high 16 bits of
 * 0x61254 (times 255 in combination mode); enabled = bit 31 && level != 0. */
static int i965_setup_backlight(struct intel_connector *connector , enum pipe unused ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 ctl ; u32 ctl2 ; u32 val ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397904U), 1); panel->backlight.combination_mode = (ctl2 & 1073741824U) != 0U; panel->backlight.active_low_pwm = (ctl2 & 268435456U) != 0U; ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((unsigned int )dev_priv->info.display_mmio_offset + 397908U), 1); panel->backlight.max = ctl >> 16; if ((int )panel->backlight.combination_mode) { panel->backlight.max = panel->backlight.max * 255U; } else { } if (panel->backlight.max == 0U) { return (-19); } else { } panel->backlight.min = get_backlight_min_vbt(connector); val = i9xx_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); panel->backlight.enabled = (bool )((int )ctl2 < 0 && panel->backlight.level != 0U); return (0); } }
/* VLV backlight probe: first iterates every pipe (CIL loop labels ldv_48549/
 * ldv_48550/ldv_48551), seeding a default frequency (cur_val | 255983616U,
 * i.e. 0xF42 << 16) into any pipe whose duty register has an empty high
 * half; then reads polarity and max duty for the requested pipe. */
static int vlv_setup_backlight(struct intel_connector *connector , enum pipe pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; enum pipe p ; u32 ctl ; u32 ctl2 ; u32 val ; u32 cur_val ; uint32_t tmp ; struct drm_i915_private *__p ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; p = 0; goto ldv_48551; ldv_48550: tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )p * 256U) + 397908U), 1); cur_val = tmp; if ((cur_val & 4294901760U) != 0U) { goto 
ldv_48549; } else { } cur_val = cur_val & 65535U; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )p * 256U) + 397908U), cur_val | 255983616U, 1); ldv_48549: p = (enum pipe )((int )p + 1); ldv_48551: __p = dev_priv; if ((int )__p->info.num_pipes > (int )p) { goto ldv_48550; } else { }
/* Only pipes A (0) and B (1) are supported: WARN and return -ENODEV (-19)
 * otherwise. */
__ret_warn_on = (int )pipe != 0 && (int )pipe != 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_panel.c", 1343, "WARN_ON(pipe != PIPE_A && pipe != PIPE_B)"); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { return (-19); } else { } ctl2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397904U), 1); panel->backlight.active_low_pwm = (ctl2 & 268435456U) != 0U; ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )(((unsigned int )dev_priv->info.display_mmio_offset + (unsigned int )pipe * 256U) + 397908U), 1); panel->backlight.max = ctl >> 16; if (panel->backlight.max == 0U) { return (-19); } else { } panel->backlight.min = get_backlight_min_vbt(connector); val = _vlv_get_backlight(dev, pipe); panel->backlight.level = intel_panel_compute_brightness(connector, val); panel->backlight.enabled = (bool )((int )ctl2 < 0 && panel->backlight.level != 0U); return (0); } }
/* BXT backlight probe: polarity from bit 29 of 0xC8250; max duty is the full
 * value of 0xC8254 (no shift, matching bxt_enable_backlight above). */
static int bxt_setup_backlight(struct intel_connector *connector , enum pipe unused ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_panel *panel ; u32 pwm_ctl ; u32 val ; { dev = connector->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; panel = & connector->panel; pwm_ctl = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819792L, 1); panel->backlight.active_low_pwm = (pwm_ctl & 536870912U) != 0U; panel->backlight.max = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 819796L, 1); if (panel->backlight.max == 0U) { return (-19); } else { } val = bxt_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); panel->backlight.enabled = (bool )((int )pwm_ctl < 0 && panel->backlight.level != 0U); return (0); } }
/* Public setup entry point: consults the VBT (and the quirk bit 8UL that
 * forces backlight presence), then runs the platform setup_backlight hook
 * under dev_priv->backlight_lock (body continues on the next line). */
int intel_panel_setup_backlight(struct drm_connector *connector , enum pipe pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct intel_panel *panel ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { dev = connector->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; panel = & intel_connector->panel; if (! 
dev_priv->vbt.backlight.present) { if ((dev_priv->quirks & 8UL) != 0UL) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_panel_setup_backlight", "no backlight present per VBT, but present per quirk\n"); } else { } } else { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_panel_setup_backlight", "no backlight present per VBT\n"); } else { } return (0); } } else { }
/* Platform probe runs under the backlight lock; a non-zero return from the
 * hook is propagated and backlight.present stays false. */
mutex_lock_nested(& dev_priv->backlight_lock, 0U); ret = (*(dev_priv->display.setup_backlight))(intel_connector, pipe); mutex_unlock(& dev_priv->backlight_lock); if (ret != 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_panel_setup_backlight", "failed to setup backlight for connector %s\n", connector->name); } else { } return (ret); } else { } panel->backlight.present = 1; tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_panel_setup_backlight", "Connector %s backlight initialized, %s, brightness %u/%u\n", connector->name, (int )panel->backlight.enabled ? 
(char *)"enabled" : (char *)"disabled", panel->backlight.level, panel->backlight.max); } else { } return (0); } }
/* Marks the backlight as gone; the class device itself is torn down
 * separately by intel_backlight_device_unregister. */
void intel_panel_destroy_backlight(struct drm_connector *connector ) { struct intel_connector *intel_connector ; struct drm_connector const *__mptr ; struct intel_panel *panel ; { __mptr = (struct drm_connector const *)connector; intel_connector = (struct intel_connector *)__mptr; panel = & intel_connector->panel; panel->backlight.present = 0; return; } }
/* Installs the per-platform backlight vtable (setup/enable/disable/set/get)
 * on dev_priv->display. The nested if/goto chain is CIL's flattening of a
 * platform-cascade: gen9 (with the byte-45 flag clear) -> bxt_*; gen8 or the
 * byte-45 flag -> bdw_*; any PCH (pch_type != 0) -> pch_*; VLV-style
 * platforms -> vlv_*; gen4 -> i965_*; everything else -> i9xx_*. */
void intel_panel_init_backlight_funcs(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 45UL) == 0U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 9U) { dev_priv->display.setup_backlight = & bxt_setup_backlight; dev_priv->display.enable_backlight = & bxt_enable_backlight; dev_priv->display.disable_backlight = & bxt_disable_backlight; dev_priv->display.set_backlight = & bxt_set_backlight; dev_priv->display.get_backlight = & bxt_get_backlight; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { dev_priv->display.setup_backlight = & bdw_setup_backlight; dev_priv->display.enable_backlight = & bdw_enable_backlight; dev_priv->display.disable_backlight = & pch_disable_backlight; dev_priv->display.set_backlight = & bdw_set_backlight; dev_priv->display.get_backlight = & 
bdw_get_backlight; } else { goto _L; } } else { _L: /* CIL Label */ __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { dev_priv->display.setup_backlight = & bdw_setup_backlight; dev_priv->display.enable_backlight = & bdw_enable_backlight; dev_priv->display.disable_backlight = & pch_disable_backlight; dev_priv->display.set_backlight = & bdw_set_backlight; dev_priv->display.get_backlight = & bdw_get_backlight; } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type != 0U) { dev_priv->display.setup_backlight = & pch_setup_backlight; dev_priv->display.enable_backlight = & pch_enable_backlight; dev_priv->display.disable_backlight = & pch_disable_backlight; dev_priv->display.set_backlight = & pch_set_backlight; dev_priv->display.get_backlight = & pch_get_backlight; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { dev_priv->display.setup_backlight = & vlv_setup_backlight; dev_priv->display.enable_backlight = & vlv_enable_backlight; dev_priv->display.disable_backlight = & vlv_disable_backlight; dev_priv->display.set_backlight = & vlv_set_backlight; dev_priv->display.get_backlight = & vlv_get_backlight; } else { __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) == 4U) { dev_priv->display.setup_backlight = & i965_setup_backlight; dev_priv->display.enable_backlight = & i965_enable_backlight; dev_priv->display.disable_backlight = & i965_disable_backlight; dev_priv->display.set_backlight = & i9xx_set_backlight; dev_priv->display.get_backlight = & i9xx_get_backlight; } else {
/* Fallback for all remaining platforms: plain i9xx hooks. */
dev_priv->display.setup_backlight = & i9xx_setup_backlight; dev_priv->display.enable_backlight = & i9xx_enable_backlight; dev_priv->display.disable_backlight = & i9xx_disable_backlight; dev_priv->display.set_backlight = & i9xx_set_backlight; dev_priv->display.get_backlight = & 
i9xx_get_backlight; } } } } } } return; } }
/* Stores the fixed and downclock display modes on the panel; always 0. */
int intel_panel_init(struct intel_panel *panel , struct drm_display_mode *fixed_mode , struct drm_display_mode *downclock_mode ) { { panel->fixed_mode = fixed_mode; panel->downclock_mode = downclock_mode; return (0); } }
/* Frees both modes (if set) via drm_mode_destroy. The 0xfffffffffffffc38UL
 * offset is CIL's expansion of container_of(panel, struct intel_connector,
 * panel): subtracting the panel member's offset within the connector. */
void intel_panel_fini(struct intel_panel *panel ) { struct intel_connector *intel_connector ; struct intel_panel const *__mptr ; { __mptr = (struct intel_panel const *)panel; intel_connector = (struct intel_connector *)__mptr + 0xfffffffffffffc38UL; if ((unsigned long )panel->fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); } else { } if ((unsigned long )panel->downclock_mode != (unsigned long )((struct drm_display_mode *)0)) { drm_mode_destroy(intel_connector->base.dev, panel->downclock_mode); } else { } return; } }
/* Walks dev->mode_config.connector_list (CIL-expanded list_for_each_entry;
 * 0xffffffffffffffe8UL is -offsetof(base.head)) registering a backlight
 * class device for every connector. */
void intel_backlight_register(struct drm_device *dev ) { struct intel_connector *connector ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_48655; ldv_48654: intel_backlight_device_register(connector); __mptr___0 = (struct list_head const *)connector->base.head.next; connector = (struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_48655: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_48654; } else { } return; } }
/* Mirror of intel_backlight_register: unregisters every connector's
 * backlight class device (body continues on the next line). */
void intel_backlight_unregister(struct drm_device *dev ) { struct intel_connector *connector ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct intel_connector *)__mptr + 0xffffffffffffffe8UL; goto ldv_48666; ldv_48665: intel_backlight_device_unregister(connector); __mptr___0 = (struct list_head const *)connector->base.head.next; connector = 
(struct intel_connector *)__mptr___0 + 0xffffffffffffffe8UL; ldv_48666: ; if ((unsigned long )(& connector->base.head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_48665; } else { } return; } }
/* LDV harness: allocates a zeroed 1848-byte backlight_device stand-in used
 * as the argument for the ops callbacks exercised below. */
void ldv_initialize_backlight_ops_31(void) { void *tmp ; { tmp = ldv_init_zalloc(1848UL); intel_backlight_device_ops_group0 = (struct backlight_device *)tmp; return; } }
/* LDV harness: nondeterministically invokes the backlight_ops callbacks
 * (get_brightness / update_status) for the verifier's state machine. */
void ldv_main_exported_31(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_31 == 1) { intel_backlight_device_get_brightness(intel_backlight_device_ops_group0); ldv_state_variable_31 = 1; } else { } goto ldv_48675; case 1: ; if (ldv_state_variable_31 == 1) { intel_backlight_device_update_status(intel_backlight_device_ops_group0); ldv_state_variable_31 = 1; } else { } goto ldv_48675; default: ldv_stop(); } ldv_48675: ; return; } }
/* LDV instrumented workqueue wrappers: forward to the real API and record
 * the queued work in the verifier model via activate_work_18. */
bool ldv_queue_work_on_1007(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_1008(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_1009(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_1010(struct workqueue_struct *ldv_func_arg1 ) { { 
flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_1011(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ;
/* popcount of the low 16 bits, delegated to __arch_hweight32. */
__inline static unsigned int __arch_hweight16(unsigned int w ) { unsigned int tmp ; { tmp = __arch_hweight32(w & 65535U); return (tmp); } } bool ldv_queue_work_on_1021(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_1023(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_1022(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_1025(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_1024(struct workqueue_struct *ldv_func_arg1 ) ; extern struct drm_property *drm_property_create(struct drm_device * , int , char const * , int ) ; extern struct drm_property *drm_property_create_range(struct drm_device * , int , char const * , uint64_t , uint64_t ) ; extern int drm_property_add_enum(struct drm_property * , int , uint64_t , char const * ) ;
/* Start of the SDVO section: TV-format name table (19 entries). */
static char const *tv_format_names[19U] = { "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B", "PAL_D", "PAL_G", "PAL_H", "PAL_I", "PAL_M", "PAL_N", "PAL_NC", "PAL_60", "SECAM_B", "SECAM_D", "SECAM_G", "SECAM_K", "SECAM_K1", "SECAM_L", "SECAM_60"};
/* CIL-expanded container_of: intel_sdvo embeds intel_encoder at offset 0,
 * so the cast below is the whole conversion. */
static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder ) { struct intel_encoder const *__mptr ; { __mptr = 
(struct intel_encoder const *)encoder; return ((struct intel_sdvo *)__mptr); } }
/* Resolves the SDVO attached to a connector via its encoder. */
static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector ) { struct intel_encoder *tmp ; struct intel_sdvo *tmp___0 ; { tmp = intel_attached_encoder(connector); tmp___0 = to_sdvo(tmp); return (tmp___0); } }
/* drm_connector -> intel_sdvo_connector downcast (both container_of
 * offsets are 0, so CIL reduced it to plain casts). */
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector ) { struct intel_connector const *__mptr ; struct drm_connector const *__mptr___0 ; { __mptr___0 = (struct drm_connector const *)connector; __mptr = (struct intel_connector const *)((struct intel_connector *)__mptr___0); return ((struct intel_sdvo_connector *)__mptr); } } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo , uint16_t flags ) ; static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo , struct intel_sdvo_connector *intel_sdvo_connector , int type ) ; static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo , struct intel_sdvo_connector *intel_sdvo_connector ) ;
/* Writes the SDVOX control register. The register at 921920 (0xE1140) gets a
 * direct write + posting read (repeated once more on pch_type == 1).
 * Otherwise val targets one of the paired regs 397632 (0x61140) / 397664
 * (0x61160): the other register's current value is preserved and both are
 * rewritten twice (loop below) with posting reads in between -- presumably a
 * hardware double-buffering workaround; confirm against the i915 driver. */
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo , u32 val ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 bval ; u32 cval ; int i ; struct drm_i915_private *__p ; { dev = intel_sdvo->base.base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; bval = val; cval = val; if (intel_sdvo->sdvo_reg == 921920U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_sdvo->sdvo_reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_sdvo->sdvo_reg, 0); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 1U) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )intel_sdvo->sdvo_reg, val, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_sdvo->sdvo_reg, 0); } else { } return; } else { } if (intel_sdvo->sdvo_reg == 397632U) { cval = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397664L, 1); } else { bval = 
(*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397632L, 1); } i = 0; goto ldv_48647; ldv_48646: (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397632L, bval, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397632L, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 397664L, cval, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 397664L, 0); i = i + 1; ldv_48647: ; if (i <= 1) { goto ldv_48646; } else { } return; } } static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo , u8 addr , u8 *ch ) { struct i2c_msg msgs[2U] ; int ret ; long tmp ; { msgs[0].addr = (unsigned short )intel_sdvo->slave_addr; msgs[0].flags = 0U; msgs[0].len = 1U; msgs[0].buf = & addr; msgs[1].addr = (unsigned short )intel_sdvo->slave_addr; msgs[1].flags = 1U; msgs[1].len = 1U; msgs[1].buf = ch; ret = i2c_transfer(intel_sdvo->i2c, (struct i2c_msg *)(& msgs), 2); if (ret == 2) { return (1); } else { } tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_read_byte", "i2c transfer returned %d\n", ret); } else { } return (0); } } static struct _sdvo_cmd_name const sdvo_cmd_names[107U] = { {1U, "SDVO_CMD_RESET"}, {2U, "SDVO_CMD_GET_DEVICE_CAPS"}, {134U, "SDVO_CMD_GET_FIRMWARE_REV"}, {3U, "SDVO_CMD_GET_TRAINED_INPUTS"}, {4U, "SDVO_CMD_GET_ACTIVE_OUTPUTS"}, {5U, "SDVO_CMD_SET_ACTIVE_OUTPUTS"}, {6U, "SDVO_CMD_GET_IN_OUT_MAP"}, {7U, "SDVO_CMD_SET_IN_OUT_MAP"}, {11U, "SDVO_CMD_GET_ATTACHED_DISPLAYS"}, {12U, "SDVO_CMD_GET_HOT_PLUG_SUPPORT"}, {13U, "SDVO_CMD_SET_ACTIVE_HOT_PLUG"}, {14U, "SDVO_CMD_GET_ACTIVE_HOT_PLUG"}, {15U, "SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE"}, {16U, "SDVO_CMD_SET_TARGET_INPUT"}, {17U, "SDVO_CMD_SET_TARGET_OUTPUT"}, {18U, "SDVO_CMD_GET_INPUT_TIMINGS_PART1"}, {19U, "SDVO_CMD_GET_INPUT_TIMINGS_PART2"}, {20U, "SDVO_CMD_SET_INPUT_TIMINGS_PART1"}, {21U, "SDVO_CMD_SET_INPUT_TIMINGS_PART2"}, {20U, "SDVO_CMD_SET_INPUT_TIMINGS_PART1"}, {22U, "SDVO_CMD_SET_OUTPUT_TIMINGS_PART1"}, {23U, "SDVO_CMD_SET_OUTPUT_TIMINGS_PART2"}, 
{24U, "SDVO_CMD_GET_OUTPUT_TIMINGS_PART1"}, {25U, "SDVO_CMD_GET_OUTPUT_TIMINGS_PART2"}, {26U, "SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING"}, {27U, "SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1"}, {28U, "SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2"}, {29U, "SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE"}, {30U, "SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE"}, {31U, "SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS"}, {32U, "SDVO_CMD_GET_CLOCK_RATE_MULT"}, {33U, "SDVO_CMD_SET_CLOCK_RATE_MULT"}, {39U, "SDVO_CMD_GET_SUPPORTED_TV_FORMATS"}, {40U, "SDVO_CMD_GET_TV_FORMAT"}, {41U, "SDVO_CMD_SET_TV_FORMAT"}, {42U, "SDVO_CMD_GET_SUPPORTED_POWER_STATES"}, {43U, "SDVO_CMD_GET_POWER_STATE"}, {44U, "SDVO_CMD_SET_ENCODER_POWER_STATE"}, {125U, "SDVO_CMD_SET_DISPLAY_POWER_STATE"}, {122U, "SDVO_CMD_SET_CONTROL_BUS_SWITCH"}, {131U, "SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT"}, {133U, "SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT"}, {132U, "SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS"}, {103U, "SDVO_CMD_GET_MAX_HPOS"}, {104U, "SDVO_CMD_GET_HPOS"}, {105U, "SDVO_CMD_SET_HPOS"}, {106U, "SDVO_CMD_GET_MAX_VPOS"}, {107U, "SDVO_CMD_GET_VPOS"}, {108U, "SDVO_CMD_SET_VPOS"}, {85U, "SDVO_CMD_GET_MAX_SATURATION"}, {86U, "SDVO_CMD_GET_SATURATION"}, {87U, "SDVO_CMD_SET_SATURATION"}, {88U, "SDVO_CMD_GET_MAX_HUE"}, {89U, "SDVO_CMD_GET_HUE"}, {90U, "SDVO_CMD_SET_HUE"}, {94U, "SDVO_CMD_GET_MAX_CONTRAST"}, {95U, "SDVO_CMD_GET_CONTRAST"}, {96U, "SDVO_CMD_SET_CONTRAST"}, {91U, "SDVO_CMD_GET_MAX_BRIGHTNESS"}, {92U, "SDVO_CMD_GET_BRIGHTNESS"}, {93U, "SDVO_CMD_SET_BRIGHTNESS"}, {97U, "SDVO_CMD_GET_MAX_OVERSCAN_H"}, {98U, "SDVO_CMD_GET_OVERSCAN_H"}, {99U, "SDVO_CMD_SET_OVERSCAN_H"}, {100U, "SDVO_CMD_GET_MAX_OVERSCAN_V"}, {101U, "SDVO_CMD_GET_OVERSCAN_V"}, {102U, "SDVO_CMD_SET_OVERSCAN_V"}, {77U, "SDVO_CMD_GET_MAX_FLICKER_FILTER"}, {78U, "SDVO_CMD_GET_FLICKER_FILTER"}, {79U, "SDVO_CMD_SET_FLICKER_FILTER"}, {123U, "SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE"}, {80U, "SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE"}, {81U, "SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE"}, {82U, 
"SDVO_CMD_GET_MAX_FLICKER_FILTER_2D"}, {83U, "SDVO_CMD_GET_FLICKER_FILTER_2D"}, {84U, "SDVO_CMD_SET_FLICKER_FILTER_2D"}, {109U, "SDVO_CMD_GET_MAX_SHARPNESS"}, {110U, "SDVO_CMD_GET_SHARPNESS"}, {111U, "SDVO_CMD_SET_SHARPNESS"}, {112U, "SDVO_CMD_GET_DOT_CRAWL"}, {113U, "SDVO_CMD_SET_DOT_CRAWL"}, {116U, "SDVO_CMD_GET_MAX_TV_CHROMA_FILTER"}, {117U, "SDVO_CMD_GET_TV_CHROMA_FILTER"}, {118U, "SDVO_CMD_SET_TV_CHROMA_FILTER"}, {119U, "SDVO_CMD_GET_MAX_TV_LUMA_FILTER"}, {120U, "SDVO_CMD_GET_TV_LUMA_FILTER"}, {121U, "SDVO_CMD_SET_TV_LUMA_FILTER"}, {157U, "SDVO_CMD_GET_SUPP_ENCODE"}, {158U, "SDVO_CMD_GET_ENCODE"}, {159U, "SDVO_CMD_SET_ENCODE"}, {139U, "SDVO_CMD_SET_PIXEL_REPLI"}, {140U, "SDVO_CMD_GET_PIXEL_REPLI"}, {141U, "SDVO_CMD_GET_COLORIMETRY_CAP"}, {142U, "SDVO_CMD_SET_COLORIMETRY"}, {143U, "SDVO_CMD_GET_COLORIMETRY"}, {144U, "SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER"}, {145U, "SDVO_CMD_SET_AUDIO_STAT"}, {146U, "SDVO_CMD_GET_AUDIO_STAT"}, {148U, "SDVO_CMD_GET_HBUF_INDEX"}, {147U, "SDVO_CMD_SET_HBUF_INDEX"}, {149U, "SDVO_CMD_GET_HBUF_INFO"}, {151U, "SDVO_CMD_GET_HBUF_AV_SPLIT"}, {150U, "SDVO_CMD_SET_HBUF_AV_SPLIT"}, {155U, "SDVO_CMD_GET_HBUF_TXRATE"}, {154U, "SDVO_CMD_SET_HBUF_TXRATE"}, {152U, "SDVO_CMD_SET_HBUF_DATA"}, {153U, "SDVO_CMD_GET_HBUF_DATA"}}; static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo , u8 cmd , void const *args , int args_len ) { int i ; int pos ; char buffer[256U] ; int __max1 ; int __max2 ; int tmp ; int __max1___0 ; int __max2___0 ; int tmp___0 ; int __max1___1 ; int __max2___1 ; int tmp___1 ; int __max1___2 ; int __max2___2 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; { pos = 0; i = 0; goto ldv_48674; ldv_48673: __max1 = 256 - pos; __max2 = 0; tmp = snprintf((char *)(& buffer) + (unsigned long )pos, (size_t )(__max1 > __max2 ? 
__max1 : __max2), "%02X ", (int )*((u8 *)args + (unsigned long )i)); pos = tmp + pos; i = i + 1; ldv_48674: ; if (i < args_len) { goto ldv_48673; } else { } goto ldv_48680; ldv_48679: __max1___0 = 256 - pos; __max2___0 = 0; tmp___0 = snprintf((char *)(& buffer) + (unsigned long )pos, (size_t )(__max1___0 > __max2___0 ? __max1___0 : __max2___0), " "); pos = tmp___0 + pos; i = i + 1; ldv_48680: ; if (i <= 7) { goto ldv_48679; } else { } i = 0; goto ldv_48689; ldv_48688: ; if ((int )((unsigned char )sdvo_cmd_names[i].cmd) == (int )cmd) { __max1___1 = 256 - pos; __max2___1 = 0; tmp___1 = snprintf((char *)(& buffer) + (unsigned long )pos, (size_t )(__max1___1 > __max2___1 ? __max1___1 : __max2___1), "(%s)", sdvo_cmd_names[i].name); pos = tmp___1 + pos; goto ldv_48687; } else { } i = i + 1; ldv_48689: ; if ((unsigned int )i <= 106U) { goto ldv_48688; } else { } ldv_48687: ; if (i == 107) { __max1___2 = 256 - pos; __max2___2 = 0; tmp___2 = snprintf((char *)(& buffer) + (unsigned long )pos, (size_t )(__max1___2 > __max2___2 ? __max1___2 : __max2___2), "(%02X)", (int )cmd); pos = tmp___2 + pos; } else { } tmp___3 = ldv__builtin_expect(pos > 254, 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sdvo.c"), "i" (448), "i" (12UL)); ldv_48695: ; goto ldv_48695; } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_sdvo_debug_write", "%s: W: %02X %s\n", (int )intel_sdvo->is_sdvob ? 
(char *)"SDVOB" : (char *)"SDVOC", (int )cmd, (char *)(& buffer)); } else { } return; } } static char const *cmd_status_names[7U] = { "Power on", "Success", "Not supported", "Invalid arg", "Pending", "Target not specified", "Scaling not supported"}; static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo , u8 cmd , void const *args , int args_len ) { u8 *buf ; u8 status ; struct i2c_msg *msgs ; int i ; int ret ; void *tmp ; void *tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 1; tmp = kzalloc((size_t )((args_len + 1) * 2), 208U); buf = (u8 *)tmp; if ((unsigned long )buf == (unsigned long )((u8 *)0U)) { return (0); } else { } tmp___0 = kcalloc((size_t )(args_len + 3), 16UL, 208U); msgs = (struct i2c_msg *)tmp___0; if ((unsigned long )msgs == (unsigned long )((struct i2c_msg *)0)) { kfree((void const *)buf); return (0); } else { } intel_sdvo_debug_write(intel_sdvo, (int )cmd, args, args_len); i = 0; goto ldv_48710; ldv_48709: (msgs + (unsigned long )i)->addr = (__u16 )intel_sdvo->slave_addr; (msgs + (unsigned long )i)->flags = 0U; (msgs + (unsigned long )i)->len = 2U; (msgs + (unsigned long )i)->buf = buf + (unsigned long )(i * 2); *(buf + (unsigned long )(i * 2)) = 7U - (unsigned int )((u8 )i); *(buf + ((unsigned long )(i * 2) + 1UL)) = *((u8 *)args + (unsigned long )i); i = i + 1; ldv_48710: ; if (i < args_len) { goto ldv_48709; } else { } (msgs + (unsigned long )i)->addr = (__u16 )intel_sdvo->slave_addr; (msgs + (unsigned long )i)->flags = 0U; (msgs + (unsigned long )i)->len = 2U; (msgs + (unsigned long )i)->buf = buf + (unsigned long )(i * 2); *(buf + (unsigned long )(i * 2)) = 8U; *(buf + ((unsigned long )(i * 2) + 1UL)) = cmd; status = 9U; (msgs + ((unsigned long )i + 1UL))->addr = (__u16 )intel_sdvo->slave_addr; (msgs + ((unsigned long )i + 1UL))->flags = 0U; (msgs + ((unsigned long )i + 1UL))->len = 1U; (msgs + ((unsigned long )i + 1UL))->buf = & status; (msgs + ((unsigned long )i + 2UL))->addr = (__u16 )intel_sdvo->slave_addr; (msgs + ((unsigned long 
)i + 2UL))->flags = 1U; (msgs + ((unsigned long )i + 2UL))->len = 1U; (msgs + ((unsigned long )i + 2UL))->buf = & status; ret = i2c_transfer(intel_sdvo->i2c, msgs, i + 3); if (ret < 0) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_sdvo_write_cmd", "I2c transfer returned %d\n", ret); } else { } ret = 0; goto out; } else { } if (i + 3 != ret) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_sdvo_write_cmd", "I2c transfer returned %d/%d\n", ret, i + 3); } else { } ret = 0; } else { } out: kfree((void const *)msgs); kfree((void const *)buf); return (ret != 0); } } static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo , void *response , int response_len ) { u8 retry ; u8 status ; int i ; int pos ; char buffer[256U] ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; int __max1 ; int __max2 ; int tmp___3 ; int __max1___0 ; int __max2___0 ; int tmp___4 ; bool tmp___5 ; int tmp___6 ; int __max1___1 ; int __max2___1 ; int tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; { retry = 15U; pos = 0; tmp = intel_sdvo_read_byte(intel_sdvo, 9, & status); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto log_fail; } else { } goto ldv_48726; ldv_48725: ; if ((unsigned int )retry <= 9U) { msleep(15U); } else { __const_udelay(64425UL); } tmp___1 = intel_sdvo_read_byte(intel_sdvo, 9, & status); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto log_fail; } else { } ldv_48726: ; if ((unsigned int )status == 4U || (unsigned int )status == 5U) { retry = (u8 )((int )retry - 1); if ((unsigned int )retry != 0U) { goto ldv_48725; } else { goto ldv_48727; } } else { } ldv_48727: ; if ((unsigned int )status <= 6U) { __max1 = 256 - pos; __max2 = 0; tmp___3 = snprintf((char *)(& buffer) + (unsigned long )pos, (size_t )(__max1 > __max2 ? 
__max1 : __max2), "(%s)", cmd_status_names[(int )status]); pos = tmp___3 + pos; } else { __max1___0 = 256 - pos; __max2___0 = 0; tmp___4 = snprintf((char *)(& buffer) + (unsigned long )pos, (size_t )(__max1___0 > __max2___0 ? __max1___0 : __max2___0), "(??? %d)", (int )status); pos = tmp___4 + pos; } if ((unsigned int )status != 1U) { goto log_fail; } else { } i = 0; goto ldv_48738; ldv_48737: tmp___5 = intel_sdvo_read_byte(intel_sdvo, (int )((unsigned int )((u8 )i) + 10U), (u8 *)response + (unsigned long )i); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { goto log_fail; } else { } __max1___1 = 256 - pos; __max2___1 = 0; tmp___7 = snprintf((char *)(& buffer) + (unsigned long )pos, (size_t )(__max1___1 > __max2___1 ? __max1___1 : __max2___1), " %02X", (int )*((u8 *)response + (unsigned long )i)); pos = tmp___7 + pos; i = i + 1; ldv_48738: ; if (i < response_len) { goto ldv_48737; } else { } tmp___8 = ldv__builtin_expect(pos > 254, 0L); if (tmp___8 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sdvo.c"), "i" (594), "i" (12UL)); ldv_48740: ; goto ldv_48740; } else { } tmp___9 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___9 != 0L) { drm_ut_debug_printk("intel_sdvo_read_response", "%s: R: %s\n", (int )intel_sdvo->is_sdvob ? (char *)"SDVOB" : (char *)"SDVOC", (char *)(& buffer)); } else { } return (1); log_fail: tmp___10 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___10 != 0L) { drm_ut_debug_printk("intel_sdvo_read_response", "%s: R: ... failed\n", (int )intel_sdvo->is_sdvob ? 
/* NOTE(review): this translation unit is CIL-generated (LDV harness built from
 * drivers/gpu/drm/i915/intel_sdvo.c -- see the embedded __FILE__ strings).
 * Source-line boundaries bisect definitions, so this span begins and ends with
 * fragments of neighbouring functions.  Only layout and comments are changed
 * here; the token stream is preserved exactly. */

/* Tail of intel_sdvo_read_response() (definition starts on an earlier line):
 * the log_fail path prints "R: ... failed" tagged SDVOB/SDVOC and reports
 * failure to the caller. */
(char *)"SDVOB" : (char *)"SDVOC"); } else { } return (0); } }

/* Pick the SDVO clock-rate multiplier for a mode.  mode->clock is in kHz:
 * above ~100 MHz no multiplication is needed (1x), 50-100 MHz uses 2x,
 * anything slower uses 4x. */
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode )
{
  {
  if (mode->clock > 99999) {
    return (1);
  } else
  if (mode->clock > 49999) {
    return (2);
  } else {
    return (4);
  }
  }
}

/* Select which DDC bus the SDVO device routes: sends
 * SDVO_CMD_SET_CONTROL_BUS_SWITCH (opcode 122, see sdvo_cmd_names[]) as a
 * bare write -- no status response is read for this command. */
static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo , u8 ddc_bus )
{ bool tmp ;
  {
  tmp = intel_sdvo_write_cmd(intel_sdvo, 122, (void const *)(& ddc_bus), 1);
  return (tmp);
  }
}

/* Issue an SDVO "set" command: write opcode + payload, then poll/consume the
 * (empty) status response.  Returns false if either step fails. */
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo , u8 cmd , void const *data , int len )
{ bool tmp ; int tmp___0 ; bool tmp___1 ;
  {
  tmp = intel_sdvo_write_cmd(intel_sdvo, (int )cmd, data, len);
  /* CIL lowering of `if (!intel_sdvo_write_cmd(...)) return false;`. */
  if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; }
  if (tmp___0) { return (0); } else { }
  tmp___1 = intel_sdvo_read_response(intel_sdvo, (void *)0, 0);
  return (tmp___1);
  }
}

/* Issue an SDVO "get" command: write the opcode with no payload, then read
 * `len` response bytes into `value`.  Returns false on any i2c failure. */
static bool intel_sdvo_get_value(struct intel_sdvo *intel_sdvo , u8 cmd , void *value , int len )
{ bool tmp ; int tmp___0 ; bool tmp___1 ;
  {
  tmp = intel_sdvo_write_cmd(intel_sdvo, (int )cmd, (void const *)0, 0);
  if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; }
  if (tmp___0) { return (0); } else { }
  tmp___1 = intel_sdvo_read_response(intel_sdvo, value, len);
  return (tmp___1);
  }
}

/* SDVO_CMD_SET_TARGET_INPUT (opcode 16): always targets input 0
 * (targets.target_1 = 0). */
static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo )
{ struct intel_sdvo_set_target_input_args targets ; bool tmp ;
  {
  targets.target_1 = 0U;
  targets.pad = (unsigned char)0;
  tmp = intel_sdvo_set_value(intel_sdvo, 16, (void const *)(& targets), 1);
  return (tmp);
  }
}

/* SDVO_CMD_GET_TRAINED_INPUTS (opcode 3): report per-input training status
 * through *input_1 / *input_2.  Returns false without touching the outputs
 * if the query fails. */
static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo , bool *input_1 , bool *input_2 )
{ struct intel_sdvo_get_trained_inputs_response response ; bool tmp ; int tmp___0 ;
  {
  tmp = intel_sdvo_get_value(intel_sdvo, 3, (void *)(& response), 1);
  if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; }
  if (tmp___0) { return (0); } else { }
  *input_1 = (int )response.input0_trained != 0;
  *input_2 = (int )response.input1_trained != 0;
  return (1);
  }
}

/* Head of the next definition; its name and body continue on the following
 * source line. */
static bool
/* Continuation: `static bool` for this function appears at the end of the
 * previous source line of this CIL-generated unit.  Token stream preserved;
 * only layout/comments changed. */

/* SDVO_CMD_SET_ACTIVE_OUTPUTS (opcode 5): enable the output ports given by
 * the 16-bit `outputs` mask. */
intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo , u16 outputs )
{ bool tmp ;
  {
  tmp = intel_sdvo_set_value(intel_sdvo, 5, (void const *)(& outputs), 2);
  return (tmp);
  }
}

/* SDVO_CMD_GET_ACTIVE_OUTPUTS (opcode 4): read back the active-output mask. */
static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo , u16 *outputs )
{ bool tmp ;
  {
  tmp = intel_sdvo_get_value(intel_sdvo, 4, (void *)outputs, 2);
  return (tmp);
  }
}

/* SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE (opcode 29): the device reports min/max
 * in 10 kHz units; scale by 10 so the outputs are in kHz like mode->clock. */
static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo , int *clock_min , int *clock_max )
{ struct intel_sdvo_pixel_clock_range clocks ; bool tmp ; int tmp___0 ;
  {
  tmp = intel_sdvo_get_value(intel_sdvo, 29, (void *)(& clocks), 4);
  if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; }
  if (tmp___0) { return (0); } else { }
  *clock_min = (int )clocks.min * 10;
  *clock_max = (int )clocks.max * 10;
  return (1);
  }
}

/* SDVO_CMD_SET_TARGET_OUTPUT (opcode 17): select which output mask subsequent
 * timing commands apply to. */
static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo , u16 outputs )
{ bool tmp ;
  {
  tmp = intel_sdvo_set_value(intel_sdvo, 17, (void const *)(& outputs), 2);
  return (tmp);
  }
}

/* Write a 16-byte DTD as two 8-byte halves: `cmd` carries part1 and, per the
 * sdvo_cmd_names[] table, `cmd + 1` is always the matching ..._PART2 opcode.
 * Short-circuits: part2 is only sent if part1 succeeded. */
static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo , u8 cmd , struct intel_sdvo_dtd *dtd )
{ bool tmp ; bool tmp___0 ; int tmp___1 ;
  {
  tmp = intel_sdvo_set_value(intel_sdvo, (int )cmd, (void const *)(& dtd->part1), 8);
  if ((int )tmp) {
    tmp___0 = intel_sdvo_set_value(intel_sdvo, (int )((unsigned int )cmd + 1U), (void const *)(& dtd->part2), 8);
    if ((int )tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; }
  } else {
    tmp___1 = 0;
  }
  return ((bool )tmp___1);
  }
}

/* Read a 16-byte DTD as two 8-byte halves (mirror of intel_sdvo_set_timing);
 * part2 is only fetched if part1 was read successfully. */
static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo , u8 cmd , struct intel_sdvo_dtd *dtd )
{ bool tmp ; bool tmp___0 ; int tmp___1 ;
  {
  tmp = intel_sdvo_get_value(intel_sdvo, (int )cmd, (void *)(& dtd->part1), 8);
  if ((int )tmp) {
    tmp___0 = intel_sdvo_get_value(intel_sdvo, (int )((unsigned int )cmd + 1U), (void *)(& dtd->part2), 8);
    if ((int )tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; }
  } else {
    tmp___1 = 0;
  }
  return ((bool )tmp___1);
  }
}

/* Head of intel_sdvo_set_input_timing(); its second parameter's type name and
 * the body continue on the following source line. */
static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo , struct
/* Continuation: the `static bool intel_sdvo_set_input_timing(struct intel_sdvo
 * *intel_sdvo , struct` prefix sits at the end of the previous source line of
 * this CIL-generated unit.  Token stream preserved; layout/comments only. */

/* Program the input DTD: SDVO_CMD_SET_INPUT_TIMINGS_PART1/2 (opcodes 20/21
 * via intel_sdvo_set_timing's cmd+1 convention). */
intel_sdvo_dtd *dtd )
{ bool tmp ;
  {
  tmp = intel_sdvo_set_timing(intel_sdvo, 20, dtd);
  return (tmp);
  }
}

/* Program the output DTD: SDVO_CMD_SET_OUTPUT_TIMINGS_PART1/2 (opcodes 22/23). */
static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo , struct intel_sdvo_dtd *dtd )
{ bool tmp ;
  {
  tmp = intel_sdvo_set_timing(intel_sdvo, 22, dtd);
  return (tmp);
  }
}

/* Read back the input DTD: SDVO_CMD_GET_INPUT_TIMINGS_PART1/2 (opcodes 18/19). */
static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo , struct intel_sdvo_dtd *dtd )
{ bool tmp ;
  {
  tmp = intel_sdvo_get_timing(intel_sdvo, 18, dtd);
  return (tmp);
  }
}

/* SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING (opcode 26): ask the encoder to
 * synthesize a preferred input timing for the given clock/width/height.
 * For LVDS, args.scaled is set whenever the requested size differs from the
 * panel's fixed mode, so the encoder scales to the panel. */
static bool intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo , uint16_t clock , uint16_t width , uint16_t height )
{ struct intel_sdvo_preferred_input_timing_args args ; bool tmp ;
  {
  memset((void *)(& args), 0, 7UL);
  args.clock = clock;
  args.width = width;
  args.height = height;
  args.interlace = 0U;
  if ((int )intel_sdvo->is_lvds && ((intel_sdvo->sdvo_lvds_fixed_mode)->hdisplay != (int )width || (intel_sdvo->sdvo_lvds_fixed_mode)->vdisplay != (int )height)) {
    args.scaled = 1U;
  } else {
  }
  tmp = intel_sdvo_set_value(intel_sdvo, 26, (void const *)(& args), 7);
  return (tmp);
  }
}

/* Fetch the timing synthesized above: GET_PREFERRED_INPUT_TIMING_PART1/2
 * (opcodes 27/28); part2 is only read if part1 succeeded. */
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo , struct intel_sdvo_dtd *dtd )
{ bool tmp ; bool tmp___0 ; int tmp___1 ;
  {
  tmp = intel_sdvo_get_value(intel_sdvo, 27, (void *)(& dtd->part1), 8);
  if ((int )tmp) {
    tmp___0 = intel_sdvo_get_value(intel_sdvo, 28, (void *)(& dtd->part2), 8);
    if ((int )tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; }
  } else {
    tmp___1 = 0;
  }
  return ((bool )tmp___1);
  }
}

/* SDVO_CMD_SET_CLOCK_RATE_MULT (opcode 33): program the clock-rate multiplier
 * chosen by intel_sdvo_get_pixel_multiplier(). */
static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo , u8 val )
{ bool tmp ;
  {
  tmp = intel_sdvo_set_value(intel_sdvo, 33, (void const *)(& val), 1);
  return (tmp);
  }
}

/* Head of intel_sdvo_get_dtd_from_mode() (drm_display_mode -> SDVO DTD
 * conversion).  Locals hold the mode's derived h/v blank, sync-length and
 * sync-offset fields; the statement body continues on the following source
 * line. */
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd , struct drm_display_mode const *mode )
{ uint16_t width ; uint16_t height ;
  uint16_t h_blank_len ; uint16_t h_sync_len ;
  uint16_t v_blank_len ; uint16_t v_sync_len ;
  uint16_t h_sync_offset ; uint16_t v_sync_offset ;
  int mode_clock ;
  {
memset((void *)dtd, 0, 16UL); width = (uint16_t )mode->hdisplay; height = (uint16_t )mode->vdisplay; h_blank_len = (int )((uint16_t )mode->htotal) - (int )((uint16_t )mode->hdisplay); h_sync_len = (int )((uint16_t )mode->hsync_end) - (int )((uint16_t )mode->hsync_start); v_blank_len = (int )((uint16_t )mode->vtotal) - (int )((uint16_t )mode->vdisplay); v_sync_len = (int )((uint16_t )mode->vsync_end) - (int )((uint16_t )mode->vsync_start); h_sync_offset = (int )((uint16_t )mode->hsync_start) - (int )((uint16_t )mode->hdisplay); v_sync_offset = (int )((uint16_t )mode->vsync_start) - (int )((uint16_t )mode->vdisplay); mode_clock = mode->clock; mode_clock = mode_clock / 10; dtd->part1.clock = (u16 )mode_clock; dtd->part1.h_active = (u8 )width; dtd->part1.h_blank = (u8 )h_blank_len; dtd->part1.h_high = (u8 )((int )((signed char )(((int )width >> 8) << 4)) | ((int )((signed char )((int )h_blank_len >> 8)) & 15)); dtd->part1.v_active = (u8 )height; dtd->part1.v_blank = (u8 )v_blank_len; dtd->part1.v_high = (u8 )((int )((signed char )(((int )height >> 8) << 4)) | ((int )((signed char )((int )v_blank_len >> 8)) & 15)); dtd->part2.h_sync_off = (u8 )h_sync_offset; dtd->part2.h_sync_width = (u8 )h_sync_len; dtd->part2.v_sync_off_width = (u8 )((int )((signed char )((int )v_sync_offset << 4)) | ((int )((signed char )v_sync_len) & 15)); dtd->part2.sync_off_width_high = (u8 )((((int )((signed char )(((int )h_sync_offset & 768) >> 2)) | (int )((signed char )(((int )h_sync_len & 768) >> 4))) | (int )((signed char )(((int )v_sync_offset & 48) >> 2))) | (int )((signed char )(((int )v_sync_len & 48) >> 4))); dtd->part2.dtd_flags = 24U; if (((unsigned int )mode->flags & 16U) != 0U) { dtd->part2.dtd_flags = (u8 )((unsigned int )dtd->part2.dtd_flags | 128U); } else { } if ((int )mode->flags & 1) { dtd->part2.dtd_flags = (u8 )((unsigned int )dtd->part2.dtd_flags | 2U); } else { } if (((unsigned int )mode->flags & 4U) != 0U) { dtd->part2.dtd_flags = (u8 )((unsigned int )dtd->part2.dtd_flags 
| 4U); } else { } dtd->part2.v_sync_off_high = (unsigned int )((u8 )v_sync_offset) & 192U; return; } } static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode , struct intel_sdvo_dtd const *dtd ) { struct drm_display_mode mode ; { mode.head.next = 0; mode.head.prev = 0; mode.base.id = 0U; mode.base.type = 0U; mode.base.properties = 0; mode.name[0] = (char)0; mode.name[1] = (char)0; mode.name[2] = (char)0; mode.name[3] = (char)0; mode.name[4] = (char)0; mode.name[5] = (char)0; mode.name[6] = (char)0; mode.name[7] = (char)0; mode.name[8] = (char)0; mode.name[9] = (char)0; mode.name[10] = (char)0; mode.name[11] = (char)0; mode.name[12] = (char)0; mode.name[13] = (char)0; mode.name[14] = (char)0; mode.name[15] = (char)0; mode.name[16] = (char)0; mode.name[17] = (char)0; mode.name[18] = (char)0; mode.name[19] = (char)0; mode.name[20] = (char)0; mode.name[21] = (char)0; mode.name[22] = (char)0; mode.name[23] = (char)0; mode.name[24] = (char)0; mode.name[25] = (char)0; mode.name[26] = (char)0; mode.name[27] = (char)0; mode.name[28] = (char)0; mode.name[29] = (char)0; mode.name[30] = (char)0; mode.name[31] = (char)0; mode.status = 0; mode.type = 0U; mode.clock = 0; mode.hdisplay = 0; mode.hsync_start = 0; mode.hsync_end = 0; mode.htotal = 0; mode.hskew = 0; mode.vdisplay = 0; mode.vsync_start = 0; mode.vsync_end = 0; mode.vtotal = 0; mode.vscan = 0; mode.flags = 0U; mode.width_mm = 0; mode.height_mm = 0; mode.crtc_clock = 0; mode.crtc_hdisplay = 0; mode.crtc_hblank_start = 0; mode.crtc_hblank_end = 0; mode.crtc_hsync_start = 0; mode.crtc_hsync_end = 0; mode.crtc_htotal = 0; mode.crtc_hskew = 0; mode.crtc_vdisplay = 0; mode.crtc_vblank_start = 0; mode.crtc_vblank_end = 0; mode.crtc_vsync_start = 0; mode.crtc_vsync_end = 0; mode.crtc_vtotal = 0; mode.private = 0; mode.private_flags = 0; mode.vrefresh = 0; mode.hsync = 0; mode.picture_aspect_ratio = 0; mode.hdisplay = (int )dtd->part1.h_active; mode.hdisplay = mode.hdisplay + ((((int )((unsigned char 
)dtd->part1.h_high) >> 4) & 15) << 8); mode.hsync_start = mode.hdisplay + (int )dtd->part2.h_sync_off; mode.hsync_start = mode.hsync_start + (((int )dtd->part2.sync_off_width_high & 192) << 2); mode.hsync_end = mode.hsync_start + (int )dtd->part2.h_sync_width; mode.hsync_end = mode.hsync_end + (((int )dtd->part2.sync_off_width_high & 48) << 4); mode.htotal = mode.hdisplay + (int )dtd->part1.h_blank; mode.htotal = mode.htotal + (((int )dtd->part1.h_high & 15) << 8); mode.vdisplay = (int )dtd->part1.v_active; mode.vdisplay = mode.vdisplay + ((((int )((unsigned char )dtd->part1.v_high) >> 4) & 15) << 8); mode.vsync_start = mode.vdisplay; mode.vsync_start = mode.vsync_start + (((int )((unsigned char )dtd->part2.v_sync_off_width) >> 4) & 15); mode.vsync_start = mode.vsync_start + (((int )dtd->part2.sync_off_width_high & 12) << 2); mode.vsync_start = mode.vsync_start + ((int )dtd->part2.v_sync_off_high & 192); mode.vsync_end = mode.vsync_start + ((int )dtd->part2.v_sync_off_width & 15); mode.vsync_end = mode.vsync_end + (((int )dtd->part2.sync_off_width_high & 3) << 4); mode.vtotal = mode.vdisplay + (int )dtd->part1.v_blank; mode.vtotal = mode.vtotal + (((int )dtd->part1.v_high & 15) << 8); mode.clock = (int )dtd->part1.clock * 10; if ((int )((signed char )dtd->part2.dtd_flags) < 0) { mode.flags = mode.flags | 16U; } else { } if (((int )dtd->part2.dtd_flags & 2) != 0) { mode.flags = mode.flags | 1U; } else { mode.flags = mode.flags | 2U; } if (((int )dtd->part2.dtd_flags & 4) != 0) { mode.flags = mode.flags | 4U; } else { mode.flags = mode.flags | 8U; } drm_mode_set_crtcinfo(& mode, 0); drm_mode_copy(pmode, (struct drm_display_mode const *)(& mode)); return; } } static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo ) { struct intel_sdvo_encode encode ; bool tmp ; { tmp = intel_sdvo_get_value(intel_sdvo, 157, (void *)(& encode), 2); return (tmp); } } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo , uint8_t mode ) { bool tmp ; { tmp = 
intel_sdvo_set_value(intel_sdvo, 159, (void const *)(& mode), 1); return (tmp); } } static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo , uint8_t mode ) { bool tmp ; { tmp = intel_sdvo_set_value(intel_sdvo, 142, (void const *)(& mode), 1); return (tmp); } } static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo , unsigned int if_index , uint8_t tx_rate , uint8_t const *data , unsigned int length ) { uint8_t set_buf_index[2U] ; uint8_t hbuf_size ; uint8_t tmp[8U] ; int i ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; unsigned int __min1 ; unsigned int __min2 ; bool tmp___5 ; int tmp___6 ; bool tmp___7 ; { set_buf_index[0] = (unsigned char )if_index; set_buf_index[1] = 0U; tmp___0 = intel_sdvo_set_value(intel_sdvo, 147, (void const *)(& set_buf_index), 2); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = intel_sdvo_get_value(intel_sdvo, 149, (void *)(& hbuf_size), 1); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } hbuf_size = (uint8_t )((int )hbuf_size + 1); tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_sdvo_write_infoframe", "writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", if_index, length, (int )hbuf_size); } else { } i = 0; goto ldv_48882; ldv_48881: memset((void *)(& tmp), 0, 8UL); if ((unsigned int )i < length) { __min1 = 8U; __min2 = length - (unsigned int )i; memcpy((void *)(& tmp), (void const *)data + (unsigned long )i, (size_t )(__min1 < __min2 ? 
__min1 : __min2)); } else { } tmp___5 = intel_sdvo_set_value(intel_sdvo, 152, (void const *)(& tmp), 8); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { return (0); } else { } i = i + 8; ldv_48882: ; if ((int )hbuf_size > i) { goto ldv_48881; } else { } tmp___7 = intel_sdvo_set_value(intel_sdvo, 154, (void const *)(& tx_rate), 1); return (tmp___7); } } static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo , struct drm_display_mode const *adjusted_mode ) { uint8_t sdvo_data[17U] ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; union hdmi_infoframe frame ; int ret ; ssize_t len ; bool tmp ; { crtc = intel_sdvo->base.base.crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; ret = drm_hdmi_avi_infoframe_from_display_mode(& frame.avi, adjusted_mode); if (ret < 0) { drm_err("couldn\'t fill AVI infoframe\n"); return (0); } else { } if ((int )intel_sdvo->rgb_quant_range_selectable) { if ((int )(intel_crtc->config)->limited_color_range) { frame.avi.quantization_range = 1; } else { frame.avi.quantization_range = 2; } } else { } len = hdmi_infoframe_pack(& frame, (void *)(& sdvo_data), 17UL); if (len < 0L) { return (0); } else { } tmp = intel_sdvo_write_infoframe(intel_sdvo, 1U, 192, (uint8_t const *)(& sdvo_data), 17U); return (tmp); } } static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo ) { struct intel_sdvo_tv_format format ; uint32_t format_map ; unsigned long _min1 ; unsigned long _min2 ; bool tmp ; { format_map = (uint32_t )(1 << intel_sdvo->tv_format_index); memset((void *)(& format), 0, 6UL); _min1 = 6UL; _min2 = 4UL; memcpy((void *)(& format), (void const *)(& format_map), _min1 < _min2 ? 
_min1 : _min2); tmp = intel_sdvo_set_value(intel_sdvo, 41, (void const *)(& format), 6); return (tmp); } } static bool intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo , struct drm_display_mode const *mode ) { struct intel_sdvo_dtd output_dtd ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp = intel_sdvo_set_target_output(intel_sdvo, (int )intel_sdvo->attached_output); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } intel_sdvo_get_dtd_from_mode(& output_dtd, mode); tmp___1 = intel_sdvo_set_output_timing(intel_sdvo, & output_dtd); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } return (1); } } static bool intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo , struct drm_display_mode const *mode , struct drm_display_mode *adjusted_mode ) { struct intel_sdvo_dtd input_dtd ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; { tmp = intel_sdvo_set_target_input(intel_sdvo); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = intel_sdvo_create_preferred_input_timing(intel_sdvo, (int )((uint16_t )((int )mode->clock / 10)), (int )((uint16_t )mode->hdisplay), (int )((uint16_t )mode->vdisplay)); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___3 = intel_sdvo_get_preferred_input_timing(intel_sdvo, & input_dtd); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (0); } else { } intel_sdvo_get_mode_from_dtd(adjusted_mode, (struct intel_sdvo_dtd const *)(& input_dtd)); intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags; return (1); } } static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config ) { unsigned int dotclock ; struct dpll *clock ; int __ret_warn_on ; long tmp ; { dotclock = (unsigned int )pipe_config->port_clock; clock = & pipe_config->dpll; if (dotclock > 99999U && dotclock <= 
140499U) { clock->p1 = 2; clock->p2 = 10; clock->n = 3; clock->m1 = 16; clock->m2 = 8; } else if (dotclock > 140499U && dotclock <= 200000U) { clock->p1 = 1; clock->p2 = 10; clock->n = 6; clock->m1 = 12; clock->m2 = 8; } else { __ret_warn_on = 1; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sdvo.c", 1117, "SDVO TV clock out of range: %i\n", dotclock); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } pipe_config->clock_set = 1; return; } } static bool intel_sdvo_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct drm_display_mode *adjusted_mode ; struct drm_display_mode *mode ; long tmp___0 ; struct drm_i915_private *__p ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; int tmp___5 ; u8 tmp___6 ; { tmp = to_sdvo(encoder); intel_sdvo = tmp; adjusted_mode = & pipe_config->base.adjusted_mode; mode = & pipe_config->base.mode; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_sdvo_compute_config", "forcing bpc to 8 for SDVO\n"); } else { } pipe_config->pipe_bpp = 24; __p = to_i915((struct drm_device const *)encoder->base.dev); if ((unsigned int )__p->pch_type != 0U) { pipe_config->has_pch_encoder = 1; } else { } if ((int )intel_sdvo->is_tv) { tmp___1 = intel_sdvo_set_output_timings_from_mode(intel_sdvo, (struct drm_display_mode const *)mode); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } intel_sdvo_get_preferred_input_mode(intel_sdvo, (struct drm_display_mode const *)mode, adjusted_mode); pipe_config->sdvo_tv_clock = 1; } else if ((int )intel_sdvo->is_lvds) { tmp___3 = 
intel_sdvo_set_output_timings_from_mode(intel_sdvo, (struct drm_display_mode const *)intel_sdvo->sdvo_lvds_fixed_mode); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (0); } else { } intel_sdvo_get_preferred_input_mode(intel_sdvo, (struct drm_display_mode const *)mode, adjusted_mode); } else { } tmp___5 = intel_sdvo_get_pixel_multiplier(adjusted_mode); pipe_config->pixel_multiplier = (unsigned int )tmp___5; pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor; if ((int )intel_sdvo->color_range_auto) { if ((int )pipe_config->has_hdmi_sink) { tmp___6 = drm_match_cea_mode((struct drm_display_mode const *)adjusted_mode); if ((unsigned int )tmp___6 > 1U) { pipe_config->limited_color_range = 1; } else { } } else { } } else if ((int )pipe_config->has_hdmi_sink && intel_sdvo->color_range == 256U) { pipe_config->limited_color_range = 1; } else { } if ((int )intel_sdvo->is_tv) { i9xx_adjust_sdvo_tv_clock(pipe_config); } else { } return (1); } } static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; struct drm_display_mode *adjusted_mode ; struct drm_display_mode *mode ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; u32 sdvox ; struct intel_sdvo_in_out_map in_out ; struct intel_sdvo_dtd input_dtd ; struct intel_sdvo_dtd output_dtd ; int rate ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; bool tmp___4 ; int tmp___5 ; bool tmp___6 ; int tmp___7 ; bool tmp___8 ; int tmp___9 ; int __ret_warn_on ; long tmp___10 ; bool tmp___11 ; int tmp___12 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; { dev = intel_encoder->base.dev; dev_priv 
= (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)intel_encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; adjusted_mode = & (crtc->config)->base.adjusted_mode; mode = & (crtc->config)->base.mode; tmp = to_sdvo(intel_encoder); intel_sdvo = tmp; if ((unsigned long )mode == (unsigned long )((struct drm_display_mode *)0)) { return; } else { } in_out.in0 = intel_sdvo->attached_output; in_out.in1 = 0U; intel_sdvo_set_value(intel_sdvo, 7, (void const *)(& in_out), 4); tmp___0 = intel_sdvo_set_target_output(intel_sdvo, (int )intel_sdvo->attached_output); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return; } else { } if ((int )intel_sdvo->is_lvds) { intel_sdvo_get_dtd_from_mode(& output_dtd, (struct drm_display_mode const *)intel_sdvo->sdvo_lvds_fixed_mode); } else { intel_sdvo_get_dtd_from_mode(& output_dtd, (struct drm_display_mode const *)mode); } tmp___2 = intel_sdvo_set_output_timing(intel_sdvo, & output_dtd); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { printk("\016[drm] Setting output timings on %s failed\n", (int )intel_sdvo->is_sdvob ? 
(char *)"SDVOB" : (char *)"SDVOC"); } else { } tmp___4 = intel_sdvo_set_target_input(intel_sdvo); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 1; } if (tmp___5) { return; } else { } if ((int )(crtc->config)->has_hdmi_sink) { intel_sdvo_set_encode(intel_sdvo, 1); intel_sdvo_set_colorimetry(intel_sdvo, 0); intel_sdvo_set_avi_infoframe(intel_sdvo, (struct drm_display_mode const *)adjusted_mode); } else { intel_sdvo_set_encode(intel_sdvo, 0); } if ((int )intel_sdvo->is_tv) { tmp___6 = intel_sdvo_set_tv_format(intel_sdvo); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { return; } else { } } else { } intel_sdvo_get_dtd_from_mode(& input_dtd, (struct drm_display_mode const *)adjusted_mode); if ((int )intel_sdvo->is_tv || (int )intel_sdvo->is_lvds) { input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; } else { } tmp___8 = intel_sdvo_set_input_timing(intel_sdvo, & input_dtd); if (tmp___8) { tmp___9 = 0; } else { tmp___9 = 1; } if (tmp___9) { printk("\016[drm] Setting input timings on %s failed\n", (int )intel_sdvo->is_sdvob ? 
(char *)"SDVOB" : (char *)"SDVOC"); } else { } switch ((crtc->config)->pixel_multiplier) { default: __ret_warn_on = 1; tmp___10 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___10 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sdvo.c", 1258, "unknown pixel multiplier specified\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); case 1U: rate = 1; goto ldv_48956; case 2U: rate = 2; goto ldv_48956; case 4U: rate = 8; goto ldv_48956; } ldv_48956: tmp___11 = intel_sdvo_set_clock_rate_mult(intel_sdvo, (int )((u8 )rate)); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { return; } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 3U) { sdvox = 24U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type == 0U && (int )(crtc->config)->limited_color_range) { sdvox = sdvox | 256U; } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) <= 4U) { sdvox = sdvox | 128U; } else { } } else { sdvox = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_sdvo->sdvo_reg, 1); switch (intel_sdvo->sdvo_reg) { case 397632U: sdvox = sdvox & 67321856U; goto ldv_48978; case 397664U: sdvox = sdvox & 67239936U; goto ldv_48978; } ldv_48978: sdvox = sdvox | 4718720U; } __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___2->pch_type > 1U) { sdvox = (u32 )((int )crtc->pipe << 29) | sdvox; } else { sdvox = (u32 )((int )crtc->pipe << 30) | sdvox; } if ((int )intel_sdvo->has_hdmi_audio) { sdvox = sdvox | 64U; } else { } __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) > 3U) { } else { __p___3 = to_i915((struct drm_device const *)dev); if 
/* Tail of intel_sdvo_pre_enable (started on an earlier line): on older
 * platforms it folds (pixel_multiplier - 1) into bits 23+ of SDVOX, sets the
 * interlace/stall bit (0x20000000) when the DTD sdvo_flags sign bit is set on
 * gen <= 4, then writes the final SDVOX register value. The byte-offset peeks
 * (`*((unsigned char *)__p___4 + 44UL)`) are CIL's lowering of i915 device
 * feature bitfield tests — exact meaning not recoverable from this chunk. */
((unsigned int )((unsigned short )__p___3->info.device_id) == 10098U) { } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 44UL) != 0U) { } else { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___5 + 44UL) != 0U) { } else { sdvox = (((crtc->config)->pixel_multiplier - 1U) << 23) | sdvox; } } } } if ((int )((signed char )input_dtd.part2.sdvo_flags) < 0) { __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___7->info.gen) <= 4U) { sdvox = sdvox | 536870912U; } else { } } else { } intel_sdvo_write_sdvox(intel_sdvo, sdvox); return; } }
/* Connector-level hw state: true iff this connector's output_flag intersects
 * the encoder's currently active SDVO outputs. */
static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector ) { struct intel_sdvo_connector *intel_sdvo_connector ; struct intel_sdvo_connector *tmp ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp___0 ; u16 active_outputs ; { tmp = to_intel_sdvo_connector(& connector->base); intel_sdvo_connector = tmp; tmp___0 = intel_attached_sdvo(& connector->base); intel_sdvo = tmp___0; active_outputs = 0U; intel_sdvo_get_active_outputs(intel_sdvo, & active_outputs); if ((unsigned int )((int )intel_sdvo_connector->output_flag & (int )active_outputs) != 0U) { return (1); } else { return (0); } } }
/* Encoder hw state (continues on the next line): reads the SDVOX register and
 * the active-output mask; returns false when the enable bit (sign bit of the
 * readback) is clear and no outputs are active, otherwise decodes the pipe
 * from bits 29-30 depending on PCH type. */
static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; u16 active_outputs ; u32 tmp___0 ; struct drm_i915_private *__p ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = to_sdvo(encoder); intel_sdvo = tmp; active_outputs = 0U; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_sdvo->sdvo_reg, 1); intel_sdvo_get_active_outputs(intel_sdvo, & active_outputs); if ((int )tmp___0 >= 0 && (unsigned int )active_outputs == 0U) { return (0); } else { } __p = to_i915((struct drm_device const 
*)dev); if ((unsigned int )__p->pch_type == 2U) { *pipe = (enum pipe )((tmp___0 & 1610612736U) >> 29); } else { *pipe = (enum pipe )((tmp___0 & 1073741824U) >> 30); } return (1); } } static void intel_sdvo_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct intel_sdvo_dtd dtd ; int encoder_pixel_multiplier ; int dotclock ; u32 flags ; u32 sdvox ; u8 val ; bool ret ; long tmp___0 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool tmp___1 ; bool tmp___2 ; int __ret_warn_on ; long tmp___3 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = to_sdvo(encoder); intel_sdvo = tmp; encoder_pixel_multiplier = 0; flags = 0U; sdvox = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_sdvo->sdvo_reg, 1); ret = intel_sdvo_get_input_timing(intel_sdvo, & dtd); if (! 
ret) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_sdvo_get_config", "failed to retrieve SDVO DTD\n"); } else { } pipe_config->quirks = pipe_config->quirks | 1UL; } else { if (((int )dtd.part2.dtd_flags & 2) != 0) { flags = flags | 1U; } else { flags = flags | 2U; } if (((int )dtd.part2.dtd_flags & 4) != 0) { flags = flags | 4U; } else { flags = flags | 8U; } } pipe_config->base.adjusted_mode.flags = pipe_config->base.adjusted_mode.flags | flags; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 44UL) != 0U) { pipe_config->pixel_multiplier = ((sdvox & 58720256U) >> 23) + 1U; } else { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___0->info.device_id) == 9618U) { pipe_config->pixel_multiplier = ((sdvox & 58720256U) >> 23) + 1U; } else { } } dotclock = pipe_config->port_clock; if (pipe_config->pixel_multiplier != 0U) { dotclock = (int )((unsigned int )dotclock / pipe_config->pixel_multiplier); } else { } __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )__p___1->pch_type != 0U) { ironlake_check_encoder_dotclock((struct intel_crtc_state const *)pipe_config, dotclock); } else { } pipe_config->base.adjusted_mode.crtc_clock = dotclock; tmp___1 = intel_sdvo_get_value(intel_sdvo, 32, (void *)(& val), 1); if ((int )tmp___1) { switch ((int )val) { case 1: encoder_pixel_multiplier = 1; goto ldv_49071; case 2: encoder_pixel_multiplier = 2; goto ldv_49071; case 8: encoder_pixel_multiplier = 4; goto ldv_49071; } ldv_49071: ; } else { } if ((sdvox & 256U) != 0U) { pipe_config->limited_color_range = 1; } else { } tmp___2 = intel_sdvo_get_value(intel_sdvo, 158, (void *)(& val), 1); if ((int )tmp___2) { if ((unsigned int )val == 1U) { pipe_config->has_hdmi_sink = 1; } else { } } else { } __ret_warn_on = (unsigned int )encoder_pixel_multiplier != pipe_config->pixel_multiplier; tmp___3 = 
ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___3 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/intel_sdvo.c", 1433, "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", pipe_config->pixel_multiplier, encoder_pixel_multiplier); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } static void intel_disable_sdvo(struct intel_encoder *encoder ) { struct drm_i915_private *dev_priv ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct intel_crtc *crtc ; struct drm_crtc const *__mptr ; u32 temp ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)(encoder->base.dev)->dev_private; tmp = to_sdvo(encoder); intel_sdvo = tmp; __mptr = (struct drm_crtc const *)encoder->base.crtc; crtc = (struct intel_crtc *)__mptr; intel_sdvo_set_active_outputs(intel_sdvo, 0); temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_sdvo->sdvo_reg, 1); temp = temp & 2147483647U; intel_sdvo_write_sdvox(intel_sdvo, temp); __p = dev_priv; if ((unsigned int )__p->pch_type == 1U && (int )crtc->pipe == 1) { temp = temp & 3221225471U; temp = temp | 2147483648U; intel_sdvo_write_sdvox(intel_sdvo, temp); temp = temp & 2147483647U; intel_sdvo_write_sdvox(intel_sdvo, temp); } else { } return; } } static void pch_disable_sdvo(struct intel_encoder *encoder ) { { return; } } static void pch_post_disable_sdvo(struct intel_encoder *encoder ) { { intel_disable_sdvo(encoder); return; } } static void intel_enable_sdvo(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; u32 temp ; bool input1 ; bool input2 ; int i ; bool success ; long tmp___0 ; { dev = encoder->base.dev; dev_priv = (struct 
drm_i915_private *)dev->dev_private; tmp = to_sdvo(encoder); intel_sdvo = tmp; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; temp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )intel_sdvo->sdvo_reg, 1); temp = temp | 2147483648U; intel_sdvo_write_sdvox(intel_sdvo, temp); i = 0; goto ldv_49112; ldv_49111: intel_wait_for_vblank(dev, (int )intel_crtc->pipe); i = i + 1; ldv_49112: ; if (i <= 1) { goto ldv_49111; } else { } success = intel_sdvo_get_trained_inputs(intel_sdvo, & input1, & input2); if ((int )success && ! input1) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_enable_sdvo", "First %s output reported failure to sync\n", (int )intel_sdvo->is_sdvob ? (char *)"SDVOB" : (char *)"SDVOC"); } else { } } else { } intel_sdvo_set_active_outputs(intel_sdvo, (int )intel_sdvo->attached_output); return; } } static void intel_sdvo_dpms(struct drm_connector *connector , int mode ) { struct drm_crtc *crtc ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; { tmp = intel_attached_sdvo(connector); intel_sdvo = tmp; if (mode != 0) { mode = 3; } else { } if (connector->dpms == mode) { return; } else { } connector->dpms = mode; crtc = intel_sdvo->base.base.crtc; if ((unsigned long )crtc == (unsigned long )((struct drm_crtc *)0)) { intel_sdvo->base.connectors_active = 0; return; } else { } if (mode != 0) { intel_sdvo_set_active_outputs(intel_sdvo, 0); intel_sdvo->base.connectors_active = 0; intel_crtc_update_dpms(crtc); } else { intel_sdvo->base.connectors_active = 1; intel_crtc_update_dpms(crtc); intel_sdvo_set_active_outputs(intel_sdvo, (int )intel_sdvo->attached_output); } intel_modeset_check_state(connector->dev); return; } } static enum drm_mode_status intel_sdvo_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; { tmp = intel_attached_sdvo(connector); intel_sdvo = 
/* Tail of intel_sdvo_mode_valid (started on the previous line): rejects modes
 * with flag bit 32 (returns status 8), clamps against the encoder's pixel
 * clock min/max (16 = CLOCK_LOW, 15 = CLOCK_HIGH), and for LVDS rejects modes
 * larger than the fixed panel mode (29 = PANEL). 0 = MODE_OK. */
tmp; if ((mode->flags & 32U) != 0U) { return (8); } else { } if (intel_sdvo->pixel_clock_min > mode->clock) { return (16); } else { } if (intel_sdvo->pixel_clock_max < mode->clock) { return (15); } else { } if ((int )intel_sdvo->is_lvds) { if (mode->hdisplay > (intel_sdvo->sdvo_lvds_fixed_mode)->hdisplay) { return (29); } else { } if (mode->vdisplay > (intel_sdvo->sdvo_lvds_fixed_mode)->vdisplay) { return (29); } else { } } else { } return (0); } }
/* Reads the 8-byte device capability block (command opcode 2) into `caps`;
 * on success optionally logs every field at DRM debug level and returns 1. */
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo , struct intel_sdvo_caps *caps ) { bool tmp ; int tmp___0 ; long tmp___1 ; { tmp = intel_sdvo_get_value(intel_sdvo, 2, (void *)caps, 8); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_sdvo_get_capabilities", "SDVO capabilities:\n vendor_id: %d\n device_id: %d\n device_rev_id: %d\n sdvo_version_major: %d\n sdvo_version_minor: %d\n sdvo_inputs_mask: %d\n smooth_scaling: %d\n sharp_scaling: %d\n up_scaling: %d\n down_scaling: %d\n stall_support: %d\n output_flags: %d\n", (int )caps->vendor_id, (int )caps->device_id, (int )caps->device_rev_id, (int )caps->sdvo_version_major, (int )caps->sdvo_version_minor, (int )caps->sdvo_inputs_mask, (int )caps->smooth_scaling, (int )caps->sharp_scaling, (int )caps->up_scaling, (int )caps->down_scaling, (int )caps->stall_support, (int )caps->output_flags); } else { } return (1); } }
/* Queries hotplug support (continues on the next line): returns 0 for device
 * variants identified by the byte-offset feature tests / device_id checks,
 * otherwise reads a 16-bit hotplug mask via command opcode 12. */
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo ) { struct drm_device *dev ; uint16_t hotplug ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; bool tmp ; int tmp___0 ; { dev = intel_sdvo->base.base.dev; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 46UL) == 0U) { return (0U); } else { } __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short 
)__p___0->info.device_id) == 10098U) { return (0U); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { return (0U); } else { } } tmp = intel_sdvo_get_value(intel_sdvo, 12, (void *)(& hotplug), 2); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0U); } else { } return (hotplug); } } static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; { tmp = to_sdvo(encoder); intel_sdvo = tmp; intel_sdvo_write_cmd(intel_sdvo, 13, (void const *)(& intel_sdvo->hotplug_active), 2); return; } } static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo ) { unsigned int tmp ; { tmp = __arch_hweight16((unsigned int )intel_sdvo->caps.output_flags); return (tmp > 1U); } } static struct edid *intel_sdvo_get_edid(struct drm_connector *connector ) { struct intel_sdvo *sdvo ; struct intel_sdvo *tmp ; struct edid *tmp___0 ; { tmp = intel_attached_sdvo(connector); sdvo = tmp; tmp___0 = drm_get_edid(connector, & sdvo->ddc); return (tmp___0); } } static struct edid *intel_sdvo_get_analog_edid(struct drm_connector *connector ) { struct drm_i915_private *dev_priv ; struct i2c_adapter *tmp ; struct edid *tmp___0 ; { dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; tmp = intel_gmbus_get_adapter(dev_priv, (unsigned int )dev_priv->vbt.crt_ddc_pin); tmp___0 = drm_get_edid(connector, tmp); return (tmp___0); } } static enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *connector ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; enum drm_connector_status status ; struct edid *edid ; u8 ddc ; u8 saved_ddc ; bool tmp___0 ; struct intel_sdvo_connector *intel_sdvo_connector ; struct intel_sdvo_connector *tmp___1 ; { tmp = intel_attached_sdvo(connector); intel_sdvo = tmp; edid = intel_sdvo_get_edid(connector); if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { 
tmp___0 = intel_sdvo_multifunc_encoder(intel_sdvo); if ((int )tmp___0) { saved_ddc = intel_sdvo->ddc_bus; ddc = (u8 )((int )intel_sdvo->ddc_bus >> 1); goto ldv_49179; ldv_49178: intel_sdvo->ddc_bus = ddc; edid = intel_sdvo_get_edid(connector); if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { goto ldv_49177; } else { } ddc = (u8 )((int )ddc >> 1); ldv_49179: ; if ((unsigned int )ddc > 1U) { goto ldv_49178; } else { } ldv_49177: ; if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { intel_sdvo->ddc_bus = saved_ddc; } else { } } else { } } else { } if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { edid = intel_sdvo_get_analog_edid(connector); } else { } status = 3; if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { if ((int )((signed char )edid->input) < 0) { status = 1; if ((int )intel_sdvo->is_hdmi) { intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); intel_sdvo->rgb_quant_range_selectable = drm_rgb_quant_range_selectable(edid); } else { } } else { status = 2; } kfree((void const *)edid); } else { } if ((unsigned int )status == 1U) { tmp___1 = to_intel_sdvo_connector(connector); intel_sdvo_connector = tmp___1; if ((int )intel_sdvo_connector->force_audio != 0) { intel_sdvo->has_hdmi_audio = (int )intel_sdvo_connector->force_audio == 1; } else { } } else { } return (status); } } static bool intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo , struct edid *edid ) { bool monitor_is_digital ; bool connector_is_digital ; long tmp ; { monitor_is_digital = (int )((signed char )edid->input) < 0; connector_is_digital = ((int )sdvo->output_flag & 16705) != 0; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_connector_matches_edid", "connector_is_digital? %d, monitor_is_digital? 
%d\n", (int )connector_is_digital, (int )monitor_is_digital); } else { } return ((int )connector_is_digital == (int )monitor_is_digital); } } static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector , bool force ) { uint16_t response ; struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct intel_sdvo_connector *intel_sdvo_connector ; struct intel_sdvo_connector *tmp___0 ; enum drm_connector_status ret ; long tmp___1 ; bool tmp___2 ; int tmp___3 ; long tmp___4 ; struct edid *edid ; bool tmp___5 ; { tmp = intel_attached_sdvo(connector); intel_sdvo = tmp; tmp___0 = to_intel_sdvo_connector(connector); intel_sdvo_connector = tmp___0; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_sdvo_detect", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } tmp___2 = intel_sdvo_get_value(intel_sdvo, 11, (void *)(& response), 2); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (3); } else { } tmp___4 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_sdvo_detect", "SDVO response %d %d [%x]\n", (int )response & 255, (int )response >> 8, (int )intel_sdvo_connector->output_flag); } else { } if ((unsigned int )response == 0U) { return (2); } else { } intel_sdvo->attached_output = response; intel_sdvo->has_hdmi_monitor = 0; intel_sdvo->has_hdmi_audio = 0; intel_sdvo->rgb_quant_range_selectable = 0; if ((unsigned int )((int )intel_sdvo_connector->output_flag & (int )response) == 0U) { ret = 2; } else if (((int )intel_sdvo_connector->output_flag & 257) != 0) { ret = intel_sdvo_tmds_sink_detect(connector); } else { edid = intel_sdvo_get_edid(connector); if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { edid = intel_sdvo_get_analog_edid(connector); } else { } if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { tmp___5 = intel_sdvo_connector_matches_edid(intel_sdvo_connector, 
edid); if ((int )tmp___5) { ret = 1; } else { ret = 2; } kfree((void const *)edid); } else { ret = 1; } } if ((unsigned int )ret == 1U) { intel_sdvo->is_tv = 0; intel_sdvo->is_lvds = 0; if (((int )response & 28) != 0) { intel_sdvo->is_tv = 1; } else { } if (((int )response & 16448) != 0) { intel_sdvo->is_lvds = (unsigned long )intel_sdvo->sdvo_lvds_fixed_mode != (unsigned long )((struct drm_display_mode *)0); } else { } } else { } return (ret); } } static void intel_sdvo_get_ddc_modes(struct drm_connector *connector ) { struct edid *edid ; long tmp ; struct intel_sdvo_connector *tmp___0 ; bool tmp___1 ; { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_get_ddc_modes", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } edid = intel_sdvo_get_edid(connector); if ((unsigned long )edid == (unsigned long )((struct edid *)0)) { edid = intel_sdvo_get_analog_edid(connector); } else { } if ((unsigned long )edid != (unsigned long )((struct edid *)0)) { tmp___0 = to_intel_sdvo_connector(connector); tmp___1 = intel_sdvo_connector_matches_edid(tmp___0, edid); if ((int )tmp___1) { drm_mode_connector_update_edid_property(connector, (struct edid const *)edid); drm_add_edid_modes(connector, edid); } else { } kfree((void const *)edid); } else { } return; } } static struct drm_display_mode const sdvo_tv_modes[19U] = { {{0, 0}, {0U, 3739147998U, 0}, {'3', '2', '0', 'x', '2', '0', '0', '\000'}, 0, 64U, 5815, 320, 321, 384, 416, 0, 200, 201, 232, 233, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'3', '2', '0', 'x', '2', '4', '0', '\000'}, 0, 64U, 6814, 320, 321, 384, 416, 0, 240, 241, 272, 273, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'4', '0', '0', 'x', '3', '0', '0', '\000'}, 0, 64U, 9910, 400, 401, 464, 496, 0, 300, 301, 332, 333, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'6', '4', '0', 'x', '3', '5', '0', '\000'}, 0, 64U, 16913, 640, 641, 704, 736, 0, 350, 351, 382, 383, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'6', '4', '0', 'x', '4', '0', '0', '\000'}, 0, 64U, 19121, 640, 641, 704, 736, 0, 400, 401, 432, 433, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'6', '4', '0', 'x', '4', '8', '0', '\000'}, 0, 64U, 22654, 640, 641, 704, 736, 0, 480, 481, 512, 513, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '0', '4', 'x', '4', '8', '0', '\000'}, 0, 64U, 24624, 704, 705, 768, 800, 0, 480, 481, 512, 513, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '0', '4', 'x', '5', '7', '6', '\000'}, 0, 64U, 29232, 704, 705, 768, 800, 0, 576, 577, 608, 609, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '2', '0', 'x', '3', '5', '0', '\000'}, 0, 64U, 18751, 720, 721, 784, 816, 0, 350, 351, 382, 383, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '2', '0', 'x', '4', '0', '0', '\000'}, 0, 64U, 21199, 720, 721, 784, 816, 0, 400, 401, 432, 433, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '2', '0', 'x', '4', '8', '0', '\000'}, 0, 64U, 25116, 720, 721, 784, 816, 0, 480, 481, 512, 513, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '2', '0', 'x', '5', '4', '0', '\000'}, 0, 64U, 28054, 720, 721, 784, 816, 0, 540, 541, 572, 573, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '2', '0', 'x', '5', '7', '6', '\000'}, 0, 64U, 29816, 720, 721, 784, 816, 0, 576, 577, 608, 609, 0, 5U, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'7', '6', '8', 'x', '5', '7', '6', '\000'}, 0, 64U, 31570, 768, 769, 832, 864, 0, 576, 577, 608, 609, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'8', '0', '0', 'x', '6', '0', '0', '\000'}, 0, 64U, 34030, 800, 801, 864, 896, 0, 600, 601, 632, 633, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'8', '3', '2', 'x', '6', '2', '4', '\000'}, 0, 64U, 36581, 832, 833, 896, 928, 0, 624, 625, 656, 657, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'9', '2', '0', 'x', '7', '6', '6', '\000'}, 0, 64U, 48707, 920, 921, 984, 1016, 0, 766, 767, 798, 799, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'1', '0', '2', '4', 'x', '7', '6', '8', '\000'}, 0, 64U, 53827, 1024, 1025, 1088, 1120, 0, 768, 769, 800, 801, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{0, 0}, {0U, 3739147998U, 0}, {'1', '2', '8', '0', 'x', '1', '0', '2', '4', '\000'}, 0, 64U, 87265, 1280, 1281, 1344, 1376, 0, 1024, 1025, 1056, 1057, 0, 5U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}; static void intel_sdvo_get_tv_modes(struct drm_connector *connector ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct intel_sdvo_sdtv_resolution_request tv_res ; uint32_t reply ; uint32_t format_map ; int i ; long tmp___0 ; unsigned long _min1 ; unsigned long _min2 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; bool tmp___5 ; int tmp___6 ; struct drm_display_mode *nmode ; { tmp = intel_attached_sdvo(connector); intel_sdvo = tmp; reply = 0U; format_map = 0U; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_sdvo_get_tv_modes", "[CONNECTOR:%d:%s]\n", connector->base.id, 
connector->name); } else { } format_map = (uint32_t )(1 << intel_sdvo->tv_format_index); _min1 = 4UL; _min2 = 3UL; memcpy((void *)(& tv_res), (void const *)(& format_map), _min1 < _min2 ? _min1 : _min2); tmp___1 = intel_sdvo_set_target_output(intel_sdvo, (int )intel_sdvo->attached_output); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return; } else { } tmp___3 = intel_sdvo_write_cmd(intel_sdvo, 131, (void const *)(& tv_res), 3); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return; } else { } tmp___5 = intel_sdvo_read_response(intel_sdvo, (void *)(& reply), 3); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { return; } else { } i = 0; goto ldv_49220; ldv_49219: ; if (((uint32_t )(1 << i) & reply) != 0U) { nmode = drm_mode_duplicate(connector->dev, (struct drm_display_mode const *)(& sdvo_tv_modes) + (unsigned long )i); if ((unsigned long )nmode != (unsigned long )((struct drm_display_mode *)0)) { drm_mode_probed_add(connector, nmode); } else { } } else { } i = i + 1; ldv_49220: ; if ((unsigned int )i <= 18U) { goto ldv_49219; } else { } return; } } static void intel_sdvo_get_lvds_modes(struct drm_connector *connector ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct drm_i915_private *dev_priv ; struct drm_display_mode *newmode ; long tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = intel_attached_sdvo(connector); intel_sdvo = tmp; dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_sdvo_get_lvds_modes", "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } else { } if ((unsigned long )dev_priv->vbt.sdvo_lvds_vbt_mode != (unsigned long )((struct drm_display_mode *)0)) { newmode = drm_mode_duplicate(connector->dev, (struct drm_display_mode const *)dev_priv->vbt.sdvo_lvds_vbt_mode); if ((unsigned long )newmode != 
/* Tail of intel_sdvo_get_lvds_modes (started on the previous line): marks the
 * duplicated VBT mode as preferred/driver (type 72 = 0x48), adds DDC-probed
 * modes, then walks probed_modes (the goto/ldv_* labels are CIL's lowering of
 * list_for_each_entry) looking for a preferred mode (type bit 8) to cache as
 * sdvo_lvds_fixed_mode and flag is_lvds. */
(unsigned long )((struct drm_display_mode *)0)) { newmode->type = 72U; drm_mode_probed_add(connector, newmode); } else { } } else { } intel_ddc_get_modes(connector, & intel_sdvo->ddc); __mptr = (struct list_head const *)connector->probed_modes.next; newmode = (struct drm_display_mode *)__mptr; goto ldv_49235; ldv_49234: ; if ((newmode->type & 8U) != 0U) { intel_sdvo->sdvo_lvds_fixed_mode = drm_mode_duplicate(connector->dev, (struct drm_display_mode const *)newmode); intel_sdvo->is_lvds = 1; goto ldv_49233; } else { } __mptr___0 = (struct list_head const *)newmode->head.next; newmode = (struct drm_display_mode *)__mptr___0; ldv_49235: ; if ((unsigned long )(& newmode->head) != (unsigned long )(& connector->probed_modes)) { goto ldv_49234; } else { } ldv_49233: ; return; } }
/* Dispatches mode probing by connector output_flag: TV bits (mask 28) -> TV
 * modes, LVDS bits (mask 16448) -> LVDS modes, else DDC. Returns nonzero iff
 * any modes were probed. */
static int intel_sdvo_get_modes(struct drm_connector *connector ) { struct intel_sdvo_connector *intel_sdvo_connector ; struct intel_sdvo_connector *tmp ; int tmp___0 ; { tmp = to_intel_sdvo_connector(connector); intel_sdvo_connector = tmp; if (((int )intel_sdvo_connector->output_flag & 28) != 0) { intel_sdvo_get_tv_modes(connector); } else if (((int )intel_sdvo_connector->output_flag & 16448) != 0) { intel_sdvo_get_lvds_modes(connector); } else { intel_sdvo_get_ddc_modes(connector); } tmp___0 = list_empty((struct list_head const *)(& connector->probed_modes)); return (tmp___0 == 0); } }
/* Tears down the DRM connector and frees the wrapping intel_sdvo_connector. */
static void intel_sdvo_destroy(struct drm_connector *connector ) { struct intel_sdvo_connector *intel_sdvo_connector ; struct intel_sdvo_connector *tmp ; { tmp = to_intel_sdvo_connector(connector); intel_sdvo_connector = tmp; drm_connector_cleanup(connector); kfree((void const *)intel_sdvo_connector); return; } }
/* Detects HDMI audio via EDID (continues on the next line): bails out early
 * when the encoder is not in HDMI mode. */
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct edid *edid ; bool has_audio ; { tmp = intel_attached_sdvo(connector); intel_sdvo = tmp; has_audio = 0; if (! 
intel_sdvo->is_hdmi) { return (0); } else { } edid = intel_sdvo_get_edid(connector); if ((unsigned long )edid != (unsigned long )((struct edid *)0) && (int )((signed char )edid->input) < 0) { has_audio = drm_detect_monitor_audio(edid); } else { } kfree((void const *)edid); return (has_audio); } } static int intel_sdvo_set_property(struct drm_connector *connector , struct drm_property *property , uint64_t val ) { struct intel_sdvo *intel_sdvo ; struct intel_sdvo *tmp ; struct intel_sdvo_connector *intel_sdvo_connector ; struct intel_sdvo_connector *tmp___0 ; struct drm_i915_private *dev_priv ; uint16_t temp_value ; uint8_t cmd ; int ret ; int i ; bool has_audio ; bool old_auto ; uint32_t old_range ; bool tmp___1 ; int tmp___2 ; { tmp = intel_attached_sdvo(connector); intel_sdvo = tmp; tmp___0 = to_intel_sdvo_connector(connector); intel_sdvo_connector = tmp___0; dev_priv = (struct drm_i915_private *)(connector->dev)->dev_private; ret = drm_object_property_set_value(& connector->base, property, val); if (ret != 0) { return (ret); } else { } if ((unsigned long )dev_priv->force_audio_property == (unsigned long )property) { i = (int )val; if ((int )intel_sdvo_connector->force_audio == i) { return (0); } else { } intel_sdvo_connector->force_audio = (enum hdmi_force_audio )i; if (i == 0) { has_audio = intel_sdvo_detect_hdmi_audio(connector); } else { has_audio = i == 1; } if ((int )intel_sdvo->has_hdmi_audio == (int )has_audio) { return (0); } else { } intel_sdvo->has_hdmi_audio = has_audio; goto done; } else { } if ((unsigned long )dev_priv->broadcast_rgb_property == (unsigned long )property) { old_auto = intel_sdvo->color_range_auto; old_range = intel_sdvo->color_range; switch (val) { case 0ULL: intel_sdvo->color_range_auto = 1; goto ldv_49267; case 1ULL: intel_sdvo->color_range_auto = 0; intel_sdvo->color_range = 0U; goto ldv_49267; case 2ULL: intel_sdvo->color_range_auto = 0; intel_sdvo->color_range = 256U; goto ldv_49267; default: ; return (-22); } ldv_49267: ; if 
((int )intel_sdvo->color_range_auto == (int )old_auto && intel_sdvo->color_range == old_range) { return (0); } else { } goto done; } else { } if ((unsigned long )intel_sdvo_connector->tv_format == (unsigned long )property) { if (val > 18ULL) { return (-22); } else { } if (intel_sdvo->tv_format_index == (int )intel_sdvo_connector->tv_format_supported[val]) { return (0); } else { } intel_sdvo->tv_format_index = (int )intel_sdvo_connector->tv_format_supported[val]; goto done; } else if (((int )intel_sdvo_connector->output_flag & 16476) != 0) { temp_value = (uint16_t )val; if ((unsigned long )intel_sdvo_connector->left == (unsigned long )property) { drm_object_property_set_value(& connector->base, intel_sdvo_connector->right, val); if (intel_sdvo_connector->left_margin == (u32 )temp_value) { return (0); } else { } intel_sdvo_connector->left_margin = (u32 )temp_value; intel_sdvo_connector->right_margin = (u32 )temp_value; temp_value = (int )((uint16_t )intel_sdvo_connector->max_hscan) - (int )((uint16_t )intel_sdvo_connector->left_margin); cmd = 99U; goto set_value; } else if ((unsigned long )intel_sdvo_connector->right == (unsigned long )property) { drm_object_property_set_value(& connector->base, intel_sdvo_connector->left, val); if (intel_sdvo_connector->right_margin == (u32 )temp_value) { return (0); } else { } intel_sdvo_connector->left_margin = (u32 )temp_value; intel_sdvo_connector->right_margin = (u32 )temp_value; temp_value = (int )((uint16_t )intel_sdvo_connector->max_hscan) - (int )((uint16_t )intel_sdvo_connector->left_margin); cmd = 99U; goto set_value; } else if ((unsigned long )intel_sdvo_connector->top == (unsigned long )property) { drm_object_property_set_value(& connector->base, intel_sdvo_connector->bottom, val); if (intel_sdvo_connector->top_margin == (u32 )temp_value) { return (0); } else { } intel_sdvo_connector->top_margin = (u32 )temp_value; intel_sdvo_connector->bottom_margin = (u32 )temp_value; temp_value = (int )((uint16_t 
)intel_sdvo_connector->max_vscan) - (int )((uint16_t )intel_sdvo_connector->top_margin); cmd = 102U; goto set_value; } else if ((unsigned long )intel_sdvo_connector->bottom == (unsigned long )property) { drm_object_property_set_value(& connector->base, intel_sdvo_connector->top, val); if (intel_sdvo_connector->bottom_margin == (u32 )temp_value) { return (0); } else { } intel_sdvo_connector->top_margin = (u32 )temp_value; intel_sdvo_connector->bottom_margin = (u32 )temp_value; temp_value = (int )((uint16_t )intel_sdvo_connector->max_vscan) - (int )((uint16_t )intel_sdvo_connector->top_margin); cmd = 102U; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->hpos == (unsigned long )property) { if (intel_sdvo_connector->cur_hpos == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_hpos < (u32 )temp_value) { return (-22); } else { } cmd = 105U; intel_sdvo_connector->cur_hpos = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->vpos == (unsigned long )property) { if (intel_sdvo_connector->cur_vpos == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_vpos < (u32 )temp_value) { return (-22); } else { } cmd = 108U; intel_sdvo_connector->cur_vpos = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->saturation == (unsigned long )property) { if (intel_sdvo_connector->cur_saturation == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_saturation < (u32 )temp_value) { return (-22); } else { } cmd = 87U; intel_sdvo_connector->cur_saturation = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->contrast == (unsigned long )property) { if (intel_sdvo_connector->cur_contrast == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_contrast < (u32 )temp_value) { return (-22); } else { } cmd = 96U; intel_sdvo_connector->cur_contrast = (u32 )temp_value; goto set_value; } else { 
} if ((unsigned long )intel_sdvo_connector->hue == (unsigned long )property) { if (intel_sdvo_connector->cur_hue == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_hue < (u32 )temp_value) { return (-22); } else { } cmd = 90U; intel_sdvo_connector->cur_hue = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->brightness == (unsigned long )property) { if (intel_sdvo_connector->cur_brightness == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_brightness < (u32 )temp_value) { return (-22); } else { } cmd = 93U; intel_sdvo_connector->cur_brightness = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->sharpness == (unsigned long )property) { if (intel_sdvo_connector->cur_sharpness == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_sharpness < (u32 )temp_value) { return (-22); } else { } cmd = 111U; intel_sdvo_connector->cur_sharpness = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->flicker_filter == (unsigned long )property) { if (intel_sdvo_connector->cur_flicker_filter == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_flicker_filter < (u32 )temp_value) { return (-22); } else { } cmd = 79U; intel_sdvo_connector->cur_flicker_filter = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->flicker_filter_2d == (unsigned long )property) { if (intel_sdvo_connector->cur_flicker_filter_2d == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_flicker_filter_2d < (u32 )temp_value) { return (-22); } else { } cmd = 84U; intel_sdvo_connector->cur_flicker_filter_2d = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->flicker_filter_adaptive == (unsigned long )property) { if (intel_sdvo_connector->cur_flicker_filter_adaptive == (u32 )temp_value) { return (0); } else { } if 
(intel_sdvo_connector->max_flicker_filter_adaptive < (u32 )temp_value) { return (-22); } else { } cmd = 81U; intel_sdvo_connector->cur_flicker_filter_adaptive = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->tv_chroma_filter == (unsigned long )property) { if (intel_sdvo_connector->cur_tv_chroma_filter == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_tv_chroma_filter < (u32 )temp_value) { return (-22); } else { } cmd = 118U; intel_sdvo_connector->cur_tv_chroma_filter = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->tv_luma_filter == (unsigned long )property) { if (intel_sdvo_connector->cur_tv_luma_filter == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_tv_luma_filter < (u32 )temp_value) { return (-22); } else { } cmd = 121U; intel_sdvo_connector->cur_tv_luma_filter = (u32 )temp_value; goto set_value; } else { } if ((unsigned long )intel_sdvo_connector->dot_crawl == (unsigned long )property) { if (intel_sdvo_connector->cur_dot_crawl == (u32 )temp_value) { return (0); } else { } if (intel_sdvo_connector->max_dot_crawl < (u32 )temp_value) { return (-22); } else { } cmd = 113U; intel_sdvo_connector->cur_dot_crawl = (u32 )temp_value; goto set_value; } else { } } else { } return (-22); set_value: tmp___1 = intel_sdvo_set_value(intel_sdvo, (int )cmd, (void const *)(& temp_value), 2); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (-5); } else { } done: ; if ((unsigned long )intel_sdvo->base.base.crtc != (unsigned long )((struct drm_crtc *)0)) { intel_crtc_restore_mode(intel_sdvo->base.base.crtc); } else { } return (0); } } static struct drm_connector_funcs const intel_sdvo_connector_funcs = {& intel_sdvo_dpms, 0, 0, 0, & intel_sdvo_detect, & drm_helper_probe_single_connector_modes, & intel_sdvo_set_property, & intel_sdvo_destroy, 0, & drm_atomic_helper_connector_duplicate_state, & 
drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property}; static struct drm_connector_helper_funcs const intel_sdvo_connector_helper_funcs = {& intel_sdvo_get_modes, & intel_sdvo_mode_valid, & intel_best_encoder}; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder ) { struct intel_sdvo *intel_sdvo ; struct drm_encoder const *__mptr ; struct intel_sdvo *tmp ; { __mptr = (struct drm_encoder const *)encoder; tmp = to_sdvo((struct intel_encoder *)__mptr); intel_sdvo = tmp; if ((unsigned long )intel_sdvo->sdvo_lvds_fixed_mode != (unsigned long )((struct drm_display_mode *)0)) { drm_mode_destroy(encoder->dev, intel_sdvo->sdvo_lvds_fixed_mode); } else { } i2c_del_adapter(& intel_sdvo->ddc); intel_encoder_destroy(encoder); return; } } static struct drm_encoder_funcs const intel_sdvo_enc_funcs = {0, & intel_sdvo_enc_destroy}; static void intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo ) { uint16_t mask ; unsigned int num_bits ; { mask = 0U; switch ((int )sdvo->controlled_output) { case 16384: mask = (uint16_t )((unsigned int )mask | 16384U); case 64: mask = (uint16_t )((unsigned int )mask | 64U); case 256: mask = (uint16_t )((unsigned int )mask | 256U); case 1: mask = (uint16_t )((unsigned int )mask | 1U); case 512: mask = (uint16_t )((unsigned int )mask | 512U); case 2: mask = (uint16_t )((unsigned int )mask | 2U); goto ldv_49292; } ldv_49292: mask = (uint16_t )((int )sdvo->caps.output_flags & (int )mask); num_bits = __arch_hweight16((unsigned int )mask); if (num_bits > 3U) { num_bits = 3U; } else { } sdvo->ddc_bus = (uint8_t )(1 << (int )num_bits); return; } } static void intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv , struct intel_sdvo *sdvo , u32 reg ) { struct sdvo_device_mapping *mapping ; { if ((int )sdvo->is_sdvob) { mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings); } else { mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings) + 1UL; } if ((unsigned int )mapping->initialized != 
0U) { sdvo->ddc_bus = (uint8_t )(1 << ((int )mapping->ddc_pin >> 4)); } else { intel_sdvo_guess_ddc_bus(sdvo); } return; } } static void intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv , struct intel_sdvo *sdvo , u32 reg ) { struct sdvo_device_mapping *mapping ; u8 pin ; bool tmp ; { if ((int )sdvo->is_sdvob) { mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings); } else { mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings) + 1UL; } if ((unsigned int )mapping->initialized != 0U) { tmp = intel_gmbus_is_valid_pin(dev_priv, (unsigned int )mapping->i2c_pin); if ((int )tmp) { pin = mapping->i2c_pin; } else { pin = 5U; } } else { pin = 5U; } sdvo->i2c = intel_gmbus_get_adapter(dev_priv, (unsigned int )pin); intel_gmbus_force_bit(sdvo->i2c, 1); return; } } static void intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo ) { { intel_gmbus_force_bit(sdvo->i2c, 0); return; } } static bool intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo , int device ) { bool tmp ; { tmp = intel_sdvo_check_supp_encode(intel_sdvo); return (tmp); } } static u8 intel_sdvo_get_slave_addr(struct drm_device *dev , struct intel_sdvo *sdvo ) { struct drm_i915_private *dev_priv ; struct sdvo_device_mapping *my_mapping ; struct sdvo_device_mapping *other_mapping ; { dev_priv = (struct drm_i915_private *)dev->dev_private; if ((int )sdvo->is_sdvob) { my_mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings); other_mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings) + 1UL; } else { my_mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings) + 1UL; other_mapping = (struct sdvo_device_mapping *)(& dev_priv->sdvo_mappings); } if ((unsigned int )my_mapping->slave_addr != 0U) { return (my_mapping->slave_addr); } else { } if ((unsigned int )other_mapping->slave_addr != 0U) { if ((unsigned int )other_mapping->slave_addr == 112U) { return (114U); } else { return (112U); } } else { } if ((int )sdvo->is_sdvob) { return 
(112U); } else { return (114U); } } } static void intel_sdvo_connector_unregister(struct intel_connector *intel_connector ) { struct drm_connector *drm_connector ; struct intel_sdvo *sdvo_encoder ; { drm_connector = & intel_connector->base; sdvo_encoder = intel_attached_sdvo(& intel_connector->base); sysfs_remove_link(& (drm_connector->kdev)->kobj, sdvo_encoder->ddc.dev.kobj.name); intel_connector_unregister(intel_connector); return; } } static int intel_sdvo_connector_init(struct intel_sdvo_connector *connector , struct intel_sdvo *encoder ) { struct drm_connector *drm_connector ; int ret ; { drm_connector = & connector->base.base; ret = drm_connector_init(encoder->base.base.dev, drm_connector, & intel_sdvo_connector_funcs, connector->base.base.connector_type); if (ret < 0) { return (ret); } else { } drm_connector_helper_add(drm_connector, & intel_sdvo_connector_helper_funcs); connector->base.base.interlace_allowed = 1; connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = 1; connector->base.get_hw_state = & intel_sdvo_connector_get_hw_state; connector->base.unregister = & intel_sdvo_connector_unregister; intel_connector_attach_encoder(& connector->base, & encoder->base); ret = drm_connector_register(drm_connector); if (ret < 0) { goto err1; } else { } ret = sysfs_create_link(& (drm_connector->kdev)->kobj, & encoder->ddc.dev.kobj, encoder->ddc.dev.kobj.name); if (ret < 0) { goto err2; } else { } return (0); err2: drm_connector_unregister(drm_connector); err1: drm_connector_cleanup(drm_connector); return (ret); } } static void intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo , struct intel_sdvo_connector *connector ) { struct drm_device *dev ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; { dev = connector->base.base.dev; intel_attach_force_audio_property(& connector->base.base); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p->info.gen) > 3U) { __p___0 
= to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { intel_attach_broadcast_rgb_property(& connector->base.base); intel_sdvo->color_range_auto = 1; } else { } } else { } return; } } static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void) { struct intel_sdvo_connector *sdvo_connector ; void *tmp ; int tmp___0 ; { tmp = kzalloc(1376UL, 208U); sdvo_connector = (struct intel_sdvo_connector *)tmp; if ((unsigned long )sdvo_connector == (unsigned long )((struct intel_sdvo_connector *)0)) { return ((struct intel_sdvo_connector *)0); } else { } tmp___0 = intel_connector_init(& sdvo_connector->base); if (tmp___0 < 0) { kfree((void const *)sdvo_connector); return ((struct intel_sdvo_connector *)0); } else { } return (sdvo_connector); } } static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo , int device ) { struct drm_encoder *encoder ; struct drm_connector *connector ; struct intel_encoder *intel_encoder ; struct drm_encoder const *__mptr ; struct intel_connector *intel_connector ; struct intel_sdvo_connector *intel_sdvo_connector ; long tmp ; uint16_t tmp___0 ; bool tmp___1 ; int tmp___2 ; { encoder = & intel_sdvo->base.base; __mptr = (struct drm_encoder const *)encoder; intel_encoder = (struct intel_encoder *)__mptr; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_dvi_init", "initialising DVI device %d\n", device); } else { } intel_sdvo_connector = intel_sdvo_connector_alloc(); if ((unsigned long )intel_sdvo_connector == (unsigned long )((struct intel_sdvo_connector *)0)) { return (0); } else { } if (device == 0) { intel_sdvo->controlled_output = (uint16_t )((unsigned int )intel_sdvo->controlled_output | 1U); intel_sdvo_connector->output_flag = 1U; } else if (device == 1) { intel_sdvo->controlled_output = (uint16_t )((unsigned int )intel_sdvo->controlled_output | 256U); intel_sdvo_connector->output_flag = 256U; } else { } intel_connector = & 
intel_sdvo_connector->base; connector = & intel_connector->base; tmp___0 = intel_sdvo_get_hotplug_support(intel_sdvo); if ((unsigned int )((int )tmp___0 & (int )intel_sdvo_connector->output_flag) != 0U) { intel_sdvo->hotplug_active = (uint16_t )((int )intel_sdvo->hotplug_active | (int )intel_sdvo_connector->output_flag); intel_encoder->hot_plug = & intel_sdvo_enable_hotplug; intel_sdvo_enable_hotplug(intel_encoder); } else { intel_connector->polled = 6U; } encoder->encoder_type = 2; connector->connector_type = 3; tmp___1 = intel_sdvo_is_hdmi_connector(intel_sdvo, device); if ((int )tmp___1) { connector->connector_type = 11; intel_sdvo->is_hdmi = 1; } else { } tmp___2 = intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (tmp___2 < 0) { kfree((void const *)intel_sdvo_connector); return (0); } else { } if ((int )intel_sdvo->is_hdmi) { intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); } else { } return (1); } } static bool intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo , int type ) { struct drm_encoder *encoder ; struct drm_connector *connector ; struct intel_connector *intel_connector ; struct intel_sdvo_connector *intel_sdvo_connector ; long tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; { encoder = & intel_sdvo->base.base; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_tv_init", "initialising TV type %d\n", type); } else { } intel_sdvo_connector = intel_sdvo_connector_alloc(); if ((unsigned long )intel_sdvo_connector == (unsigned long )((struct intel_sdvo_connector *)0)) { return (0); } else { } intel_connector = & intel_sdvo_connector->base; connector = & intel_connector->base; encoder->encoder_type = 4; connector->connector_type = 6; intel_sdvo->controlled_output = (uint16_t )((int )((short )intel_sdvo->controlled_output) | (int )((short )type)); intel_sdvo_connector->output_flag = (uint16_t )type; intel_sdvo->is_tv = 1; tmp___0 = 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (tmp___0 < 0) { kfree((void const *)intel_sdvo_connector); return (0); } else { } tmp___1 = intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto err; } else { } tmp___3 = intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { goto err; } else { } return (1); err: drm_connector_unregister(connector); intel_sdvo_destroy(connector); return (0); } } static bool intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo , int device ) { struct drm_encoder *encoder ; struct drm_connector *connector ; struct intel_connector *intel_connector ; struct intel_sdvo_connector *intel_sdvo_connector ; long tmp ; int tmp___0 ; { encoder = & intel_sdvo->base.base; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_analog_init", "initialising analog device %d\n", device); } else { } intel_sdvo_connector = intel_sdvo_connector_alloc(); if ((unsigned long )intel_sdvo_connector == (unsigned long )((struct intel_sdvo_connector *)0)) { return (0); } else { } intel_connector = & intel_sdvo_connector->base; connector = & intel_connector->base; intel_connector->polled = 2U; encoder->encoder_type = 1; connector->connector_type = 1; if (device == 0) { intel_sdvo->controlled_output = (uint16_t )((unsigned int )intel_sdvo->controlled_output | 2U); intel_sdvo_connector->output_flag = 2U; } else if (device == 1) { intel_sdvo->controlled_output = (uint16_t )((unsigned int )intel_sdvo->controlled_output | 512U); intel_sdvo_connector->output_flag = 512U; } else { } tmp___0 = intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (tmp___0 < 0) { kfree((void const *)intel_sdvo_connector); return (0); } else { } return (1); } } static bool intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo , int device ) { struct 
drm_encoder *encoder ; struct drm_connector *connector ; struct intel_connector *intel_connector ; struct intel_sdvo_connector *intel_sdvo_connector ; long tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { encoder = & intel_sdvo->base.base; tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_lvds_init", "initialising LVDS device %d\n", device); } else { } intel_sdvo_connector = intel_sdvo_connector_alloc(); if ((unsigned long )intel_sdvo_connector == (unsigned long )((struct intel_sdvo_connector *)0)) { return (0); } else { } intel_connector = & intel_sdvo_connector->base; connector = & intel_connector->base; encoder->encoder_type = 3; connector->connector_type = 7; if (device == 0) { intel_sdvo->controlled_output = (uint16_t )((unsigned int )intel_sdvo->controlled_output | 64U); intel_sdvo_connector->output_flag = 64U; } else if (device == 1) { intel_sdvo->controlled_output = (uint16_t )((unsigned int )intel_sdvo->controlled_output | 16384U); intel_sdvo_connector->output_flag = 16384U; } else { } tmp___0 = intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (tmp___0 < 0) { kfree((void const *)intel_sdvo_connector); return (0); } else { } tmp___1 = intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto err; } else { } return (1); err: drm_connector_unregister(connector); intel_sdvo_destroy(connector); return (0); } } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo , uint16_t flags ) { bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; bool tmp___5 ; int tmp___6 ; bool tmp___7 ; int tmp___8 ; bool tmp___9 ; int tmp___10 ; bool tmp___11 ; int tmp___12 ; bool tmp___13 ; int tmp___14 ; bool tmp___15 ; int tmp___16 ; unsigned char bytes[2U] ; long tmp___17 ; { intel_sdvo->is_tv = 0; intel_sdvo->is_lvds = 0; if ((int )flags & 1) { tmp = intel_sdvo_dvi_init(intel_sdvo, 0); 
if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } } else { } if (((int )flags & 257) == 257) { tmp___1 = intel_sdvo_dvi_init(intel_sdvo, 1); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } } else { } if (((int )flags & 8) != 0) { tmp___3 = intel_sdvo_tv_init(intel_sdvo, 8); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { return (0); } else { } } else { } if (((int )flags & 4) != 0) { tmp___5 = intel_sdvo_tv_init(intel_sdvo, 4); if (tmp___5) { tmp___6 = 0; } else { tmp___6 = 1; } if (tmp___6) { return (0); } else { } } else { } if (((int )flags & 16) != 0) { tmp___7 = intel_sdvo_tv_init(intel_sdvo, 16); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { return (0); } else { } } else { } if (((int )flags & 2) != 0) { tmp___9 = intel_sdvo_analog_init(intel_sdvo, 0); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { return (0); } else { } } else { } if (((int )flags & 514) == 514) { tmp___11 = intel_sdvo_analog_init(intel_sdvo, 1); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { return (0); } else { } } else { } if (((int )flags & 64) != 0) { tmp___13 = intel_sdvo_lvds_init(intel_sdvo, 0); if (tmp___13) { tmp___14 = 0; } else { tmp___14 = 1; } if (tmp___14) { return (0); } else { } } else { } if (((int )flags & 16448) == 16448) { tmp___15 = intel_sdvo_lvds_init(intel_sdvo, 1); if (tmp___15) { tmp___16 = 0; } else { tmp___16 = 1; } if (tmp___16) { return (0); } else { } } else { } if (((int )flags & 17247) == 0) { intel_sdvo->controlled_output = 0U; memcpy((void *)(& bytes), (void const *)(& intel_sdvo->caps.output_flags), 2UL); tmp___17 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___17 != 0L) { drm_ut_debug_printk("intel_sdvo_output_setup", "%s: Unknown SDVO output type (0x%02x%02x)\n", (int )intel_sdvo->is_sdvob ? 
(char *)"SDVOB" : (char *)"SDVOC", (int )bytes[0], (int )bytes[1]); } else { } return (0); } else { } intel_sdvo->base.crtc_mask = 7; return (1); } } static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo ) { struct drm_device *dev ; struct drm_connector *connector ; struct drm_connector *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct intel_encoder *tmp___0 ; struct list_head const *__mptr___1 ; { dev = intel_sdvo->base.base.dev; __mptr = (struct list_head const *)dev->mode_config.connector_list.next; connector = (struct drm_connector *)__mptr + 0xffffffffffffffe8UL; __mptr___0 = (struct list_head const *)connector->head.next; tmp = (struct drm_connector *)__mptr___0 + 0xffffffffffffffe8UL; goto ldv_49414; ldv_49413: tmp___0 = intel_attached_encoder(connector); if ((unsigned long )tmp___0 == (unsigned long )(& intel_sdvo->base)) { drm_connector_unregister(connector); intel_sdvo_destroy(connector); } else { } connector = tmp; __mptr___1 = (struct list_head const *)tmp->head.next; tmp = (struct drm_connector *)__mptr___1 + 0xffffffffffffffe8UL; ldv_49414: ; if ((unsigned long )(& connector->head) != (unsigned long )(& dev->mode_config.connector_list)) { goto ldv_49413; } else { } return; } } static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo , struct intel_sdvo_connector *intel_sdvo_connector , int type ) { struct drm_device *dev ; struct intel_sdvo_tv_format format ; uint32_t format_map ; uint32_t i ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; unsigned long _min1 ; unsigned long _min2 ; int tmp___3 ; { dev = intel_sdvo->base.base.dev; tmp = intel_sdvo_set_target_output(intel_sdvo, (int )((u16 )type)); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = intel_sdvo_get_value(intel_sdvo, 39, (void *)(& format), 6); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } _min1 = 4UL; _min2 = 6UL; memcpy((void *)(& 
format_map), (void const *)(& format), _min1 < _min2 ? _min1 : _min2); if (format_map == 0U) { return (0); } else { } intel_sdvo_connector->format_supported_num = 0; i = 0U; goto ldv_49429; ldv_49428: ; if (((uint32_t )(1 << (int )i) & format_map) != 0U) { tmp___3 = intel_sdvo_connector->format_supported_num; intel_sdvo_connector->format_supported_num = intel_sdvo_connector->format_supported_num + 1; intel_sdvo_connector->tv_format_supported[tmp___3] = (u8 )i; } else { } i = i + 1U; ldv_49429: ; if (i <= 18U) { goto ldv_49428; } else { } intel_sdvo_connector->tv_format = drm_property_create(dev, 8, "mode", intel_sdvo_connector->format_supported_num); if ((unsigned long )intel_sdvo_connector->tv_format == (unsigned long )((struct drm_property *)0)) { return (0); } else { } i = 0U; goto ldv_49432; ldv_49431: drm_property_add_enum(intel_sdvo_connector->tv_format, (int )i, (uint64_t )i, tv_format_names[(int )intel_sdvo_connector->tv_format_supported[i]]); i = i + 1U; ldv_49432: ; if ((uint32_t )intel_sdvo_connector->format_supported_num > i) { goto ldv_49431; } else { } intel_sdvo->tv_format_index = (int )intel_sdvo_connector->tv_format_supported[0]; drm_object_attach_property(& intel_sdvo_connector->base.base.base, intel_sdvo_connector->tv_format, 0ULL); return (1); } } static bool intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo , struct intel_sdvo_connector *intel_sdvo_connector , struct intel_sdvo_enhancements_reply enhancements ) { struct drm_device *dev ; struct drm_connector *connector ; uint16_t response ; uint16_t data_value[2U] ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; bool tmp___4 ; int tmp___5 ; bool tmp___6 ; int tmp___7 ; long tmp___8 ; bool tmp___9 ; int tmp___10 ; bool tmp___11 ; int tmp___12 ; long tmp___13 ; bool tmp___14 ; int tmp___15 ; bool tmp___16 ; int tmp___17 ; long tmp___18 ; bool tmp___19 ; int tmp___20 ; bool tmp___21 ; int tmp___22 ; long tmp___23 ; bool tmp___24 ; int tmp___25 ; bool 
tmp___26 ; int tmp___27 ; long tmp___28 ; bool tmp___29 ; int tmp___30 ; bool tmp___31 ; int tmp___32 ; long tmp___33 ; bool tmp___34 ; int tmp___35 ; bool tmp___36 ; int tmp___37 ; long tmp___38 ; bool tmp___39 ; int tmp___40 ; bool tmp___41 ; int tmp___42 ; long tmp___43 ; bool tmp___44 ; int tmp___45 ; bool tmp___46 ; int tmp___47 ; long tmp___48 ; bool tmp___49 ; int tmp___50 ; bool tmp___51 ; int tmp___52 ; long tmp___53 ; bool tmp___54 ; int tmp___55 ; bool tmp___56 ; int tmp___57 ; long tmp___58 ; bool tmp___59 ; int tmp___60 ; bool tmp___61 ; int tmp___62 ; long tmp___63 ; bool tmp___64 ; int tmp___65 ; bool tmp___66 ; int tmp___67 ; long tmp___68 ; bool tmp___69 ; int tmp___70 ; long tmp___71 ; { dev = intel_sdvo->base.base.dev; connector = & intel_sdvo_connector->base.base; if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp = intel_sdvo_get_value(intel_sdvo, 97, (void *)(& data_value), 4); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = intel_sdvo_get_value(intel_sdvo, 98, (void *)(& response), 2); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } intel_sdvo_connector->max_hscan = (u32 )data_value[0]; intel_sdvo_connector->left_margin = (u32 )((int )data_value[0] - (int )response); intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; intel_sdvo_connector->left = drm_property_create_range(dev, 0, "left_margin", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->left == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->left, (uint64_t )intel_sdvo_connector->left_margin); intel_sdvo_connector->right = drm_property_create_range(dev, 0, "right_margin", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->right == (unsigned long )((struct drm_property *)0)) { return (0); } else { } 
drm_object_attach_property(& connector->base, intel_sdvo_connector->right, (uint64_t )intel_sdvo_connector->right_margin); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "h_overscan: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 1UL) != 0U) { tmp___4 = intel_sdvo_get_value(intel_sdvo, 100, (void *)(& data_value), 4); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 1; } if (tmp___5) { return (0); } else { } tmp___6 = intel_sdvo_get_value(intel_sdvo, 101, (void *)(& response), 2); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { return (0); } else { } intel_sdvo_connector->max_vscan = (u32 )data_value[0]; intel_sdvo_connector->top_margin = (u32 )((int )data_value[0] - (int )response); intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; intel_sdvo_connector->top = drm_property_create_range(dev, 0, "top_margin", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->top == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->top, (uint64_t )intel_sdvo_connector->top_margin); intel_sdvo_connector->bottom = drm_property_create_range(dev, 0, "bottom_margin", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->bottom == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->bottom, (uint64_t )intel_sdvo_connector->bottom_margin); tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "v_overscan: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int 
)*((unsigned char *)(& enhancements) + 1UL) != 0U) { tmp___9 = intel_sdvo_get_value(intel_sdvo, 103, (void *)(& data_value), 4); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { return (0); } else { tmp___11 = intel_sdvo_get_value(intel_sdvo, 104, (void *)(& response), 2); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { return (0); } else { } } intel_sdvo_connector->max_hpos = (u32 )data_value[0]; intel_sdvo_connector->cur_hpos = (u32 )response; intel_sdvo_connector->hpos = drm_property_create_range(dev, 0, "hpos", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->hpos == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->hpos, (uint64_t )intel_sdvo_connector->cur_hpos); tmp___13 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___13 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "hpos: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 1UL) != 0U) { tmp___14 = intel_sdvo_get_value(intel_sdvo, 106, (void *)(& data_value), 4); if (tmp___14) { tmp___15 = 0; } else { tmp___15 = 1; } if (tmp___15) { return (0); } else { tmp___16 = intel_sdvo_get_value(intel_sdvo, 107, (void *)(& response), 2); if (tmp___16) { tmp___17 = 0; } else { tmp___17 = 1; } if (tmp___17) { return (0); } else { } } intel_sdvo_connector->max_vpos = (u32 )data_value[0]; intel_sdvo_connector->cur_vpos = (u32 )response; intel_sdvo_connector->vpos = drm_property_create_range(dev, 0, "vpos", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->vpos == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->vpos, (uint64_t )intel_sdvo_connector->cur_vpos); tmp___18 = ldv__builtin_expect((drm_debug & 
4U) != 0U, 0L); if (tmp___18 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "vpos: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp___19 = intel_sdvo_get_value(intel_sdvo, 85, (void *)(& data_value), 4); if (tmp___19) { tmp___20 = 0; } else { tmp___20 = 1; } if (tmp___20) { return (0); } else { tmp___21 = intel_sdvo_get_value(intel_sdvo, 86, (void *)(& response), 2); if (tmp___21) { tmp___22 = 0; } else { tmp___22 = 1; } if (tmp___22) { return (0); } else { } } intel_sdvo_connector->max_saturation = (u32 )data_value[0]; intel_sdvo_connector->cur_saturation = (u32 )response; intel_sdvo_connector->saturation = drm_property_create_range(dev, 0, "saturation", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->saturation == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->saturation, (uint64_t )intel_sdvo_connector->cur_saturation); tmp___23 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___23 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "saturation: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp___24 = intel_sdvo_get_value(intel_sdvo, 94, (void *)(& data_value), 4); if (tmp___24) { tmp___25 = 0; } else { tmp___25 = 1; } if (tmp___25) { return (0); } else { tmp___26 = intel_sdvo_get_value(intel_sdvo, 95, (void *)(& response), 2); if (tmp___26) { tmp___27 = 0; } else { tmp___27 = 1; } if (tmp___27) { return (0); } else { } } intel_sdvo_connector->max_contrast = (u32 )data_value[0]; intel_sdvo_connector->cur_contrast = (u32 )response; intel_sdvo_connector->contrast = drm_property_create_range(dev, 0, "contrast", 0ULL, 
(uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->contrast == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->contrast, (uint64_t )intel_sdvo_connector->cur_contrast); tmp___28 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___28 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "contrast: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp___29 = intel_sdvo_get_value(intel_sdvo, 88, (void *)(& data_value), 4); if (tmp___29) { tmp___30 = 0; } else { tmp___30 = 1; } if (tmp___30) { return (0); } else { tmp___31 = intel_sdvo_get_value(intel_sdvo, 89, (void *)(& response), 2); if (tmp___31) { tmp___32 = 0; } else { tmp___32 = 1; } if (tmp___32) { return (0); } else { } } intel_sdvo_connector->max_hue = (u32 )data_value[0]; intel_sdvo_connector->cur_hue = (u32 )response; intel_sdvo_connector->hue = drm_property_create_range(dev, 0, "hue", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->hue == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->hue, (uint64_t )intel_sdvo_connector->cur_hue); tmp___33 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___33 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "hue: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 1UL) != 0U) { tmp___34 = intel_sdvo_get_value(intel_sdvo, 109, (void *)(& data_value), 4); if (tmp___34) { tmp___35 = 0; } else { tmp___35 = 1; } if (tmp___35) { return (0); } else { tmp___36 = intel_sdvo_get_value(intel_sdvo, 110, (void *)(& response), 2); if (tmp___36) { tmp___37 = 
0; } else { tmp___37 = 1; } if (tmp___37) { return (0); } else { } } intel_sdvo_connector->max_sharpness = (u32 )data_value[0]; intel_sdvo_connector->cur_sharpness = (u32 )response; intel_sdvo_connector->sharpness = drm_property_create_range(dev, 0, "sharpness", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->sharpness == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->sharpness, (uint64_t )intel_sdvo_connector->cur_sharpness); tmp___38 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___38 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "sharpness: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp___39 = intel_sdvo_get_value(intel_sdvo, 91, (void *)(& data_value), 4); if (tmp___39) { tmp___40 = 0; } else { tmp___40 = 1; } if (tmp___40) { return (0); } else { tmp___41 = intel_sdvo_get_value(intel_sdvo, 92, (void *)(& response), 2); if (tmp___41) { tmp___42 = 0; } else { tmp___42 = 1; } if (tmp___42) { return (0); } else { } } intel_sdvo_connector->max_brightness = (u32 )data_value[0]; intel_sdvo_connector->cur_brightness = (u32 )response; intel_sdvo_connector->brightness = drm_property_create_range(dev, 0, "brightness", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->brightness == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->brightness, (uint64_t )intel_sdvo_connector->cur_brightness); tmp___43 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___43 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "brightness: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned 
int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp___44 = intel_sdvo_get_value(intel_sdvo, 77, (void *)(& data_value), 4); if (tmp___44) { tmp___45 = 0; } else { tmp___45 = 1; } if (tmp___45) { return (0); } else { tmp___46 = intel_sdvo_get_value(intel_sdvo, 78, (void *)(& response), 2); if (tmp___46) { tmp___47 = 0; } else { tmp___47 = 1; } if (tmp___47) { return (0); } else { } } intel_sdvo_connector->max_flicker_filter = (u32 )data_value[0]; intel_sdvo_connector->cur_flicker_filter = (u32 )response; intel_sdvo_connector->flicker_filter = drm_property_create_range(dev, 0, "flicker_filter", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->flicker_filter == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->flicker_filter, (uint64_t )intel_sdvo_connector->cur_flicker_filter); tmp___48 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___48 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "flicker_filter: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp___49 = intel_sdvo_get_value(intel_sdvo, 123, (void *)(& data_value), 4); if (tmp___49) { tmp___50 = 0; } else { tmp___50 = 1; } if (tmp___50) { return (0); } else { tmp___51 = intel_sdvo_get_value(intel_sdvo, 80, (void *)(& response), 2); if (tmp___51) { tmp___52 = 0; } else { tmp___52 = 1; } if (tmp___52) { return (0); } else { } } intel_sdvo_connector->max_flicker_filter_adaptive = (u32 )data_value[0]; intel_sdvo_connector->cur_flicker_filter_adaptive = (u32 )response; intel_sdvo_connector->flicker_filter_adaptive = drm_property_create_range(dev, 0, "flicker_filter_adaptive", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->flicker_filter_adaptive == (unsigned long )((struct drm_property *)0)) { return 
(0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->flicker_filter_adaptive, (uint64_t )intel_sdvo_connector->cur_flicker_filter_adaptive); tmp___53 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___53 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "flicker_filter_adaptive: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp___54 = intel_sdvo_get_value(intel_sdvo, 82, (void *)(& data_value), 4); if (tmp___54) { tmp___55 = 0; } else { tmp___55 = 1; } if (tmp___55) { return (0); } else { tmp___56 = intel_sdvo_get_value(intel_sdvo, 83, (void *)(& response), 2); if (tmp___56) { tmp___57 = 0; } else { tmp___57 = 1; } if (tmp___57) { return (0); } else { } } intel_sdvo_connector->max_flicker_filter_2d = (u32 )data_value[0]; intel_sdvo_connector->cur_flicker_filter_2d = (u32 )response; intel_sdvo_connector->flicker_filter_2d = drm_property_create_range(dev, 0, "flicker_filter_2d", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->flicker_filter_2d == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->flicker_filter_2d, (uint64_t )intel_sdvo_connector->cur_flicker_filter_2d); tmp___58 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___58 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "flicker_filter_2d: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 1UL) != 0U) { tmp___59 = intel_sdvo_get_value(intel_sdvo, 116, (void *)(& data_value), 4); if (tmp___59) { tmp___60 = 0; } else { tmp___60 = 1; } if (tmp___60) { return (0); } else { tmp___61 = intel_sdvo_get_value(intel_sdvo, 117, (void *)(& response), 2); if 
(tmp___61) { tmp___62 = 0; } else { tmp___62 = 1; } if (tmp___62) { return (0); } else { } } intel_sdvo_connector->max_tv_chroma_filter = (u32 )data_value[0]; intel_sdvo_connector->cur_tv_chroma_filter = (u32 )response; intel_sdvo_connector->tv_chroma_filter = drm_property_create_range(dev, 0, "tv_chroma_filter", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->tv_chroma_filter == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->tv_chroma_filter, (uint64_t )intel_sdvo_connector->cur_tv_chroma_filter); tmp___63 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___63 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "tv_chroma_filter: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 1UL) != 0U) { tmp___64 = intel_sdvo_get_value(intel_sdvo, 119, (void *)(& data_value), 4); if (tmp___64) { tmp___65 = 0; } else { tmp___65 = 1; } if (tmp___65) { return (0); } else { tmp___66 = intel_sdvo_get_value(intel_sdvo, 120, (void *)(& response), 2); if (tmp___66) { tmp___67 = 0; } else { tmp___67 = 1; } if (tmp___67) { return (0); } else { } } intel_sdvo_connector->max_tv_luma_filter = (u32 )data_value[0]; intel_sdvo_connector->cur_tv_luma_filter = (u32 )response; intel_sdvo_connector->tv_luma_filter = drm_property_create_range(dev, 0, "tv_luma_filter", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->tv_luma_filter == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->tv_luma_filter, (uint64_t )intel_sdvo_connector->cur_tv_luma_filter); tmp___68 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___68 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "tv_luma_filter: max %d, default 
%d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } if ((unsigned int )*((unsigned char *)(& enhancements) + 1UL) != 0U) { tmp___69 = intel_sdvo_get_value(intel_sdvo, 112, (void *)(& response), 2); if (tmp___69) { tmp___70 = 0; } else { tmp___70 = 1; } if (tmp___70) { return (0); } else { } intel_sdvo_connector->max_dot_crawl = 1U; intel_sdvo_connector->cur_dot_crawl = (u32 )response & 1U; intel_sdvo_connector->dot_crawl = drm_property_create_range(dev, 0, "dot_crawl", 0ULL, 1ULL); if ((unsigned long )intel_sdvo_connector->dot_crawl == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->dot_crawl, (uint64_t )intel_sdvo_connector->cur_dot_crawl); tmp___71 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___71 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_tv", "dot crawl: current %d\n", (int )response); } else { } } else { } return (1); } } static bool intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo , struct intel_sdvo_connector *intel_sdvo_connector , struct intel_sdvo_enhancements_reply enhancements ) { struct drm_device *dev ; struct drm_connector *connector ; uint16_t response ; uint16_t data_value[2U] ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; long tmp___3 ; { dev = intel_sdvo->base.base.dev; connector = & intel_sdvo_connector->base.base; if ((unsigned int )*((unsigned char *)(& enhancements) + 0UL) != 0U) { tmp = intel_sdvo_get_value(intel_sdvo, 91, (void *)(& data_value), 4); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { tmp___1 = intel_sdvo_get_value(intel_sdvo, 92, (void *)(& response), 2); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } } intel_sdvo_connector->max_brightness = (u32 )data_value[0]; intel_sdvo_connector->cur_brightness = (u32 )response; intel_sdvo_connector->brightness = 
drm_property_create_range(dev, 0, "brightness", 0ULL, (uint64_t )data_value[0]); if ((unsigned long )intel_sdvo_connector->brightness == (unsigned long )((struct drm_property *)0)) { return (0); } else { } drm_object_attach_property(& connector->base, intel_sdvo_connector->brightness, (uint64_t )intel_sdvo_connector->cur_brightness); tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property_lvds", "brightness: max %d, default %d, current %d\n", (int )data_value[0], (int )data_value[1], (int )response); } else { } } else { } return (1); } } static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo , struct intel_sdvo_connector *intel_sdvo_connector ) { union __anonunion_enhancements_450 enhancements ; long tmp ; bool tmp___0 ; bool tmp___1 ; { enhancements.response = 0U; intel_sdvo_get_value(intel_sdvo, 132, (void *)(& enhancements), 2); if ((unsigned int )enhancements.response == 0U) { tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_sdvo_create_enhance_property", "No enhancement is supported\n"); } else { } return (1); } else { } if (((int )intel_sdvo_connector->output_flag & 28) != 0) { tmp___0 = intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); return (tmp___0); } else if (((int )intel_sdvo_connector->output_flag & 16448) != 0) { tmp___1 = intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); return (tmp___1); } else { return (1); } } } static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter , struct i2c_msg *msgs , int num ) { struct intel_sdvo *sdvo ; bool tmp ; int tmp___0 ; int tmp___1 ; { sdvo = (struct intel_sdvo *)adapter->algo_data; tmp = intel_sdvo_set_control_bus_switch(sdvo, (int )sdvo->ddc_bus); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-5); } else { } tmp___1 = 
(*(((sdvo->i2c)->algo)->master_xfer))(sdvo->i2c, msgs, num); return (tmp___1); } } static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter ) { struct intel_sdvo *sdvo ; u32 tmp ; { sdvo = (struct intel_sdvo *)adapter->algo_data; tmp = (*(((sdvo->i2c)->algo)->functionality))(sdvo->i2c); return (tmp); } } static struct i2c_algorithm const intel_sdvo_ddc_proxy = {& intel_sdvo_ddc_proxy_xfer, 0, & intel_sdvo_ddc_proxy_func, 0, 0}; static bool intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo , struct drm_device *dev ) { int tmp ; { sdvo->ddc.owner = & __this_module; sdvo->ddc.class = 8U; snprintf((char *)(& sdvo->ddc.name), 20UL, "SDVO DDC proxy"); sdvo->ddc.dev.parent = & (dev->pdev)->dev; sdvo->ddc.algo_data = (void *)sdvo; sdvo->ddc.algo = & intel_sdvo_ddc_proxy; tmp = i2c_add_adapter(& sdvo->ddc); return (tmp == 0); } } bool intel_sdvo_init(struct drm_device *dev , uint32_t sdvo_reg , bool is_sdvob ) { struct drm_i915_private *dev_priv ; struct intel_encoder *intel_encoder ; struct intel_sdvo *intel_sdvo ; int i ; void *tmp ; u8 tmp___0 ; bool tmp___1 ; int tmp___2 ; u8 byte ; long tmp___3 ; bool tmp___4 ; int tmp___5 ; struct drm_i915_private *__p ; bool tmp___6 ; int tmp___7 ; long tmp___8 ; bool tmp___9 ; int tmp___10 ; bool tmp___11 ; int tmp___12 ; bool tmp___13 ; int tmp___14 ; long tmp___15 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = kzalloc(2232UL, 208U); intel_sdvo = (struct intel_sdvo *)tmp; if ((unsigned long )intel_sdvo == (unsigned long )((struct intel_sdvo *)0)) { return (0); } else { } intel_sdvo->sdvo_reg = sdvo_reg; intel_sdvo->is_sdvob = is_sdvob; tmp___0 = intel_sdvo_get_slave_addr(dev, intel_sdvo); intel_sdvo->slave_addr = (u8 )((int )tmp___0 >> 1); intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); tmp___1 = intel_sdvo_init_ddc_proxy(intel_sdvo, dev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto err_i2c_bus; } else { } intel_encoder = & intel_sdvo->base; intel_encoder->type = 3; 
drm_encoder_init(dev, & intel_encoder->base, & intel_sdvo_enc_funcs, 0); i = 0; goto ldv_49492; ldv_49491: tmp___4 = intel_sdvo_read_byte(intel_sdvo, (int )((u8 )i), & byte); if (tmp___4) { tmp___5 = 0; } else { tmp___5 = 1; } if (tmp___5) { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_sdvo_init", "No SDVO device found on %s\n", (int )intel_sdvo->is_sdvob ? (char *)"SDVOB" : (char *)"SDVOC"); } else { } goto err; } else { } i = i + 1; ldv_49492: ; if (i <= 63) { goto ldv_49491; } else { } intel_encoder->compute_config = & intel_sdvo_compute_config; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )__p->pch_type != 0U) { intel_encoder->disable = & pch_disable_sdvo; intel_encoder->post_disable = & pch_post_disable_sdvo; } else { intel_encoder->disable = & intel_disable_sdvo; } intel_encoder->pre_enable = & intel_sdvo_pre_enable; intel_encoder->enable = & intel_enable_sdvo; intel_encoder->get_hw_state = & intel_sdvo_get_hw_state; intel_encoder->get_config = & intel_sdvo_get_config; tmp___6 = intel_sdvo_get_capabilities(intel_sdvo, & intel_sdvo->caps); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { goto err; } else { } tmp___9 = intel_sdvo_output_setup(intel_sdvo, (int )intel_sdvo->caps.output_flags); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { tmp___8 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_sdvo_init", "SDVO output failed to setup on %s\n", (int )intel_sdvo->is_sdvob ? (char *)"SDVOB" : (char *)"SDVOC"); } else { } goto err_output; } else { } if ((unsigned int )intel_sdvo->hotplug_active != 0U) { intel_encoder->hpd_pin = (int )intel_sdvo->is_sdvob ? 
2 : 3; } else { } intel_sdvo->base.cloneable = 0U; intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); tmp___11 = intel_sdvo_set_target_input(intel_sdvo); if (tmp___11) { tmp___12 = 0; } else { tmp___12 = 1; } if (tmp___12) { goto err_output; } else { } tmp___13 = intel_sdvo_get_input_pixel_clock_range(intel_sdvo, & intel_sdvo->pixel_clock_min, & intel_sdvo->pixel_clock_max); if (tmp___13) { tmp___14 = 0; } else { tmp___14 = 1; } if (tmp___14) { goto err_output; } else { } tmp___15 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___15 != 0L) { drm_ut_debug_printk("intel_sdvo_init", "%s device VID/DID: %02X:%02X.%02X, clock range %dMHz - %dMHz, input 1: %c, input 2: %c, output 1: %c, output 2: %c\n", (int )intel_sdvo->is_sdvob ? (char *)"SDVOB" : (char *)"SDVOC", (int )intel_sdvo->caps.vendor_id, (int )intel_sdvo->caps.device_id, (int )intel_sdvo->caps.device_rev_id, intel_sdvo->pixel_clock_min / 1000, intel_sdvo->pixel_clock_max / 1000, (int )intel_sdvo->caps.sdvo_inputs_mask & 1 ? 89 : 78, ((int )intel_sdvo->caps.sdvo_inputs_mask & 2) != 0 ? 89 : 78, ((int )intel_sdvo->caps.output_flags & 3) != 0 ? 89 : 78, ((int )intel_sdvo->caps.output_flags & 768) != 0 ? 
89 : 78); } else { } return (1); err_output: intel_sdvo_output_cleanup(intel_sdvo); err: drm_encoder_cleanup(& intel_encoder->base); i2c_del_adapter(& intel_sdvo->ddc); err_i2c_bus: intel_sdvo_unselect_i2c_bus(intel_sdvo); kfree((void const *)intel_sdvo); return (0); } } extern int ldv_probe_30(void) ; extern int ldv_probe_28(void) ; void ldv_initialize_drm_connector_funcs_30(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(104UL); intel_sdvo_connector_funcs_group0 = (struct drm_property *)tmp; tmp___0 = ldv_init_zalloc(936UL); intel_sdvo_connector_funcs_group1 = (struct drm_connector *)tmp___0; return; } } void ldv_initialize_i2c_algorithm_27(void) { void *tmp ; { tmp = ldv_init_zalloc(1936UL); intel_sdvo_ddc_proxy_group0 = (struct i2c_adapter *)tmp; return; } } void ldv_initialize_drm_connector_helper_funcs_29(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_sdvo_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_main_exported_27(void) { int ldvarg28 ; struct i2c_msg *ldvarg29 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(16UL); ldvarg29 = (struct i2c_msg *)tmp; ldv_memset((void *)(& ldvarg28), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_27 == 1) { intel_sdvo_ddc_proxy_xfer(intel_sdvo_ddc_proxy_group0, ldvarg29, ldvarg28); ldv_state_variable_27 = 1; } else { } goto ldv_49520; case 1: ; if (ldv_state_variable_27 == 1) { intel_sdvo_ddc_proxy_func(intel_sdvo_ddc_proxy_group0); ldv_state_variable_27 = 1; } else { } goto ldv_49520; default: ldv_stop(); } ldv_49520: ; return; } } void ldv_main_exported_28(void) { struct drm_encoder *ldvarg472 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg472 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_28 == 2) { intel_sdvo_enc_destroy(ldvarg472); ldv_state_variable_28 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49528; case 1: ; if 
(ldv_state_variable_28 == 1) { ldv_probe_28(); ldv_state_variable_28 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49528; default: ldv_stop(); } ldv_49528: ; return; } } void ldv_main_exported_30(void) { uint64_t ldvarg462 ; uint32_t ldvarg461 ; struct drm_connector_state *ldvarg464 ; void *tmp ; uint64_t *ldvarg459 ; void *tmp___0 ; int ldvarg465 ; bool ldvarg463 ; struct drm_connector_state *ldvarg458 ; void *tmp___1 ; uint32_t ldvarg460 ; int tmp___2 ; { tmp = ldv_init_zalloc(32UL); ldvarg464 = (struct drm_connector_state *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg459 = (uint64_t *)tmp___0; tmp___1 = ldv_init_zalloc(32UL); ldvarg458 = (struct drm_connector_state *)tmp___1; ldv_memset((void *)(& ldvarg462), 0, 8UL); ldv_memset((void *)(& ldvarg461), 0, 4UL); ldv_memset((void *)(& ldvarg465), 0, 4UL); ldv_memset((void *)(& ldvarg463), 0, 1UL); ldv_memset((void *)(& ldvarg460), 0, 4UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_30 == 2) { intel_sdvo_dpms(intel_sdvo_connector_funcs_group1, ldvarg465); ldv_state_variable_30 = 2; } else { } if (ldv_state_variable_30 == 1) { intel_sdvo_dpms(intel_sdvo_connector_funcs_group1, ldvarg465); ldv_state_variable_30 = 1; } else { } goto ldv_49543; case 1: ; if (ldv_state_variable_30 == 2) { drm_atomic_helper_connector_destroy_state(intel_sdvo_connector_funcs_group1, ldvarg464); ldv_state_variable_30 = 2; } else { } if (ldv_state_variable_30 == 1) { drm_atomic_helper_connector_destroy_state(intel_sdvo_connector_funcs_group1, ldvarg464); ldv_state_variable_30 = 1; } else { } goto ldv_49543; case 2: ; if (ldv_state_variable_30 == 2) { drm_atomic_helper_connector_duplicate_state(intel_sdvo_connector_funcs_group1); ldv_state_variable_30 = 2; } else { } if (ldv_state_variable_30 == 1) { drm_atomic_helper_connector_duplicate_state(intel_sdvo_connector_funcs_group1); ldv_state_variable_30 = 1; } else { } goto ldv_49543; case 3: ; if (ldv_state_variable_30 == 2) { 
intel_sdvo_detect(intel_sdvo_connector_funcs_group1, (int )ldvarg463); ldv_state_variable_30 = 2; } else { } if (ldv_state_variable_30 == 1) { intel_sdvo_detect(intel_sdvo_connector_funcs_group1, (int )ldvarg463); ldv_state_variable_30 = 1; } else { } goto ldv_49543; case 4: ; if (ldv_state_variable_30 == 2) { intel_sdvo_set_property(intel_sdvo_connector_funcs_group1, intel_sdvo_connector_funcs_group0, ldvarg462); ldv_state_variable_30 = 2; } else { } if (ldv_state_variable_30 == 1) { intel_sdvo_set_property(intel_sdvo_connector_funcs_group1, intel_sdvo_connector_funcs_group0, ldvarg462); ldv_state_variable_30 = 1; } else { } goto ldv_49543; case 5: ; if (ldv_state_variable_30 == 2) { intel_sdvo_destroy(intel_sdvo_connector_funcs_group1); ldv_state_variable_30 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_49543; case 6: ; if (ldv_state_variable_30 == 2) { drm_helper_probe_single_connector_modes(intel_sdvo_connector_funcs_group1, ldvarg461, ldvarg460); ldv_state_variable_30 = 2; } else { } if (ldv_state_variable_30 == 1) { drm_helper_probe_single_connector_modes(intel_sdvo_connector_funcs_group1, ldvarg461, ldvarg460); ldv_state_variable_30 = 1; } else { } goto ldv_49543; case 7: ; if (ldv_state_variable_30 == 2) { intel_connector_atomic_get_property(intel_sdvo_connector_funcs_group1, (struct drm_connector_state const *)ldvarg458, intel_sdvo_connector_funcs_group0, ldvarg459); ldv_state_variable_30 = 2; } else { } if (ldv_state_variable_30 == 1) { intel_connector_atomic_get_property(intel_sdvo_connector_funcs_group1, (struct drm_connector_state const *)ldvarg458, intel_sdvo_connector_funcs_group0, ldvarg459); ldv_state_variable_30 = 1; } else { } goto ldv_49543; case 8: ; if (ldv_state_variable_30 == 1) { ldv_probe_30(); ldv_state_variable_30 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_49543; default: ldv_stop(); } ldv_49543: ; return; } } void ldv_main_exported_29(void) { struct drm_display_mode *ldvarg105 ; void *tmp ; int tmp___0 ; { tmp = 
ldv_init_zalloc(208UL); ldvarg105 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_29 == 1) { intel_sdvo_get_modes(intel_sdvo_connector_helper_funcs_group0); ldv_state_variable_29 = 1; } else { } goto ldv_49558; case 1: ; if (ldv_state_variable_29 == 1) { intel_sdvo_mode_valid(intel_sdvo_connector_helper_funcs_group0, ldvarg105); ldv_state_variable_29 = 1; } else { } goto ldv_49558; case 2: ; if (ldv_state_variable_29 == 1) { intel_best_encoder(intel_sdvo_connector_helper_funcs_group0); ldv_state_variable_29 = 1; } else { } goto ldv_49558; default: ldv_stop(); } ldv_49558: ; return; } } bool ldv_queue_work_on_1021(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_1022(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_1023(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_1024(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_1025(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned 
long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern char *strncpy(char * , char const * , __kernel_size_t ) ; bool ldv_queue_work_on_1035(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_1037(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_1036(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_1039(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_1038(struct workqueue_struct *ldv_func_arg1 ) ; extern struct drm_display_mode *drm_mode_create(struct drm_device * ) ; extern int drm_mode_vrefresh(struct drm_display_mode const * ) ; extern int drm_mode_create_tv_properties(struct drm_device * , unsigned int , char ** ) ; static u32 const filter_table[206U] = { 2973773824U, 773862656U, 889204256U, 805351744U, 899723616U, 767569536U, 2973774976U, 2975870976U, 782251584U, 872426880U, 805351712U, 920695136U, 757083888U, 2971677568U, 2975870976U, 791689088U, 855649472U, 805351680U, 941666656U, 746598224U, 2969580192U, 2975870976U, 797980864U, 840969248U, 805351648U, 964735328U, 734015424U, 2967482816U, 2975870976U, 804272672U, 828386144U, 2954932416U, 987804000U, 721426448U, 2965385504U, 2973806624U, 405289888U, 817900160U, 2954932384U, 1012969792U, 706746424U, 2963288192U, 2971709472U, 406338848U, 809511360U, 2957029504U, 1038135552U, 694163528U, 2961190912U, 2969612352U, 407387840U, 2956994816U, 2957029472U, 1065398464U, 679483480U, 
2959126656U, 2963320928U, 408954912U, 2963286048U, 45152U, 2973773824U, 773862656U, 889204256U, 805351744U, 899723616U, 767569536U, 2973774976U, 2975870976U, 782251584U, 872426880U, 805351712U, 920695136U, 757083888U, 2971677568U, 2975870976U, 791689088U, 855649472U, 805351680U, 941666656U, 746598224U, 2969580192U, 2975870976U, 797980864U, 840969248U, 805351648U, 964735328U, 734015424U, 2967482816U, 2975870976U, 804272672U, 828386144U, 2954932416U, 987804000U, 721426448U, 2965385504U, 2973806624U, 405289888U, 817900160U, 2954932384U, 1012969792U, 706746424U, 2963288192U, 2971709472U, 406338848U, 809511360U, 2957029504U, 1038135552U, 694163528U, 2961190912U, 2969612352U, 407387840U, 2956994816U, 2957029472U, 1065398464U, 679483480U, 2959126656U, 2963320928U, 408954912U, 2963286048U, 45152U, 910176256U, 754986176U, 805320256U, 754988736U, 901786816U, 926953472U, 746597696U, 805320000U, 763377600U, 885009472U, 943730688U, 734014976U, 805319744U, 773863616U, 872426368U, 964702208U, 725626432U, 805319552U, 778058240U, 859843328U, 981479488U, 713043616U, 809513728U, 784350016U, 847260224U, 1006645312U, 704655040U, 813707840U, 784350336U, 838871488U, 1027616896U, 692072192U, 813707712U, 790642112U, 830482688U, 1048588480U, 679489344U, 817901888U, 790642496U, 826288192U, 671101184U, 671100672U, 12544U, 910176256U, 754986176U, 805320256U, 754988736U, 901786816U, 926953472U, 746597696U, 805320000U, 763377600U, 885009472U, 943730688U, 734014976U, 805319744U, 773863616U, 872426368U, 964702208U, 725626432U, 805319552U, 778058240U, 859843328U, 981479488U, 713043616U, 809513728U, 784350016U, 847260224U, 1006645312U, 704655040U, 813707840U, 784350336U, 838871488U, 1027616896U, 692072192U, 813707712U, 790642112U, 830482688U, 1048588480U, 679489344U, 817901888U, 790642496U, 826288192U, 671101184U, 671100672U, 12544U}; static struct color_conversion const ntsc_m_csc_composite = {818U, 301U, 2003U, 260U, 1843U, 1325U, 1479U, 512U, 832U, 780U, 1744U, 512U}; static struct video_levels 
const ntsc_m_levels_composite = {225, 267, 113}; static struct color_conversion const ntsc_m_csc_svideo = {818U, 301U, 2003U, 307U, 1898U, 1380U, 781U, 512U, 890U, 829U, 1782U, 512U}; static struct video_levels const ntsc_m_levels_svideo = {266, 316, 133}; static struct color_conversion const ntsc_j_csc_composite = {818U, 301U, 2003U, 281U, 1868U, 1350U, 1516U, 512U, 858U, 802U, 1761U, 512U}; static struct video_levels const ntsc_j_levels_composite = {225, 225, 113}; static struct color_conversion const ntsc_j_csc_svideo = {818U, 301U, 2003U, 332U, 1928U, 1409U, 802U, 512U, 921U, 854U, 1802U, 512U}; static struct video_levels const ntsc_j_levels_svideo = {266, 266, 133}; static struct color_conversion const pal_csc_composite = {818U, 301U, 2003U, 275U, 1861U, 1343U, 1505U, 512U, 851U, 796U, 1756U, 512U}; static struct video_levels const pal_levels_composite = {237, 237, 118}; static struct color_conversion const pal_csc_svideo = {818U, 301U, 2003U, 325U, 1920U, 1401U, 796U, 512U, 912U, 847U, 1797U, 512U}; static struct video_levels const pal_levels_svideo = {280, 280, 139}; static struct color_conversion const pal_m_csc_composite = {818U, 301U, 2003U, 260U, 1843U, 1325U, 1479U, 512U, 832U, 780U, 1744U, 512U}; static struct video_levels const pal_m_levels_composite = {225, 267, 113}; static struct color_conversion const pal_m_csc_svideo = {818U, 301U, 2003U, 307U, 1898U, 1380U, 781U, 512U, 890U, 829U, 1782U, 512U}; static struct video_levels const pal_m_levels_svideo = {266, 316, 133}; static struct color_conversion const pal_n_csc_composite = {818U, 301U, 2003U, 260U, 1843U, 1325U, 1479U, 512U, 832U, 780U, 1744U, 512U}; static struct video_levels const pal_n_levels_composite = {225, 267, 118}; static struct color_conversion const pal_n_csc_svideo = {818U, 301U, 2003U, 307U, 1898U, 1380U, 781U, 512U, 890U, 829U, 1782U, 512U}; static struct video_levels const pal_n_levels_svideo = {266, 316, 139}; static struct color_conversion const sdtv_csc_yprpb = {818U, 301U, 
2003U, 325U, 1369U, 851U, 256U, 512U, 256U, 941U, 1869U, 512U}; static struct color_conversion const hdtv_csc_yprpb = {1459U, 366U, 1832U, 325U, 2005U, 907U, 256U, 512U, 256U, 977U, 1724U, 512U}; static struct video_levels const component_levels = {279, 279, 0}; static struct tv_mode const tv_modes[12U] = { {"NTSC-M", 108000, 59940, 786432U, 64, 836, 124, 857, 0, 0, 0, 6, 7, 6, 1, 0, 1, 18, 20, 21, 240, 1, 72, 34, 9, 240, 10, 240, 9, 240, 10, 240, 27456, 0, 135, 20800, 0, 16777216U, 0, & ntsc_m_levels_composite, & ntsc_m_levels_svideo, & ntsc_m_csc_composite, & ntsc_m_csc_svideo, (u32 const *)(& filter_table), 0}, {"NTSC-443", 108000, 59940, 786432U, 64, 836, 124, 857, 0, 0, 0, 6, 7, 6, 1, 0, 1, 18, 20, 21, 240, 1, 72, 34, 9, 240, 10, 240, 9, 240, 10, 240, 27456, 525, 168, 4093, 310, 50331648U, 0, & ntsc_m_levels_composite, & ntsc_m_levels_svideo, & ntsc_m_csc_composite, & ntsc_m_csc_svideo, (u32 const *)(& filter_table), 0}, {"NTSC-J", 108000, 59940, 786432U, 64, 836, 124, 857, 0, 0, 0, 6, 7, 6, 1, 0, 1, 18, 20, 21, 240, 1, 72, 34, 9, 240, 10, 240, 9, 240, 10, 240, 27456, 0, 135, 20800, 0, 16777216U, 0, & ntsc_j_levels_composite, & ntsc_j_levels_svideo, & ntsc_j_csc_composite, & ntsc_j_csc_svideo, (u32 const *)(& filter_table), 0}, {"PAL-M", 108000, 59940, 786432U, 64, 836, 124, 857, 0, 0, 0, 6, 7, 6, 1, 0, 1, 18, 20, 21, 240, 1, 72, 34, 9, 240, 10, 240, 9, 240, 10, 240, 27456, 0, 135, 16704, 0, 33554432U, 1, & pal_m_levels_composite, & pal_m_levels_svideo, & pal_m_csc_composite, & pal_m_csc_svideo, (u32 const *)(& filter_table), 0}, {"PAL-N", 108000, 50000, 786432U, 64, 844, 128, 863, 0, 0, 0, 6, 7, 6, 1, 0, 1, 18, 24, 25, 286, 1, 73, 34, 8, 285, 8, 286, 9, 286, 9, 285, 27648, 625, 135, 23578, 134, 33554432U, 1, & pal_n_levels_composite, & pal_n_levels_svideo, & pal_n_csc_composite, & pal_n_csc_svideo, (u32 const *)(& filter_table), 0}, {"PAL", 108000, 50000, 786432U, 64, 844, 142, 863, 0, 0, 0, 5, 6, 5, 1, 0, 1, 15, 24, 25, 286, 1, 73, 32, 8, 285, 8, 286, 9, 
286, 9, 285, 27648, 625, 168, 4122, 67, 33554432U, 1, & pal_levels_composite, & pal_levels_svideo, & pal_csc_composite, & pal_csc_svideo, (u32 const *)(& filter_table), 0}, {"480p", 107520, 59940, 0U, 64, 842, 122, 857, 1, 0, 1, 12, 12, 12, 0, 0, 0, 0, 44, 44, 479, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0U, (_Bool)0, 0, 0, 0, 0, (u32 const *)(& filter_table), 0}, {"576p", 107520, 50000, 0U, 64, 859, 139, 863, 1, 0, 1, 10, 10, 10, 0, 0, 0, 0, 48, 48, 575, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0U, (_Bool)0, 0, 0, 0, 0, (u32 const *)(& filter_table), 0}, {"720p@60Hz", 148800, 60000, 262144U, 80, 1580, 300, 1649, 1, 1, 1, 10, 10, 10, 0, 0, 0, 0, 29, 29, 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0U, (_Bool)0, 0, 0, 0, 0, (u32 const *)(& filter_table), 0}, {"720p@50Hz", 148800, 50000, 262144U, 80, 1580, 300, 1979, 1, 1, 1, 10, 10, 10, 0, 0, 0, 0, 29, 29, 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0U, (_Bool)0, 0, 0, 0, 0, (u32 const *)(& filter_table), 800}, {"1080i@50Hz", 148800, 50000, 262144U, 88, 2155, 235, 2639, 0, 1, 1, 4, 5, 10, 1, 4, 4, 10, 21, 22, 539, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0U, (_Bool)0, 0, 0, 0, 0, (u32 const *)(& filter_table), 0}, {"1080i@60Hz", 148800, 60000, 262144U, 88, 2155, 235, 2199, 0, 1, 1, 4, 5, 10, 1, 4, 4, 10, 21, 22, 539, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0U, (_Bool)0, 0, 0, 0, 0, (u32 const *)(& filter_table), 0}}; static struct intel_tv *enc_to_tv(struct intel_encoder *encoder ) { struct intel_encoder const *__mptr ; { __mptr = (struct intel_encoder const *)encoder; return ((struct intel_tv *)__mptr); } } static struct intel_tv *intel_attached_tv(struct drm_connector *connector ) { struct intel_encoder *tmp ; struct intel_tv *tmp___0 ; { tmp = intel_attached_encoder(connector); tmp___0 = enc_to_tv(tmp); return (tmp___0); } } static bool intel_tv_get_hw_state(struct intel_encoder *encoder , enum pipe *pipe ) { struct drm_device *dev ; struct drm_i915_private *dev_priv 
; u32 tmp ; uint32_t tmp___0 ; { /* NOTE(review): tail of intel_tv_get_hw_state(), whose header is on the previous source line. Reads MMIO offset 425984 (0x68000 -- presumably the TV_CTL register; confirm against the i915 register map): sign bit (bit 31) clear => encoder disabled; bit 30 encodes the active pipe. */ dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425984L, 1); tmp = tmp___0; if ((int )tmp >= 0) { /* bit 31 clear: TV encoder is off */ return (0); } else { } *pipe = (enum pipe )((tmp & 1073741824U) >> 30); /* bit 30 -> pipe index */ return (1); } }
/* Enable the TV encoder: wait one vblank on the CRTC's pipe, then OR bit 31 (2147483648U = 0x80000000) into the register at 0x68000. */
static void intel_enable_tv(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct drm_crtc const *__mptr ; uint32_t tmp ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_wait_for_vblank(encoder->base.dev, (int )((struct intel_crtc *)__mptr)->pipe); tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425984L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425984L, tmp | 2147483648U, 1); return; } }
/* Disable the TV encoder: clear bit 31 (mask 2147483647U = 0x7fffffff) of the same register. */
static void intel_disable_tv(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; uint32_t tmp ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425984L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425984L, tmp & 2147483647U, 1); return; } }
/* Linear search of the 12-entry tv_modes[] table by mode name (strcmp); returns NULL when no entry matches. The ldv_* labels/gotos are CIL's lowering of the original for-loop -- do not hand-restructure. */
static struct tv_mode const *intel_tv_mode_lookup(char const *tv_format ) { int i ; struct tv_mode const *tv_mode ; int tmp ; { i = 0; goto ldv_48421; ldv_48420: tv_mode = (struct tv_mode const *)(& tv_modes) + (unsigned long )i; tmp = strcmp(tv_format, tv_mode->name); if (tmp == 0) { return (tv_mode); } else { } i = i + 1; ldv_48421: ; if ((unsigned int )i <= 11U) { goto ldv_48420; } else { } return ((struct tv_mode const *)0); } }
/* Convenience wrapper: look up the tv_mode matching this connector's currently selected format string. */
static struct tv_mode const *intel_tv_mode_find(struct intel_tv *intel_tv ) { struct tv_mode const *tmp ; { tmp = intel_tv_mode_lookup(intel_tv->tv_format); return (tmp); } }
/* intel_tv_mode_valid(): declaration continues on the next source line. */
static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector , struct drm_display_mode *mode ) { struct intel_tv 
*intel_tv ; struct intel_tv *tmp ; struct tv_mode const *tv_mode ; struct tv_mode const *tmp___0 ; long ret ; int __x___0 ; int tmp___2 ; { tmp = intel_attached_tv(connector); intel_tv = tmp; tmp___0 = intel_tv_mode_find(intel_tv); tv_mode = tmp___0; if ((unsigned long )tv_mode != (unsigned long )((struct tv_mode const *)0)) { tmp___2 = drm_mode_vrefresh((struct drm_display_mode const *)mode); __x___0 = (int )tv_mode->refresh + tmp___2 * -1000; ret = (long )(__x___0 < 0 ? - __x___0 : __x___0); if (ret <= 999L) { return (0); } else { } } else { } return (17); } } static void intel_tv_get_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { { pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; return; } } static bool intel_tv_compute_config(struct intel_encoder *encoder , struct intel_crtc_state *pipe_config ) { struct intel_tv *intel_tv ; struct intel_tv *tmp ; struct tv_mode const *tv_mode ; struct tv_mode const *tmp___0 ; long tmp___1 ; { tmp = enc_to_tv(encoder); intel_tv = tmp; tmp___0 = intel_tv_mode_find(intel_tv); tv_mode = tmp___0; if ((unsigned long )tv_mode == (unsigned long )((struct tv_mode const *)0)) { return (0); } else { } pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock; tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_tv_compute_config", "forcing bpc to 8 for TV\n"); } else { } pipe_config->pipe_bpp = 24; pipe_config->base.adjusted_mode.flags = 0U; return (1); } } static void set_tv_mode_timings(struct drm_i915_private *dev_priv , struct tv_mode const *tv_mode , bool burst_ena ) { u32 hctl1 ; u32 hctl2 ; u32 hctl3 ; u32 vctl1 ; u32 vctl2 ; u32 vctl3 ; u32 vctl4 ; u32 vctl5 ; u32 vctl6 ; u32 vctl7 ; { hctl1 = (u32 )((int )(tv_mode->hsync_end << 16) | (int )tv_mode->htotal); hctl2 = (u32 )((int )(tv_mode->hburst_start << 16) | (int )tv_mode->hburst_len); if ((int )burst_ena) { hctl2 = hctl2 | 2147483648U; } else { } hctl3 = (u32 )((int 
)tv_mode->hblank_start | (int )(tv_mode->hblank_end << 16)); vctl1 = (u32 )(((int )(tv_mode->nbr_end << 16) | (int )(tv_mode->vi_end_f1 << 8)) | (int )tv_mode->vi_end_f2); vctl2 = (u32 )(((int )(tv_mode->vsync_len << 16) | (int )(tv_mode->vsync_start_f1 << 8)) | (int )tv_mode->vsync_start_f2); vctl3 = (u32 )(((int )(tv_mode->veq_len << 16) | (int )(tv_mode->veq_start_f1 << 8)) | (int )tv_mode->veq_start_f2); if ((int )tv_mode->veq_ena) { vctl3 = vctl3 | 2147483648U; } else { } vctl4 = (u32 )((int )(tv_mode->vburst_start_f1 << 16) | (int )tv_mode->vburst_end_f1); vctl5 = (u32 )((int )(tv_mode->vburst_start_f2 << 16) | (int )tv_mode->vburst_end_f2); vctl6 = (u32 )((int )(tv_mode->vburst_start_f3 << 16) | (int )tv_mode->vburst_end_f3); vctl7 = (u32 )((int )(tv_mode->vburst_start_f4 << 16) | (int )tv_mode->vburst_end_f4); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426032L, hctl1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426036L, hctl2, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426040L, hctl3, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426044L, vctl1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426048L, vctl2, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426052L, vctl3, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426056L, vctl4, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426060L, vctl5, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426064L, vctl6, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426068L, vctl7, 1); return; } } static void set_color_conversion(struct drm_i915_private *dev_priv , struct color_conversion const *color_conversion ) { { if ((unsigned long )color_conversion == (unsigned long )((struct color_conversion const *)0)) { return; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426000L, (uint32_t )(((int )color_conversion->ry << 16) | (int )color_conversion->gy), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426004L, (uint32_t )(((int 
)color_conversion->by << 16) | (int )color_conversion->ay), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426008L, (uint32_t )(((int )color_conversion->ru << 16) | (int )color_conversion->gu), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426012L, (uint32_t )(((int )color_conversion->bu << 16) | (int )color_conversion->au), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426016L, (uint32_t )(((int )color_conversion->rv << 16) | (int )color_conversion->gv), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426020L, (uint32_t )(((int )color_conversion->bv << 16) | (int )color_conversion->av), 1); return; } } static void intel_tv_pre_enable(struct intel_encoder *encoder ) { struct drm_device *dev ; struct drm_i915_private *dev_priv ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct intel_tv *intel_tv ; struct intel_tv *tmp ; struct tv_mode const *tv_mode ; struct tv_mode const *tmp___0 ; u32 tv_ctl ; u32 scctl1 ; u32 scctl2 ; u32 scctl3 ; int i ; int j ; struct video_levels const *video_levels ; struct color_conversion const *color_conversion ; bool burst_ena ; int xpos ; int ypos ; unsigned int xsize ; unsigned int ysize ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; uint32_t tmp___5 ; { dev = encoder->base.dev; dev_priv = (struct drm_i915_private *)dev->dev_private; __mptr = (struct drm_crtc const *)encoder->base.crtc; intel_crtc = (struct intel_crtc *)__mptr; tmp = enc_to_tv(encoder); intel_tv = tmp; tmp___0 = intel_tv_mode_find(intel_tv); tv_mode = tmp___0; xpos = 0; ypos = 0; if ((unsigned long )tv_mode == (unsigned long )((struct tv_mode const *)0)) { return; } else { } tv_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425984L, 1); tv_ctl = tv_ctl & 4047U; switch (intel_tv->type) { default: ; case 0: ; case 5: tv_ctl = tv_ctl; video_levels = tv_mode->composite_levels; color_conversion = tv_mode->composite_color; burst_ena = 
tv_mode->burst_ena; goto ldv_48492; case 8: tv_ctl = tv_ctl | 536870912U; video_levels = & component_levels; if ((int )tv_mode->burst_ena) { color_conversion = & sdtv_csc_yprpb; } else { color_conversion = & hdtv_csc_yprpb; } burst_ena = 0; goto ldv_48492; case 6: tv_ctl = tv_ctl | 268435456U; video_levels = tv_mode->svideo_levels; color_conversion = tv_mode->svideo_color; burst_ena = tv_mode->burst_ena; goto ldv_48492; } ldv_48492: ; if ((int )intel_crtc->pipe == 1) { tv_ctl = tv_ctl | 1073741824U; } else { } tv_ctl = (u32 )tv_mode->oversample | tv_ctl; if ((int )tv_mode->progressive) { tv_ctl = tv_ctl | 131072U; } else { } if ((int )tv_mode->trilevel_sync) { tv_ctl = tv_ctl | 2097152U; } else { } if ((int )tv_mode->pal_burst) { tv_ctl = tv_ctl | 65536U; } else { } scctl1 = 0U; if ((int )tv_mode->dda1_inc != 0) { scctl1 = scctl1 | 2147483648U; } else { } if ((int )tv_mode->dda2_inc != 0) { scctl1 = scctl1 | 1073741824U; } else { } if ((int )tv_mode->dda3_inc != 0) { scctl1 = scctl1 | 536870912U; } else { } scctl1 = (u32 )tv_mode->sc_reset | scctl1; if ((unsigned long )video_levels != (unsigned long )((struct video_levels const *)0)) { scctl1 = (u32 )(video_levels->burst << 16) | scctl1; } else { } scctl1 = (u32 )tv_mode->dda1_inc | scctl1; scctl2 = (u32 )((int )(tv_mode->dda2_size << 16) | (int )tv_mode->dda2_inc); scctl3 = (u32 )((int )(tv_mode->dda3_size << 16) | (int )tv_mode->dda3_inc); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 9618U) { tv_ctl = tv_ctl | 3072U; } else { } set_tv_mode_timings(dev_priv, tv_mode, (int )burst_ena); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426080L, scctl1, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426084L, scctl2, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426088L, scctl3, 1); set_color_conversion(dev_priv, color_conversion); __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 3U) 
{ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426024L, 4210688U, 1); } else { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426024L, 6316032U, 1); } if ((unsigned long )video_levels != (unsigned long )((struct video_levels const *)0)) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426028L, (uint32_t )((int )(video_levels->black << 16) | (int )video_levels->blank), 1); } else { } assert_pipe(dev_priv, intel_crtc->pipe, 0); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426112L, 2147483648U, 1); xsize = (unsigned int )((int )tv_mode->hblank_start - (int )tv_mode->hblank_end); if ((int )tv_mode->progressive) { ysize = (unsigned int )((int )tv_mode->nbr_end + 1); } else { ysize = (unsigned int )((int )tv_mode->nbr_end * 2 + 1); } xpos = intel_tv->margin[0] + xpos; ypos = intel_tv->margin[1] + ypos; xsize = xsize - (unsigned int )(intel_tv->margin[0] + intel_tv->margin[2]); ysize = ysize - (unsigned int )(intel_tv->margin[1] + intel_tv->margin[3]); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426096L, (uint32_t )((xpos << 16) | ypos), 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 426100L, (xsize << 16) | ysize, 1); j = 0; i = 0; goto ldv_48508; ldv_48507: tmp___1 = j; j = j + 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i << 2) + 426240), *(tv_mode->filter_table + (unsigned long )tmp___1), 1); i = i + 1; ldv_48508: ; if (i <= 59) { goto ldv_48507; } else { } i = 0; goto ldv_48511; ldv_48510: tmp___2 = j; j = j + 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i << 2) + 426496), *(tv_mode->filter_table + (unsigned long )tmp___2), 1); i = i + 1; ldv_48511: ; if (i <= 59) { goto ldv_48510; } else { } i = 0; goto ldv_48514; ldv_48513: tmp___3 = j; j = j + 1; (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i << 2) + 426752), *(tv_mode->filter_table + (unsigned long )tmp___3), 1); i = i + 1; ldv_48514: ; if (i <= 42) { goto ldv_48513; } else { } i = 0; goto ldv_48517; ldv_48516: tmp___4 = j; j = j + 
1; /* NOTE(review): tail of intel_tv_pre_enable() -- this is the fourth of four goto-lowered loops copying tv_mode->filter_table entries into the MMIO filter-coefficient window (here base 427008), 43 words (i <= 42). */ (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, (off_t )((i << 2) + 427008), *(tv_mode->filter_table + (unsigned long )tmp___4), 1); i = i + 1; ldv_48517: ; if (i <= 42) { goto ldv_48516; } else { } /* Clear the low byte and bits 24-31 of the register at 425988 (0x68004 -- presumably TV_DAC; confirm), keeping bits 8-23 (mask 16776960U = 0x00ffff00), then write the assembled tv_ctl to enable the configured mode. */ tmp___5 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425988L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425988L, tmp___5 & 16776960U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425984L, tv_ctl, 1); return; } }
/* Single hard-coded drm_display_mode named "NTSC 480i" (see the char initializer). Used by intel_tv_detect() further down as the fixed mode for load-detect probing. */
static struct drm_display_mode const reported_modes[1U] = { {{0, 0}, {0U, 0U, 0}, {'N', 'T', 'S', 'C', ' ', '4', '8', '0', 'i', '\000'}, 0, 64U, 107520, 1280, 1368, 1496, 1712, 0, 1024, 1027, 1034, 1104, 0, 0U, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
/* Probe which physical TV connection is attached (composite / s-video / component) by saving TV_CTL/TV_DAC, forcing the DAC outputs on, and reading back the detect bits; restores both registers before returning. Returns the detected connector type or -1. Continues on the following source lines. */
static int intel_tv_detect_type(struct intel_tv *intel_tv , struct drm_connector *connector ) { struct drm_encoder *encoder ; struct drm_crtc *crtc ; struct intel_crtc *intel_crtc ; struct drm_crtc const *__mptr ; struct drm_device *dev ; struct drm_i915_private *dev_priv ; u32 tv_ctl ; u32 save_tv_ctl ; u32 tv_dac ; u32 save_tv_dac ; int type ; struct drm_i915_private *__p ; struct drm_crtc const *__mptr___0 ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; struct drm_crtc const *__mptr___1 ; { encoder = & intel_tv->base.base; crtc = encoder->crtc; __mptr = (struct drm_crtc const *)crtc; intel_crtc = (struct intel_crtc *)__mptr; dev = encoder->dev; dev_priv = (struct drm_i915_private *)dev->dev_private; /* When hotplug polling is enabled, mask the pipestat interrupt around the forced-detect sequence. */ if ((int )connector->polled & 1) { spin_lock_irq(& dev_priv->irq_lock); i915_disable_pipestat(dev_priv, 0, 1028U); spin_unlock_irq(& dev_priv->irq_lock); } else { } tv_dac = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425988L, 1); save_tv_dac = tv_dac; tv_ctl = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425984L, 1); save_tv_ctl = tv_ctl; tv_ctl = tv_ctl & 2147483640U; tv_ctl = tv_ctl | 7U; if ((int )intel_crtc->pipe == 1) { tv_ctl = tv_ctl | 1073741824U; } else { tv_ctl = tv_ctl & 3221225471U; } tv_dac = tv_dac & 2415919040U; 
tv_dac = tv_dac | 251658410U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p->info.device_id) == 10818U) { tv_dac = tv_dac & 4043309055U; } else { } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425984L, tv_ctl, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425988L, tv_dac, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425988L, 0); __mptr___0 = (struct drm_crtc const *)intel_tv->base.base.crtc; intel_wait_for_vblank(intel_tv->base.base.dev, (int )((struct intel_crtc *)__mptr___0)->pipe); type = -1; tv_dac = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425988L, 1); tmp = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_tv_detect_type", "TV detected: %x, %x\n", tv_ctl, tv_dac); } else { } if ((tv_dac & 1879048192U) == 805306368U) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_tv_detect_type", "Detected Composite TV connection\n"); } else { } type = 5; } else if ((tv_dac & 1610612736U) == 1073741824U) { tmp___1 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_tv_detect_type", "Detected S-Video TV connection\n"); } else { } type = 6; } else if ((tv_dac & 1879048192U) == 0U) { tmp___2 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_tv_detect_type", "Detected Component TV connection\n"); } else { } type = 8; } else { tmp___3 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_tv_detect_type", "Unrecognised TV connection\n"); } else { } type = -1; } (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425988L, save_tv_dac & 4160749567U, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425984L, save_tv_ctl, 1); (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425984L, 0); __mptr___1 = (struct drm_crtc const *)intel_tv->base.base.crtc; 
intel_wait_for_vblank(intel_tv->base.base.dev, (int )((struct intel_crtc *)__mptr___1)->pipe); if ((int )connector->polled & 1) { spin_lock_irq(& dev_priv->irq_lock); i915_enable_pipestat(dev_priv, 0, 1028U); spin_unlock_irq(& dev_priv->irq_lock); } else { } return (type); } } static void intel_tv_find_better_format(struct drm_connector *connector ) { struct intel_tv *intel_tv ; struct intel_tv *tmp ; struct tv_mode const *tv_mode ; struct tv_mode const *tmp___0 ; int i ; { tmp = intel_attached_tv(connector); intel_tv = tmp; tmp___0 = intel_tv_mode_find(intel_tv); tv_mode = tmp___0; if ((intel_tv->type == 8) == (int )tv_mode->component_only) { return; } else { } i = 0; goto ldv_48555; ldv_48554: tv_mode = (struct tv_mode const *)(& tv_modes) + (unsigned long )i; if ((intel_tv->type == 8) == (int )tv_mode->component_only) { goto ldv_48553; } else { } i = i + 1; ldv_48555: ; if ((unsigned int )i <= 11U) { goto ldv_48554; } else { } ldv_48553: intel_tv->tv_format = tv_mode->name; drm_object_property_set_value(& connector->base, (connector->dev)->mode_config.tv_mode_property, (uint64_t )i); return; } } static enum drm_connector_status intel_tv_detect(struct drm_connector *connector , bool force ) { struct drm_display_mode mode ; struct intel_tv *intel_tv ; struct intel_tv *tmp ; enum drm_connector_status status ; int type ; long tmp___0 ; struct intel_load_detect_pipe tmp___1 ; struct drm_modeset_acquire_ctx ctx ; bool tmp___2 ; { tmp = intel_attached_tv(connector); intel_tv = tmp; tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_tv_detect", "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, (int )force); } else { } mode = reported_modes[0]; if ((int )force) { drm_modeset_acquire_init(& ctx, 0U); tmp___2 = intel_get_load_detect_pipe(connector, & mode, & tmp___1, & ctx); if ((int )tmp___2) { type = intel_tv_detect_type(intel_tv, connector); intel_release_load_detect_pipe(connector, & tmp___1, & 
ctx); status = type < 0 ? 2 : 1; } else { status = 3; } drm_modeset_drop_locks(& ctx); drm_modeset_acquire_fini(& ctx); } else { return (connector->status); } if ((unsigned int )status != 1U) { return (status); } else { } intel_tv->type = type; intel_tv_find_better_format(connector); return (1); } } static struct input_res const input_res_table[7U] = { {"640x480", 640, 480}, {"800x600", 800, 600}, {"1024x768", 1024, 768}, {"1280x1024", 1280, 1024}, {"848x480", 848, 480}, {"1280x720", 1280, 720}, {"1920x1080", 1920, 1080}}; static void intel_tv_chose_preferred_modes(struct drm_connector *connector , struct drm_display_mode *mode_ptr ) { struct intel_tv *intel_tv ; struct intel_tv *tmp ; struct tv_mode const *tv_mode ; struct tv_mode const *tmp___0 ; { tmp = intel_attached_tv(connector); intel_tv = tmp; tmp___0 = intel_tv_mode_find(intel_tv); tv_mode = tmp___0; if ((int )tv_mode->nbr_end <= 479 && mode_ptr->vdisplay == 480) { mode_ptr->type = mode_ptr->type | 8U; } else if ((int )tv_mode->nbr_end > 480) { if ((int )tv_mode->progressive && (int )tv_mode->nbr_end <= 719) { if (mode_ptr->vdisplay == 720) { mode_ptr->type = mode_ptr->type | 8U; } else { } } else if (mode_ptr->vdisplay == 1080) { mode_ptr->type = mode_ptr->type | 8U; } else { } } else { } return; } } static int intel_tv_get_modes(struct drm_connector *connector ) { struct drm_display_mode *mode_ptr ; struct intel_tv *intel_tv ; struct intel_tv *tmp ; struct tv_mode const *tv_mode ; struct tv_mode const *tmp___0 ; int j ; int count ; u64 tmp___1 ; struct input_res const *input ; unsigned int hactive_s ; unsigned int vactive_s ; { tmp = intel_attached_tv(connector); intel_tv = tmp; tmp___0 = intel_tv_mode_find(intel_tv); tv_mode = tmp___0; count = 0; j = 0; goto ldv_48594; ldv_48593: input = (struct input_res const *)(& input_res_table) + (unsigned long )j; hactive_s = (unsigned int )input->w; vactive_s = (unsigned int )input->h; if ((int )tv_mode->max_srcw != 0 && (int )input->w > (int )tv_mode->max_srcw) 
{ goto ldv_48592; } else { } if ((int )input->w > 1024 && (! ((_Bool )tv_mode->progressive) && ! ((_Bool )tv_mode->component_only))) { goto ldv_48592; } else { } mode_ptr = drm_mode_create(connector->dev); if ((unsigned long )mode_ptr == (unsigned long )((struct drm_display_mode *)0)) { goto ldv_48592; } else { } strncpy((char *)(& mode_ptr->name), input->name, 32UL); mode_ptr->hdisplay = (int )hactive_s; mode_ptr->hsync_start = (int )(hactive_s + 1U); mode_ptr->hsync_end = (int )(hactive_s + 64U); if (mode_ptr->hsync_end <= mode_ptr->hsync_start) { mode_ptr->hsync_end = mode_ptr->hsync_start + 1; } else { } mode_ptr->htotal = (int )(hactive_s + 96U); mode_ptr->vdisplay = (int )vactive_s; mode_ptr->vsync_start = (int )(vactive_s + 1U); mode_ptr->vsync_end = (int )(vactive_s + 32U); if (mode_ptr->vsync_end <= mode_ptr->vsync_start) { mode_ptr->vsync_end = mode_ptr->vsync_start + 1; } else { } mode_ptr->vtotal = (int )(vactive_s + 33U); tmp___1 = (unsigned long long )tv_mode->refresh * (unsigned long long )mode_ptr->vtotal; tmp___1 = (u64 )mode_ptr->htotal * tmp___1; tmp___1 = div_u64(tmp___1, 1000000U); mode_ptr->clock = (int )tmp___1; mode_ptr->type = 64U; intel_tv_chose_preferred_modes(connector, mode_ptr); drm_mode_probed_add(connector, mode_ptr); count = count + 1; ldv_48592: j = j + 1; ldv_48594: ; if ((unsigned int )j <= 6U) { goto ldv_48593; } else { } return (count); } } static void intel_tv_destroy(struct drm_connector *connector ) { { drm_connector_cleanup(connector); kfree((void const *)connector); return; } } static int intel_tv_set_property(struct drm_connector *connector , struct drm_property *property , uint64_t val ) { struct drm_device *dev ; struct intel_tv *intel_tv ; struct intel_tv *tmp ; struct drm_crtc *crtc ; int ret ; bool changed ; int tmp___0 ; { dev = connector->dev; tmp = intel_attached_tv(connector); intel_tv = tmp; crtc = intel_tv->base.base.crtc; ret = 0; changed = 0; ret = drm_object_property_set_value(& connector->base, property, 
val); if (ret < 0) { goto out; } else { } if ((unsigned long )dev->mode_config.tv_left_margin_property == (unsigned long )property && (uint64_t )intel_tv->margin[0] != val) { intel_tv->margin[0] = (int )val; changed = 1; } else if ((unsigned long )dev->mode_config.tv_right_margin_property == (unsigned long )property && (uint64_t )intel_tv->margin[2] != val) { intel_tv->margin[2] = (int )val; changed = 1; } else if ((unsigned long )dev->mode_config.tv_top_margin_property == (unsigned long )property && (uint64_t )intel_tv->margin[1] != val) { intel_tv->margin[1] = (int )val; changed = 1; } else if ((unsigned long )dev->mode_config.tv_bottom_margin_property == (unsigned long )property && (uint64_t )intel_tv->margin[3] != val) { intel_tv->margin[3] = (int )val; changed = 1; } else if ((unsigned long )dev->mode_config.tv_mode_property == (unsigned long )property) { if (val > 11ULL) { ret = -22; goto out; } else { } tmp___0 = strcmp(intel_tv->tv_format, tv_modes[val].name); if (tmp___0 == 0) { goto out; } else { } intel_tv->tv_format = tv_modes[val].name; changed = 1; } else { ret = -22; goto out; } if ((int )changed && (unsigned long )crtc != (unsigned long )((struct drm_crtc *)0)) { intel_crtc_restore_mode(crtc); } else { } out: ; return (ret); } } static struct drm_connector_funcs const intel_tv_connector_funcs = {& intel_connector_dpms, 0, 0, 0, & intel_tv_detect, & drm_helper_probe_single_connector_modes, & intel_tv_set_property, & intel_tv_destroy, 0, & drm_atomic_helper_connector_duplicate_state, & drm_atomic_helper_connector_destroy_state, 0, & intel_connector_atomic_get_property}; static struct drm_connector_helper_funcs const intel_tv_connector_helper_funcs = {& intel_tv_get_modes, & intel_tv_mode_valid, & intel_best_encoder}; static struct drm_encoder_funcs const intel_tv_enc_funcs = {0, & intel_encoder_destroy}; static int tv_is_present_in_vbt(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; union child_device_config *p_child ; int i ; int ret ; 
{ dev_priv = (struct drm_i915_private *)dev->dev_private; if (dev_priv->vbt.child_dev_num == 0) { return (1); } else { } ret = 0; i = 0; goto ldv_48630; ldv_48629: p_child = dev_priv->vbt.child_dev + (unsigned long )i; switch ((int )p_child->old.device_type) { case 4105: ; case 9: ; case 1545: ; goto ldv_48625; default: ; goto ldv_48627; } ldv_48625: ; if ((unsigned int )p_child->old.addin_offset != 0U) { ret = 1; goto ldv_48628; } else { } ldv_48627: i = i + 1; ldv_48630: ; if (dev_priv->vbt.child_dev_num > i) { goto ldv_48629; } else { } ldv_48628: ; return (ret); } } void intel_tv_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_connector *connector ; struct intel_tv *intel_tv ; struct intel_encoder *intel_encoder ; struct intel_connector *intel_connector ; u32 tv_dac_on ; u32 tv_dac_off ; u32 save_tv_dac ; char *tv_format_names___0[12U] ; int i ; int initial_mode ; uint32_t tmp ; long tmp___0 ; int tmp___1 ; void *tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; initial_mode = 0; tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425984L, 1); if ((tmp & 48U) == 32U) { return; } else { } tmp___1 = tv_is_present_in_vbt(dev); if (tmp___1 == 0) { tmp___0 = ldv__builtin_expect((drm_debug & 4U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_tv_init", "Integrated TV is not present.\n"); } else { } return; } else { } if ((unsigned int )*((unsigned char *)dev_priv + 41280UL) == 0U) { return; } else { } save_tv_dac = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425988L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425988L, save_tv_dac | 134217728U, 1); tv_dac_on = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425988L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425988L, save_tv_dac & 4160749567U, 1); tv_dac_off = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 425988L, 1); (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 425988L, save_tv_dac, 1); if ((tv_dac_on & 134217728U) == 
0U || (tv_dac_off & 134217728U) != 0U) { return; } else { } tmp___2 = kzalloc(1184UL, 208U); intel_tv = (struct intel_tv *)tmp___2; if ((unsigned long )intel_tv == (unsigned long )((struct intel_tv *)0)) { return; } else { } intel_connector = intel_connector_alloc(); if ((unsigned long )intel_connector == (unsigned long )((struct intel_connector *)0)) { kfree((void const *)intel_tv); return; } else { } intel_encoder = & intel_tv->base; connector = & intel_connector->base; intel_connector->polled = 2U; drm_connector_init(dev, connector, & intel_tv_connector_funcs, 6); drm_encoder_init(dev, & intel_encoder->base, & intel_tv_enc_funcs, 4); intel_encoder->compute_config = & intel_tv_compute_config; intel_encoder->get_config = & intel_tv_get_config; intel_encoder->pre_enable = & intel_tv_pre_enable; intel_encoder->enable = & intel_enable_tv; intel_encoder->disable = & intel_disable_tv; intel_encoder->get_hw_state = & intel_tv_get_hw_state; intel_connector->get_hw_state = & intel_connector_get_hw_state; intel_connector->unregister = & intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = 5; intel_encoder->crtc_mask = 3; intel_encoder->cloneable = 0U; intel_encoder->base.possible_crtcs = 3U; intel_tv->type = 0; intel_tv->margin[0] = 54; intel_tv->margin[1] = 36; intel_tv->margin[2] = 46; intel_tv->margin[3] = 37; intel_tv->tv_format = tv_modes[initial_mode].name; drm_connector_helper_add(connector, & intel_tv_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; i = 0; goto ldv_48651; ldv_48650: tv_format_names___0[i] = (char *)tv_modes[i].name; i = i + 1; ldv_48651: ; if ((unsigned int )i <= 11U) { goto ldv_48650; } else { } drm_mode_create_tv_properties(dev, 12U, (char **)(& tv_format_names___0)); drm_object_attach_property(& connector->base, dev->mode_config.tv_mode_property, (uint64_t )initial_mode); drm_object_attach_property(& connector->base, 
dev->mode_config.tv_left_margin_property, (uint64_t )intel_tv->margin[0]); drm_object_attach_property(& connector->base, dev->mode_config.tv_top_margin_property, (uint64_t )intel_tv->margin[1]); drm_object_attach_property(& connector->base, dev->mode_config.tv_right_margin_property, (uint64_t )intel_tv->margin[2]); drm_object_attach_property(& connector->base, dev->mode_config.tv_bottom_margin_property, (uint64_t )intel_tv->margin[3]); drm_connector_register(connector); return; } } extern int ldv_probe_24(void) ; extern int ldv_probe_26(void) ; void ldv_initialize_drm_connector_helper_funcs_25(void) { void *tmp ; { tmp = ldv_init_zalloc(936UL); intel_tv_connector_helper_funcs_group0 = (struct drm_connector *)tmp; return; } } void ldv_initialize_drm_connector_funcs_26(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(104UL); intel_tv_connector_funcs_group0 = (struct drm_property *)tmp; tmp___0 = ldv_init_zalloc(936UL); intel_tv_connector_funcs_group1 = (struct drm_connector *)tmp___0; return; } } void ldv_main_exported_25(void) { struct drm_display_mode *ldvarg468 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(208UL); ldvarg468 = (struct drm_display_mode *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_25 == 1) { intel_tv_get_modes(intel_tv_connector_helper_funcs_group0); ldv_state_variable_25 = 1; } else { } goto ldv_48670; case 1: ; if (ldv_state_variable_25 == 1) { intel_tv_mode_valid(intel_tv_connector_helper_funcs_group0, ldvarg468); ldv_state_variable_25 = 1; } else { } goto ldv_48670; case 2: ; if (ldv_state_variable_25 == 1) { intel_best_encoder(intel_tv_connector_helper_funcs_group0); ldv_state_variable_25 = 1; } else { } goto ldv_48670; default: ldv_stop(); } ldv_48670: ; return; } } void ldv_main_exported_24(void) { struct drm_encoder *ldvarg178 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(96UL); ldvarg178 = (struct drm_encoder *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { 
/* Remainder of ldv_main_exported_24: destroy is only reachable from state 2
 * (probed) and decrements ref_cnt; probe moves 1 -> 2 and increments it.
 * Then ldv_main_exported_26: the LDV driver for the connector funcs table -
 * it allocates nondet argument stand-ins and, per nondet choice, invokes
 * dpms / destroy_state / duplicate_state / detect / set_property / destroy /
 * probe_modes / atomic_get_property in whichever of states 1 or 2 permits. */
case 0: ; if (ldv_state_variable_24 == 2) { intel_encoder_destroy(ldvarg178); ldv_state_variable_24 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48679; case 1: ; if (ldv_state_variable_24 == 1) { ldv_probe_24(); ldv_state_variable_24 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48679; default: ldv_stop(); } ldv_48679: ; return; } } void ldv_main_exported_26(void) { struct drm_connector_state *ldvarg261 ; void *tmp ; bool ldvarg260 ; uint64_t *ldvarg255 ; void *tmp___0 ; uint32_t ldvarg258 ; int ldvarg262 ; uint64_t ldvarg259 ; uint32_t ldvarg257 ; struct drm_connector_state *ldvarg256 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(32UL); ldvarg261 = (struct drm_connector_state *)tmp; tmp___0 = ldv_init_zalloc(8UL); ldvarg255 = (uint64_t *)tmp___0; tmp___1 = ldv_init_zalloc(32UL); ldvarg256 = (struct drm_connector_state *)tmp___1; ldv_memset((void *)(& ldvarg260), 0, 1UL); ldv_memset((void *)(& ldvarg258), 0, 4UL); ldv_memset((void *)(& ldvarg262), 0, 4UL); ldv_memset((void *)(& ldvarg259), 0, 8UL); ldv_memset((void *)(& ldvarg257), 0, 4UL); tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_26 == 2) { intel_connector_dpms(intel_tv_connector_funcs_group1, ldvarg262); ldv_state_variable_26 = 2; } else { } if (ldv_state_variable_26 == 1) { intel_connector_dpms(intel_tv_connector_funcs_group1, ldvarg262); ldv_state_variable_26 = 1; } else { } goto ldv_48694; case 1: ; if (ldv_state_variable_26 == 2) { drm_atomic_helper_connector_destroy_state(intel_tv_connector_funcs_group1, ldvarg261); ldv_state_variable_26 = 2; } else { } if (ldv_state_variable_26 == 1) { drm_atomic_helper_connector_destroy_state(intel_tv_connector_funcs_group1, ldvarg261); ldv_state_variable_26 = 1; } else { } goto ldv_48694; case 2: ; if (ldv_state_variable_26 == 2) { drm_atomic_helper_connector_duplicate_state(intel_tv_connector_funcs_group1); ldv_state_variable_26 = 2; } else { } if (ldv_state_variable_26 == 1) {
/* Cases 3-8: detect/set_property callable from either state; destroy (case 5)
 * only from state 2 (drops ref_cnt); probe (case 8) only from state 1. Any
 * nondet value outside 0-8 is pruned via ldv_stop(). */
drm_atomic_helper_connector_duplicate_state(intel_tv_connector_funcs_group1); ldv_state_variable_26 = 1; } else { } goto ldv_48694; case 3: ; if (ldv_state_variable_26 == 2) { intel_tv_detect(intel_tv_connector_funcs_group1, (int )ldvarg260); ldv_state_variable_26 = 2; } else { } if (ldv_state_variable_26 == 1) { intel_tv_detect(intel_tv_connector_funcs_group1, (int )ldvarg260); ldv_state_variable_26 = 1; } else { } goto ldv_48694; case 4: ; if (ldv_state_variable_26 == 2) { intel_tv_set_property(intel_tv_connector_funcs_group1, intel_tv_connector_funcs_group0, ldvarg259); ldv_state_variable_26 = 2; } else { } if (ldv_state_variable_26 == 1) { intel_tv_set_property(intel_tv_connector_funcs_group1, intel_tv_connector_funcs_group0, ldvarg259); ldv_state_variable_26 = 1; } else { } goto ldv_48694; case 5: ; if (ldv_state_variable_26 == 2) { intel_tv_destroy(intel_tv_connector_funcs_group1); ldv_state_variable_26 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_48694; case 6: ; if (ldv_state_variable_26 == 2) { drm_helper_probe_single_connector_modes(intel_tv_connector_funcs_group1, ldvarg258, ldvarg257); ldv_state_variable_26 = 2; } else { } if (ldv_state_variable_26 == 1) { drm_helper_probe_single_connector_modes(intel_tv_connector_funcs_group1, ldvarg258, ldvarg257); ldv_state_variable_26 = 1; } else { } goto ldv_48694; case 7: ; if (ldv_state_variable_26 == 2) { intel_connector_atomic_get_property(intel_tv_connector_funcs_group1, (struct drm_connector_state const *)ldvarg256, intel_tv_connector_funcs_group0, ldvarg255); ldv_state_variable_26 = 2; } else { } if (ldv_state_variable_26 == 1) { intel_connector_atomic_get_property(intel_tv_connector_funcs_group1, (struct drm_connector_state const *)ldvarg256, intel_tv_connector_funcs_group0, ldvarg255); ldv_state_variable_26 = 1; } else { } goto ldv_48694; case 8: ; if (ldv_state_variable_26 == 1) { ldv_probe_26(); ldv_state_variable_26 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_48694; default: ldv_stop(); }
/* LDV instrumented workqueue wrappers (ids 1035-1039): each forwards to the
 * real queue_work_on/queue_delayed_work_on/flush_workqueue and additionally
 * records the work item in the LDV work model via activate_work_18 /
 * call_and_disable_all_18. The trailing prototypes (1049-1053) are the next
 * translation unit's copies of the same wrappers. */
ldv_48694: ; return; } } bool ldv_queue_work_on_1035(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_1036(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_1037(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_1038(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_1039(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_1049(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_1051(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_1050(int ldv_func_arg1 , struct
/* i915 vGPU (GVT-g) support, flattened by CIL. i915_check_vgpu reads a 64-bit
 * magic at dev_priv->regs + 491520 (0x78000) and a 16.16 version; only the
 * exact magic 0x4776544776544776-style constant and version 65536 (1.0)
 * enable dev_priv->vgpu.active. intel_vgt_deballoon walks the 4-entry
 * bl_info.space array, removing any allocated drm_mm nodes, then zeroes
 * bl_info (288 bytes). vgt_balloon_space (starts at end of this line)
 * reserves one [start,end) range as a drm_mm node. */
workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_1053(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_1052(struct workqueue_struct *ldv_func_arg1 ) ; void i915_check_vgpu(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; uint64_t magic ; uint32_t version ; struct drm_i915_private *__p ; unsigned long tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { return; } else { } tmp___0 = readq((void const volatile *)dev_priv->regs + 491520U); magic = (uint64_t )tmp___0; if (magic != 5149395889849386870ULL) { return; } else { } tmp___1 = readw((void const volatile *)dev_priv->regs + 491528U); tmp___2 = readw((void const volatile *)dev_priv->regs + 491530U); version = (uint32_t )(((int )tmp___1 << 16) | (int )tmp___2); if (version != 65536U) { printk("\016[drm] VGT interface version mismatch!\n"); return; } else { } dev_priv->vgpu.active = 1; printk("\016[drm] Virtual GPU for Intel GVT-g detected.\n"); return; } } static struct _balloon_info_ bl_info ; void intel_vgt_deballoon(void) { int i ; long tmp ; { tmp = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp != 0L) { drm_ut_debug_printk("intel_vgt_deballoon", "VGT deballoon.\n"); } else { } i = 0; goto ldv_48026; ldv_48025: ; if ((unsigned int )bl_info.space[i].allocated != 0U) { drm_mm_remove_node((struct drm_mm_node *)(& bl_info.space) + (unsigned long )i); } else { } i = i + 1; ldv_48026: ; if (i <= 3) { goto ldv_48025; } else { } memset((void *)(& bl_info), 0, 288UL); return; } } static int vgt_balloon_space(struct drm_mm *mm , struct drm_mm_node *node , unsigned long start , unsigned long end ) {
/* intel_vgt_balloon: reads the host-assigned mappable/unmappable GGTT ranges
 * from registers 491584..491596 (0x78040..0x7804C), validates them against
 * the local GGTT layout, then balloons out (reserves) up to four gaps:
 * [mappable_end, unmappable_base), [unmappable_end, ggtt_end-4096),
 * [ggtt_start, mappable_base), [mappable_end_local, mappable_end). Empty
 * ranges return -22 (-EINVAL) from vgt_balloon_space. */
unsigned long size ; int tmp ; { size = end - start; if (start == end) { return (-22); } else { } printk("\016[drm] balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", start, end, size / 1024UL); node->start = (u64 )start; node->size = (u64 )size; tmp = drm_mm_reserve_node(mm, node); return (tmp); } } int intel_vgt_balloon(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct drm_i915_private *tmp ; struct i915_address_space *ggtt_vm ; unsigned long ggtt_vm_end ; unsigned long mappable_base ; unsigned long mappable_size ; unsigned long mappable_end ; unsigned long unmappable_base ; unsigned long unmappable_size ; unsigned long unmappable_end ; int ret ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; { tmp = to_i915((struct drm_device const *)dev); dev_priv = tmp; ggtt_vm = & dev_priv->gtt.base; ggtt_vm_end = ggtt_vm->start + ggtt_vm->total; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 491584L, 1); mappable_base = (unsigned long )tmp___0; tmp___1 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 491588L, 1); mappable_size = (unsigned long )tmp___1; tmp___2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 491592L, 1); unmappable_base = (unsigned long )tmp___2; tmp___3 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 491596L, 1); unmappable_size = (unsigned long )tmp___3; mappable_end = mappable_base + mappable_size; unmappable_end = unmappable_base + unmappable_size; printk("\016[drm] VGT ballooning configuration:\n"); printk("\016[drm] Mappable graphic memory: base 0x%lx size %ldKiB\n", mappable_base, mappable_size / 1024UL); printk("\016[drm] Unmappable graphic memory: base 0x%lx size %ldKiB\n", unmappable_base, unmappable_size / 1024UL); if (((ggtt_vm->start > mappable_base || dev_priv->gtt.mappable_end < mappable_end) || dev_priv->gtt.mappable_end > unmappable_base) || unmappable_end > ggtt_vm_end) { drm_err("Invalid ballooning configuration!\n"); return (-22); } else { } if
/* On any reservation failure, 'err' deballoons everything already reserved.
 * The tail of this line is the next TU's set of LDV workqueue wrappers
 * (1049-1051), identical in shape to 1035-1037 above. */
(dev_priv->gtt.mappable_end < unmappable_base) { ret = vgt_balloon_space(& ggtt_vm->mm, (struct drm_mm_node *)(& bl_info.space) + 2UL, dev_priv->gtt.mappable_end, unmappable_base); if (ret != 0) { goto err; } else { } } else { } if (ggtt_vm_end - 4096UL > unmappable_end) { ret = vgt_balloon_space(& ggtt_vm->mm, (struct drm_mm_node *)(& bl_info.space) + 3UL, unmappable_end, ggtt_vm_end - 4096UL); if (ret != 0) { goto err; } else { } } else { } if (ggtt_vm->start < mappable_base) { ret = vgt_balloon_space(& ggtt_vm->mm, (struct drm_mm_node *)(& bl_info.space), ggtt_vm->start, mappable_base); if (ret != 0) { goto err; } else { } } else { } if (dev_priv->gtt.mappable_end > mappable_end) { ret = vgt_balloon_space(& ggtt_vm->mm, (struct drm_mm_node *)(& bl_info.space) + 1UL, mappable_end, dev_priv->gtt.mappable_end); if (ret != 0) { goto err; } else { } } else { } printk("\016[drm] VGT balloon successfully\n"); return (0); err: drm_err("VGT balloon fail\n"); intel_vgt_deballoon(); return (ret); } } bool ldv_queue_work_on_1049(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_1050(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_1051(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp;
/* Tail of ldv_queue_work_on_1051, then flush/delayed wrappers 1052/1053,
 * followed by this TU's extern prototypes (workqueue, PCI, framebuffer
 * takeover, VGA switcheroo, ACPI video, OOM notifier). */
activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_1052(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } } bool ldv_queue_delayed_work_on_1053(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct workqueue_struct *__alloc_workqueue_key(char const * , unsigned int , int , struct lock_class_key * , char const * , ...) ; extern void destroy_workqueue(struct workqueue_struct * ) ; void ldv_destroy_workqueue_1068(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_1069(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_1070(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_1073(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_1074(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_1075(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_queue_work_on_1063(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_1065(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_1064(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_1067(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_1066(struct workqueue_struct *ldv_func_arg1 ) ; void
/* Inline PCI helpers expanded by CIL: pci_get_bus_and_slot == domain 0
 * lookup; pci_write_config_dword forwards to the bus-level writer;
 * pci_enable_msi_exact requests exactly nvec MSI vectors and collapses
 * any positive return to 0. */
ldv_flush_workqueue_1072(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_cancel_delayed_work_sync_1071(struct delayed_work *ldv_func_arg1 ) ; extern unsigned long pci_mem_start ; extern int release_resource(struct resource * ) ; extern void pci_iounmap(struct pci_dev * , void * ) ; extern void *pci_iomap(struct pci_dev * , int , unsigned long ) ; extern int arch_phys_wc_add(unsigned long , unsigned long ) ; extern void arch_phys_wc_del(int ) ; extern void kmem_cache_destroy(struct kmem_cache * ) ; extern async_cookie_t async_schedule(void (*)(void * , async_cookie_t ) , void * ) ; extern void unregister_shrinker(struct shrinker * ) ; extern resource_size_t pcibios_align_resource(void * , struct resource const * , resource_size_t , resource_size_t ) ; extern struct pci_dev *pci_get_domain_bus_and_slot(int , unsigned int , unsigned int ) ; __inline static struct pci_dev *pci_get_bus_and_slot(unsigned int bus , unsigned int devfn ) { struct pci_dev *tmp ; { tmp = pci_get_domain_bus_and_slot(0, bus, devfn); return (tmp); } } extern int pci_bus_write_config_dword(struct pci_bus * , unsigned int , int , u32 ) ; __inline static int pci_write_config_dword(struct pci_dev const *dev , int where , u32 val ) { int tmp ; { tmp = pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); return (tmp); } } extern int pci_bus_alloc_resource(struct pci_bus * , struct resource * , resource_size_t , resource_size_t , resource_size_t , unsigned long , resource_size_t (*)(void * , struct resource const * , resource_size_t , resource_size_t ) , void * ) ; extern void pci_disable_msi(struct pci_dev * ) ; extern int pci_enable_msi_range(struct pci_dev * , int , int ) ; __inline static int pci_enable_msi_exact(struct pci_dev *dev , int nvec ) { int rc ; int tmp ; { tmp = pci_enable_msi_range(dev, nvec, nvec); rc = tmp; if (rc < 0) { return (rc); } else { } return (0); } } extern int remove_conflicting_framebuffers(struct apertures_struct * , char const * , bool ) ; extern int
/* io_mapping helpers (ioremap_wc/iounmap thin wrappers), more DRM/console/
 * switcheroo externs, then the declarations of i915_getparam - the
 * DRM_IOCTL_I915_GETPARAM handler whose body begins on the next line. */
drm_noop(struct drm_device * , void * , struct drm_file * ) ; extern int drm_vblank_init(struct drm_device * , int ) ; extern void drm_vblank_cleanup(struct drm_device * ) ; extern void drm_kms_helper_poll_init(struct drm_device * ) ; __inline static struct io_mapping *io_mapping_create_wc(resource_size_t base , unsigned long size ) { void *tmp ; { tmp = ioremap_wc(base, size); return ((struct io_mapping *)tmp); } } __inline static void io_mapping_free(struct io_mapping *mapping ) { { iounmap((void volatile *)mapping); return; } } extern void pm_qos_remove_request(struct pm_qos_request * ) ; extern struct consw const dummy_con ; extern struct consw const vga_con ; extern int con_is_bound(struct consw const * ) ; extern int do_unregister_con_driver(struct consw const * ) ; extern int do_take_over_console(struct consw const * , int , int , int ) ; extern int vga_client_register(struct pci_dev * , void * , void (*)(void * , bool ) , unsigned int (*)(void * , bool ) ) ; extern int pnp_range_reserved(resource_size_t , resource_size_t ) ; extern void vga_switcheroo_unregister_client(struct pci_dev * ) ; extern int vga_switcheroo_register_client(struct pci_dev * , struct vga_switcheroo_client_ops const * , bool ) ; extern int vga_switcheroo_process_delayed_switch(void) ; extern int acpi_video_register(void) ; extern void acpi_video_unregister(void) ; extern int unregister_oom_notifier(struct notifier_block * ) ; static int i915_getparam(struct drm_device *dev , void *data , struct drm_file *file_priv ) { struct drm_i915_private *dev_priv ; drm_i915_getparam_t *param ; int value ; bool tmp ; bool tmp___0 ; bool tmp___1 ; bool tmp___2 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; int tmp___3 ; bool tmp___4 ; bool tmp___5 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; long tmp___6 ; unsigned
/* i915_getparam body: a big switch over the userspace parameter id.
 * Legacy params 1-3 return -19 (-ENODEV); many capability queries return a
 * constant 1; ring-presence params (10/11/22/31) probe engines via
 * intel_ring_initialized; param 27 is the WT-cacheable check combining a
 * byte flag at offset 45, gen==8, and nonzero ellc_size. */
long tmp___7 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; param = (drm_i915_getparam_t *)data; switch (param->param) { case 1: ; case 2: ; case 3: ; return (-19); case 4: value = (int )(dev->pdev)->device; goto ldv_52756; case 32: value = (int )(dev->pdev)->revision; goto ldv_52756; case 5: value = 1; goto ldv_52756; case 6: value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; goto ldv_52756; case 7: value = (unsigned long )dev_priv->overlay != (unsigned long )((struct intel_overlay *)0); goto ldv_52756; case 8: value = 1; goto ldv_52756; case 9: value = 1; goto ldv_52756; case 10: tmp = intel_ring_initialized((struct intel_engine_cs *)(& dev_priv->ring) + 1UL); value = (int )tmp; goto ldv_52756; case 11: tmp___0 = intel_ring_initialized((struct intel_engine_cs *)(& dev_priv->ring) + 2UL); value = (int )tmp___0; goto ldv_52756; case 22: tmp___1 = intel_ring_initialized((struct intel_engine_cs *)(& dev_priv->ring) + 3UL); value = (int )tmp___1; goto ldv_52756; case 31: tmp___2 = intel_ring_initialized((struct intel_engine_cs *)(& dev_priv->ring) + 4UL); value = (int )tmp___2; goto ldv_52756; case 12: value = 1; goto ldv_52756; case 13: value = 1; goto ldv_52756; case 14: __p = to_i915((struct drm_device const *)dev); value = (unsigned int )((unsigned char )__p->info.gen) > 3U; goto ldv_52756; case 15: value = 1; goto ldv_52756; case 16: value = 1; goto ldv_52756; case 17: __p___0 = to_i915((struct drm_device const *)dev); value = (int )__p___0->info.has_llc; goto ldv_52756; case 27: __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 8U) { _L: /* CIL Label */ __p___4 = to_i915((struct drm_device const *)dev); if (__p___4->ellc_size != 0UL) {
/* Remaining params: subslice/EU totals return -19 if zero (info not
 * populated); unknown params return -22 (-EINVAL). The common exit
 * ldv_52756 copies 'value' to the user pointer, returning -14 (-EFAULT)
 * on copy failure. Then i915_setparam: only param 4 (fence reservation)
 * is still honored, range-checked against num_fence_regs. */
tmp___3 = 1; } else { tmp___3 = 0; } } else { tmp___3 = 0; } } else { tmp___3 = 0; } } value = tmp___3; goto ldv_52756; case 18: value = i915.enable_ppgtt; goto ldv_52756; case 19: value = 1; goto ldv_52756; case 20: tmp___4 = i915_semaphore_is_enabled(dev); value = (int )tmp___4; goto ldv_52756; case 21: value = 1; goto ldv_52756; case 23: tmp___5 = capable(21); value = (int )tmp___5; goto ldv_52756; case 24: value = 1; goto ldv_52756; case 25: value = 1; goto ldv_52756; case 26: value = 1; goto ldv_52756; case 28: value = i915_cmd_parser_get_version(); goto ldv_52756; case 29: value = 1; goto ldv_52756; case 30: value = 1; goto ldv_52756; case 33: __p___5 = to_i915((struct drm_device const *)dev); value = (int )__p___5->info.subslice_total; if (value == 0) { return (-19); } else { } goto ldv_52756; case 34: __p___6 = to_i915((struct drm_device const *)dev); value = (int )__p___6->info.eu_total; if (value == 0) { return (-19); } else { } goto ldv_52756; default: tmp___6 = ldv__builtin_expect((long )((int )drm_debug) & 1L, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("i915_getparam", "Unknown parameter %d\n", param->param); } else { } return (-22); } ldv_52756: tmp___7 = copy_to_user((void *)param->value, (void const *)(& value), 4UL); if (tmp___7 != 0UL) { drm_err("copy_to_user failed\n"); return (-14); } else { } return (0); } } static int i915_setparam(struct drm_device *dev , void *data , struct drm_file *file_priv ) { struct drm_i915_private *dev_priv ; drm_i915_setparam_t *param ; long tmp ; { dev_priv = (struct drm_i915_private *)dev->dev_private; param = (drm_i915_setparam_t *)data; switch (param->param) { case 1: ; case 2: ; case 3: ; return (-19); case 4: ; if (param->value > dev_priv->num_fence_regs || param->value < 0) { return (-22); } else { } dev_priv->fence_reg_start = param->value; goto ldv_52848; default: tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_setparam", "unknown parameter %d\n",
/* i915_get_bridge_dev caches the host bridge (bus 0, devfn 0).
 * intel_alloc_mchbar_resource: MCHBAR config register is 0x48 (72) on gen>3,
 * 0x44 (68) otherwise, 64-bit on gen>3; if the 16KiB MCHBAR range is not
 * already reserved by PNP, a 16KiB-aligned bus resource is allocated and
 * (on the next line) written back to the bridge config space. */
param->param); } else { } return (-22); } ldv_52848: ; return (0); } } static int i915_get_bridge_dev(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; { dev_priv = (struct drm_i915_private *)dev->dev_private; dev_priv->bridge_dev = pci_get_bus_and_slot(0U, 0U); if ((unsigned long )dev_priv->bridge_dev == (unsigned long )((struct pci_dev *)0)) { drm_err("bridge device not found\n"); return (-1); } else { } return (0); } } static int intel_alloc_mchbar_resource(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int reg ; struct drm_i915_private *__p ; u32 temp_lo ; u32 temp_hi ; u64 mchbar_addr ; int ret ; struct drm_i915_private *__p___0 ; int tmp ; long tmp___0 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); reg = (unsigned int )((unsigned char )__p->info.gen) > 3U ? 72 : 68; temp_hi = 0U; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) > 3U) { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, reg + 4, & temp_hi); } else { } pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, reg, & temp_lo); mchbar_addr = ((unsigned long long )temp_hi << 32) | (unsigned long long )temp_lo; if (mchbar_addr != 0ULL) { tmp = pnp_range_reserved(mchbar_addr, mchbar_addr + 16384ULL); if (tmp != 0) { return (0); } else { } } else { } dev_priv->mch_res.name = "i915 MCHBAR"; dev_priv->mch_res.flags = 512UL; ret = pci_bus_alloc_resource((dev_priv->bridge_dev)->bus, & dev_priv->mch_res, 16384ULL, 16384ULL, (resource_size_t )pci_mem_start, 0UL, & pcibios_align_resource, (void *)dev_priv->bridge_dev); if (ret != 0) { tmp___0 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___0 != 0L) { drm_ut_debug_printk("intel_alloc_mchbar_resource", "failed bus alloc: %d\n", ret); } else { } dev_priv->mch_res.start = 0ULL; return (ret); } else { } __p___1 = to_i915((struct drm_device const
/* Tail of intel_alloc_mchbar_resource (writes the allocated MCHBAR back to
 * bridge config space, hi dword first on gen>3). intel_setup_mchbar then
 * checks the enable bit - at config 0x54 bit 28 for the two special cases
 * (byte flag at offset 44, or device id 9618/0x2592), else bit 0 of the
 * MCHBAR register - allocates the BAR if disabled, and sets the enable bit,
 * remembering mchbar_need_disable for teardown. */
*)dev); if ((unsigned int )((unsigned char )__p___1->info.gen) > 3U) { pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, reg + 4, (unsigned int )(dev_priv->mch_res.start >> 32ULL)); } else { } pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, reg, (unsigned int )dev_priv->mch_res.start); return (0); } } static void intel_setup_mchbar(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int mchbar_reg ; struct drm_i915_private *__p ; u32 temp ; bool enabled ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; int tmp ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); mchbar_reg = (unsigned int )((unsigned char )__p->info.gen) > 3U ? 72 : 68; __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 45UL) != 0U) { return; } else { } dev_priv->mchbar_need_disable = 0; __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 44UL) != 0U) { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84, & temp); enabled = (temp & 268435456U) != 0U; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___2->info.device_id) == 9618U) { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84, & temp); enabled = (temp & 268435456U) != 0U; } else { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, mchbar_reg, & temp); enabled = (temp & 1U) != 0U; } } if ((int )enabled) { return; } else { } tmp = intel_alloc_mchbar_resource(dev); if (tmp != 0) { return; } else { } dev_priv->mchbar_need_disable = 1; __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 44UL) != 0U) { pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84,
/* intel_teardown_mchbar mirrors setup: if we enabled MCHBAR, clear the same
 * enable bit (0x54 bit 28 or MCHBAR bit 0) and release the bus resource.
 * i915_vga_set_decode tells the VGA arbiter which legacy ranges this GPU
 * decodes (15 = io+mem, 12 = mem-only constants). */
temp | 268435456U); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___4->info.device_id) == 9618U) { pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84, temp | 268435456U); } else { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, mchbar_reg, & temp); pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, mchbar_reg, temp | 1U); } } return; } } static void intel_teardown_mchbar(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int mchbar_reg ; struct drm_i915_private *__p ; u32 temp ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; __p = to_i915((struct drm_device const *)dev); mchbar_reg = (unsigned int )((unsigned char )__p->info.gen) > 3U ? 72 : 68; if ((int )dev_priv->mchbar_need_disable) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___0 + 44UL) != 0U) { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84, & temp); temp = temp & 4026531839U; pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84, temp); } else { __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___1->info.device_id) == 9618U) { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84, & temp); temp = temp & 4026531839U; pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, 84, temp); } else { pci_read_config_dword((struct pci_dev const *)dev_priv->bridge_dev, mchbar_reg, & temp); temp = temp & 4294967294U; pci_write_config_dword((struct pci_dev const *)dev_priv->bridge_dev, mchbar_reg, temp); } } } else { } if (dev_priv->mch_res.start != 0ULL) { release_resource(& dev_priv->mch_res); } else { } return; } } static unsigned int i915_vga_set_decode(void *cookie , bool state ) { struct drm_device *dev ; { dev = (struct drm_device *)cookie; 
/* VGA switcheroo callbacks: set_state resumes (i915_resume_legacy) or
 * suspends (i915_suspend_legacy) the device around power switches;
 * can_switch permits a switch only with no open handles. Then the ops
 * table (no reprobe hook) and the start of i915_load_modeset_init, which
 * parses the VBIOS, registers with the VGA arbiter/switcheroo, sets up
 * stolen memory, IRQs, modeset and GEM. */
intel_modeset_vga_set_state(dev, (int )state); if ((int )state) { return (15U); } else { return (12U); } } } static void i915_switcheroo_set_state(struct pci_dev *pdev , enum vga_switcheroo_state state ) { struct drm_device *dev ; void *tmp ; pm_message_t pmm ; { tmp = pci_get_drvdata(pdev); dev = (struct drm_device *)tmp; pmm.event = 2; if ((unsigned int )state == 1U) { printk("\016i915: switched on\n"); dev->switch_power_state = 2; pci_set_power_state(dev->pdev, 0); i915_resume_legacy(dev); dev->switch_power_state = 0; } else { printk("\vi915: switched off\n"); dev->switch_power_state = 2; i915_suspend_legacy(dev, pmm); dev->switch_power_state = 1; } return; } } static bool i915_switcheroo_can_switch(struct pci_dev *pdev ) { struct drm_device *dev ; void *tmp ; { tmp = pci_get_drvdata(pdev); dev = (struct drm_device *)tmp; return (dev->open_count == 0); } } static struct vga_switcheroo_client_ops const i915_switcheroo_ops = {& i915_switcheroo_set_state, (void (*)(struct pci_dev * ))0, & i915_switcheroo_can_switch}; static int i915_load_modeset_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; struct drm_i915_private *__p ; { dev_priv = (struct drm_i915_private *)dev->dev_private; ret = intel_parse_bios(dev); if (ret != 0) { printk("\016[drm] failed to find VBIOS tables\n"); } else { } ret = vga_client_register(dev->pdev, (void *)dev, (void (*)(void * , bool ))0, & i915_vga_set_decode); if (ret != 0 && ret != -19) { goto out; } else { } intel_register_dsm_handler(); ret = vga_switcheroo_register_client(dev->pdev, & i915_switcheroo_ops, 0); if (ret != 0) { goto cleanup_vga_client; } else { } ret = i915_gem_init_stolen(dev); if (ret != 0) { goto cleanup_vga_switcheroo; } else { } intel_power_domains_init_hw(dev_priv); ret = intel_irq_install(dev_priv); if (ret != 0) { goto cleanup_gem_stolen; } else { } intel_modeset_init(dev); ret = i915_gem_init(dev); if (ret != 0) { goto cleanup_irq; } else { } intel_modeset_gem_init(dev);
/* Tail of i915_load_modeset_init: fbdev + hotplug init, with goto-ladder
 * unwind (cleanup_gem .. out) releasing resources in reverse acquisition
 * order. i915_kick_out_firmware_fb evicts any firmware framebuffer covering
 * our GTT aperture; i915_kick_out_vgacon swaps vgacon for dummycon under
 * console_lock. */
dev->vblank_disable_allowed = 1; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 38UL) == 0U) { return (0); } else { } ret = intel_fbdev_init(dev); if (ret != 0) { goto cleanup_gem; } else { } intel_hpd_init(dev_priv); async_schedule(& intel_fbdev_initial_config, (void *)dev_priv); drm_kms_helper_poll_init(dev); return (0); cleanup_gem: mutex_lock_nested(& dev->struct_mutex, 0U); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); mutex_unlock(& dev->struct_mutex); cleanup_irq: drm_irq_uninstall(dev); cleanup_gem_stolen: i915_gem_cleanup_stolen(dev); cleanup_vga_switcheroo: vga_switcheroo_unregister_client(dev->pdev); cleanup_vga_client: vga_client_register(dev->pdev, (void *)0, (void (*)(void * , bool ))0, (unsigned int (*)(void * , bool ))0); out: ; return (ret); } } static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv ) { struct apertures_struct *ap ; struct pci_dev *pdev ; bool primary ; int ret ; { pdev = (dev_priv->dev)->pdev; ap = alloc_apertures(1U); if ((unsigned long )ap == (unsigned long )((struct apertures_struct *)0)) { return (-12); } else { } ap->ranges[0].base = dev_priv->gtt.mappable_base; ap->ranges[0].size = (resource_size_t )dev_priv->gtt.mappable_end; primary = (pdev->resource[6].flags & 2UL) != 0UL; ret = remove_conflicting_framebuffers(ap, "inteldrmfb", (int )primary); kfree((void const *)ap); return (ret); } } static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv ) { int ret ; int tmp ; { ret = 0; printk("\016[drm] Replacing VGA console driver\n"); console_lock(); tmp = con_is_bound(& vga_con); if (tmp != 0) { ret = do_take_over_console(& dummy_con, 0, 62, 1); } else { } if (ret == 0) { ret = do_unregister_con_driver(& vga_con); if (ret == -19) { ret = 0; } else { } } else { } console_unlock(); return (ret); } } static void i915_dump_device_info(struct drm_i915_private *dev_priv ) { struct intel_device_info const *info ; long tmp ; { info = & dev_priv->info; 
/* i915_dump_device_info: one giant debug printk of the device-info flag
 * bits. NOTE(review): CIL has collapsed each bitfield test to a byte-offset
 * read, so many visually-identical '(info + 12UL)' expressions actually
 * test different bits in the original source - do not "deduplicate" them. */
tmp = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp != 0L) { drm_ut_debug_printk("i915_dump_device_info", "i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", (int )info->gen, (int )((dev_priv->dev)->pdev)->device, (int )((dev_priv->dev)->pdev)->revision, (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"is_mobile," : (char *)"", (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"is_i85x," : (char *)"", (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"is_i915g," : (char *)"", (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"is_i945gm," : (char *)"", (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"is_g33," : (char *)"", (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"need_gfx_hws," : (char *)"", (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"is_g4x," : (char *)"", (unsigned int )*((unsigned char *)info + 12UL) != 0U ? (char *)"is_pineview," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"is_broadwater," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"is_crestline," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"is_ivybridge," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"is_valleyview," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"is_haswell," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"is_skylake," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"is_preliminary," : (char *)"", (unsigned int )*((unsigned char *)info + 13UL) != 0U ? (char *)"has_fbc," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? (char *)"has_pipe_cxsr," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? 
/* End of the flag dump, then cherryview_sseu_info_init: derives slice/
 * subslice/EU counts from fuse register 1581416 (0x182138); each cleared
 * disable bit (0x400/0x800) adds a subslice and 8 minus the disabled-EU
 * popcount to eu_total. Function continues past this chunk - left as-is. */
(char *)"has_hotplug," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? (char *)"cursor_needs_physical," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? (char *)"has_overlay," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? (char *)"overlay_needs_physical," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? (char *)"supports_tv," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? (char *)"has_llc," : (char *)"", (unsigned int )*((unsigned char *)info + 14UL) != 0U ? (char *)"has_ddi," : (char *)"", (unsigned int )*((unsigned char *)info + 15UL) != 0U ? (char *)"has_fpga_dbg," : (char *)""); } else { } return; } } static void cherryview_sseu_info_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_device_info *info ; u32 fuse ; u32 eu_dis ; unsigned int tmp ; unsigned int tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; info = (struct intel_device_info *)(& dev_priv->info); fuse = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 1581416L, 1); info->slice_total = 1U; if ((fuse & 1024U) == 0U) { info->subslice_per_slice = (u8 )((int )info->subslice_per_slice + 1); eu_dis = fuse & 16711680U; tmp = __arch_hweight32(eu_dis); info->eu_total = (unsigned int )((int )info->eu_total - (int )((u8 )tmp)) + 8U; } else { } if ((fuse & 2048U) == 0U) { info->subslice_per_slice = (u8 )((int )info->subslice_per_slice + 1); eu_dis = fuse & 4278190080U; tmp___0 = __arch_hweight32(eu_dis); info->eu_total = (unsigned int )((int )info->eu_total - (int )((u8 )tmp___0)) + 8U; } else { } info->subslice_total = info->subslice_per_slice; info->eu_per_subslice = (unsigned int )info->subslice_total != 0U ? 
(u8 )((int )info->eu_total / (int )info->subslice_total) : 0U; info->has_slice_pg = 0U; info->has_subslice_pg = (unsigned int )info->subslice_total > 1U; info->has_eu_pg = (unsigned int )info->eu_per_subslice > 2U; return; } } static void gen9_sseu_info_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_device_info *info ; int s_max ; int ss_max ; int eu_max ; int s ; int ss ; u32 fuse2 ; u32 s_enable ; u32 ss_disable ; u32 eu_disable ; u8 eu_mask ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; unsigned int tmp ; unsigned int tmp___0 ; int eu_per_ss ; unsigned int tmp___1 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; int tmp___2 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; s_max = 3; ss_max = 4; eu_max = 8; eu_mask = 255U; __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) == 0U) { __p___0 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___0->info.gen) == 9U) { s_max = 1; eu_max = 6; eu_mask = 63U; } else { } } else { } info = (struct intel_device_info *)(& dev_priv->info); fuse2 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 37152L, 1); s_enable = (fuse2 & 234881024U) >> 25; ss_disable = (fuse2 & 15728640U) >> 20; tmp = __arch_hweight32(s_enable); info->slice_total = (u8 )tmp; tmp___0 = __arch_hweight32(ss_disable); info->subslice_per_slice = (int )((u8 )ss_max) - (int )((u8 )tmp___0); info->subslice_total = (int )info->slice_total * (int )info->subslice_per_slice; s = 0; goto ldv_53040; ldv_53039: ; if (((u32 )(1 << s) & s_enable) == 0U) { goto ldv_53033; } else { } eu_disable = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, (off_t )((s + 9293) * 4), 1); ss = 0; goto ldv_53037; ldv_53036: ; if (((u32 )(1 << ss) & ss_disable) != 0U) { goto ldv_53035; } else { } tmp___1 = __arch_hweight8((eu_disable >> ss * 8) & (u32 )eu_mask); eu_per_ss = (int )((unsigned 
int )eu_max - tmp___1); if (eu_per_ss == 7) { info->subslice_7eu[s] = (u8 )((int )((signed char )info->subslice_7eu[s]) | (int )((signed char )(1 << ss))); } else { } info->eu_total = (int )info->eu_total + (int )((u8 )eu_per_ss); ldv_53035: ss = ss + 1; ldv_53037: ; if (ss < ss_max) { goto ldv_53036; } else { } ldv_53033: s = s + 1; ldv_53040: ; if (s < s_max) { goto ldv_53039; } else { } info->eu_per_subslice = (unsigned int )info->subslice_total != 0U ? (u8 )((((int )info->eu_total + (int )info->subslice_total) + -1) / (int )info->subslice_total) : 0U; __p___1 = to_i915((struct drm_device const *)dev); info->has_slice_pg = (unsigned char )((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U && (unsigned int )info->slice_total > 1U); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___2 + 45UL) == 0U) { __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___3->info.gen) == 9U) { if ((unsigned int )info->subslice_total > 1U) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } } else { tmp___2 = 0; } info->has_subslice_pg = (unsigned char )tmp___2; info->has_eu_pg = (unsigned int )info->eu_per_subslice > 2U; return; } } static void intel_device_info_runtime_init(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; struct intel_device_info *info ; enum pipe pipe ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; u32 fuse_strap ; uint32_t tmp ; u32 sfuse_strap ; uint32_t tmp___0 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; long tmp___8 ; { 
dev_priv = (struct drm_i915_private *)dev->dev_private; info = (struct intel_device_info *)(& dev_priv->info); __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) == 0U) { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___4->info.gen) == 9U) { info->num_sprites[0] = 3U; info->num_sprites[1] = 3U; info->num_sprites[2] = 2U; } else { goto _L___0; } } else { _L___0: /* CIL Label */ __p___1 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___1 + 45UL) != 0U) { goto _L; } else { __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 9U) { _L: /* CIL Label */ pipe = 0; goto ldv_53097; ldv_53096: info->num_sprites[(int )pipe] = 2U; pipe = (enum pipe )((int )pipe + 1); ldv_53097: __p = dev_priv; if ((int )__p->info.num_pipes > (int )pipe) { goto ldv_53096; } else { } } else { pipe = 0; goto ldv_53106; ldv_53105: info->num_sprites[(int )pipe] = 1U; pipe = (enum pipe )((int )pipe + 1); ldv_53106: __p___0 = dev_priv; if ((int )__p___0->info.num_pipes > (int )pipe) { goto ldv_53105; } else { } } } } if ((int )i915.disable_display) { printk("\016[drm] Display disabled (module parameter)\n"); info->num_pipes = 0U; } else if ((int )info->num_pipes > 0) { __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___5->info.gen) == 7U) { goto _L___1; } else { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___6->info.gen) == 8U) { _L___1: /* CIL Label */ __p___7 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___7 + 45UL) == 0U) { tmp = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 270356L, 1); fuse_strap = tmp; tmp___0 = (*(dev_priv->uncore.funcs.mmio_readl))(dev_priv, 794644L, 1); sfuse_strap = tmp___0; if (((fuse_strap & 1073741824U) != 0U || (sfuse_strap & 128U) != 0U) || ((unsigned 
int )dev_priv->pch_type == 2U && (sfuse_strap & 8192U) == 0U)) { printk("\016[drm] Display fused off, disabling\n"); info->num_pipes = 0U; } else { } } else { } } else { } } } else { } __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 45UL) != 0U) { __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___10->info.gen) == 8U) { cherryview_sseu_info_init(dev); } else { goto _L___2; } } else { _L___2: /* CIL Label */ __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___8->info.gen) > 8U) { gen9_sseu_info_init(dev); } else { } } tmp___1 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___1 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "slice total: %u\n", (int )info->slice_total); } else { } tmp___2 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___2 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "subslice total: %u\n", (int )info->subslice_total); } else { } tmp___3 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___3 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "subslice per slice: %u\n", (int )info->subslice_per_slice); } else { } tmp___4 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___4 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "EU total: %u\n", (int )info->eu_total); } else { } tmp___5 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___5 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "EU per subslice: %u\n", (int )info->eu_per_subslice); } else { } tmp___6 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___6 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "has slice power gating: %s\n", (unsigned int )*((unsigned char *)info + 80UL) != 0U ? 
(char *)"y" : (char *)"n"); } else { } tmp___7 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___7 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "has subslice power gating: %s\n", (unsigned int )*((unsigned char *)info + 80UL) != 0U ? (char *)"y" : (char *)"n"); } else { } tmp___8 = ldv__builtin_expect((drm_debug & 2U) != 0U, 0L); if (tmp___8 != 0L) { drm_ut_debug_printk("intel_device_info_runtime_init", "has EU power gating: %s\n", (unsigned int )*((unsigned char *)info + 80UL) != 0U ? (char *)"y" : (char *)"n"); } else { } return; } } int i915_driver_load(struct drm_device *dev , unsigned long flags ) { struct drm_i915_private *dev_priv ; struct intel_device_info *info ; struct intel_device_info *device_info ; int ret ; int mmio_bar ; int mmio_size ; uint32_t aperture_size ; void *tmp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; struct lock_class_key __key___4 ; struct lock_class_key __key___5 ; struct lock_class_key __key___6 ; struct lock_class_key __key___7 ; struct drm_i915_private *__p ; struct drm_i915_private *__p___0 ; int tmp___0 ; struct drm_i915_private *__p___1 ; struct drm_i915_private *__p___2 ; struct drm_i915_private *__p___3 ; struct drm_i915_private *__p___4 ; struct lock_class_key __key___8 ; char const *__lock_name ; struct workqueue_struct *tmp___1 ; struct lock_class_key __key___9 ; char const *__lock_name___0 ; struct workqueue_struct *tmp___2 ; struct lock_class_key __key___10 ; char const *__lock_name___1 ; struct workqueue_struct *tmp___3 ; struct drm_i915_private *__p___5 ; struct drm_i915_private *__p___6 ; struct drm_i915_private *__p___7 ; struct drm_i915_private *__p___8 ; bool tmp___4 ; struct drm_i915_private *__p___9 ; struct drm_i915_private *__p___10 ; int __ret_warn_on ; int tmp___5 ; long tmp___6 ; { ret = 0; info = (struct intel_device_info *)flags; tmp = kzalloc(51720UL, 
208U); dev_priv = (struct drm_i915_private *)tmp; if ((unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) { return (-12); } else { } dev->dev_private = (void *)dev_priv; dev_priv->dev = dev; device_info = (struct intel_device_info *)(& dev_priv->info); memcpy((void *)device_info, (void const *)info, 84UL); device_info->device_id = (dev->pdev)->device; spinlock_check(& dev_priv->irq_lock); __raw_spin_lock_init(& dev_priv->irq_lock.__annonCompField18.rlock, "&(&dev_priv->irq_lock)->rlock", & __key); spinlock_check(& dev_priv->gpu_error.lock); __raw_spin_lock_init(& dev_priv->gpu_error.lock.__annonCompField18.rlock, "&(&dev_priv->gpu_error.lock)->rlock", & __key___0); __mutex_init(& dev_priv->backlight_lock, "&dev_priv->backlight_lock", & __key___1); spinlock_check(& dev_priv->uncore.lock); __raw_spin_lock_init(& dev_priv->uncore.lock.__annonCompField18.rlock, "&(&dev_priv->uncore.lock)->rlock", & __key___2); spinlock_check(& dev_priv->mm.object_stat_lock); __raw_spin_lock_init(& dev_priv->mm.object_stat_lock.__annonCompField18.rlock, "&(&dev_priv->mm.object_stat_lock)->rlock", & __key___3); spinlock_check(& dev_priv->mmio_flip_lock); __raw_spin_lock_init(& dev_priv->mmio_flip_lock.__annonCompField18.rlock, "&(&dev_priv->mmio_flip_lock)->rlock", & __key___4); __mutex_init(& dev_priv->sb_lock, "&dev_priv->sb_lock", & __key___5); __mutex_init(& dev_priv->modeset_restore_lock, "&dev_priv->modeset_restore_lock", & __key___6); __mutex_init(& dev_priv->csr_lock, "&dev_priv->csr_lock", & __key___7); intel_pm_setup(dev); intel_display_crc_init(dev); i915_dump_device_info(dev_priv); __p = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p + 45UL) != 0U) { __p___0 = to_i915((struct drm_device const *)dev); if (((int )__p___0->info.device_id & 65280) == 3072) { printk("\016[drm] This is an early pre-production Haswell machine. 
It may not be fully functional.\n"); } else { } } else { } tmp___0 = i915_get_bridge_dev(dev); if (tmp___0 != 0) { ret = -5; goto free_priv; } else { } __p___1 = to_i915((struct drm_device const *)dev); mmio_bar = (unsigned int )((unsigned char )__p___1->info.gen) == 2U; if ((unsigned int )info->gen <= 4U) { mmio_size = 524288; } else { mmio_size = 2097152; } dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, (unsigned long )mmio_size); if ((unsigned long )dev_priv->regs == (unsigned long )((void *)0)) { drm_err("failed to map registers\n"); ret = -5; goto put_bridge; } else { } intel_detect_pch(dev); intel_uncore_init(dev); intel_csr_ucode_init(dev); ret = i915_gem_gtt_init(dev); if (ret != 0) { goto out_freecsr; } else { } ret = i915_kick_out_firmware_fb(dev_priv); if (ret != 0) { drm_err("failed to remove conflicting framebuffer drivers\n"); goto out_gtt; } else { } ret = i915_kick_out_vgacon(dev_priv); if (ret != 0) { drm_err("failed to remove conflicting VGA console\n"); goto out_gtt; } else { } pci_set_master(dev->pdev); __p___2 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___2->info.gen) == 2U) { dma_set_coherent_mask(& (dev->pdev)->dev, 1073741823ULL); } else { } __p___3 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___3 + 45UL) != 0U) { dma_set_coherent_mask(& (dev->pdev)->dev, 4294967295ULL); } else { __p___4 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___4 + 45UL) != 0U) { dma_set_coherent_mask(& (dev->pdev)->dev, 4294967295ULL); } else { } } aperture_size = (uint32_t )dev_priv->gtt.mappable_end; dev_priv->gtt.mappable = io_mapping_create_wc(dev_priv->gtt.mappable_base, (unsigned long )aperture_size); if ((unsigned long )dev_priv->gtt.mappable == (unsigned long )((struct io_mapping *)0)) { ret = -5; goto out_gtt; } else { } dev_priv->gtt.mtrr = arch_phys_wc_add((unsigned long )dev_priv->gtt.mappable_base, (unsigned long )aperture_size); 
__lock_name = "\"i915\""; tmp___1 = __alloc_workqueue_key("i915", 131074U, 1, & __key___8, __lock_name); dev_priv->wq = tmp___1; if ((unsigned long )dev_priv->wq == (unsigned long )((struct workqueue_struct *)0)) { drm_err("Failed to create our workqueue.\n"); ret = -12; goto out_mtrrfree; } else { } __lock_name___0 = "\"i915-dp\""; tmp___2 = __alloc_workqueue_key("i915-dp", 131074U, 1, & __key___9, __lock_name___0); dev_priv->dp_wq = tmp___2; if ((unsigned long )dev_priv->dp_wq == (unsigned long )((struct workqueue_struct *)0)) { drm_err("Failed to create our dp workqueue.\n"); ret = -12; goto out_freewq; } else { } __lock_name___1 = "\"i915-hangcheck\""; tmp___3 = __alloc_workqueue_key("i915-hangcheck", 131074U, 1, & __key___10, __lock_name___1); dev_priv->gpu_error.hangcheck_wq = tmp___3; if ((unsigned long )dev_priv->gpu_error.hangcheck_wq == (unsigned long )((struct workqueue_struct *)0)) { drm_err("Failed to create our hangcheck workqueue.\n"); ret = -12; goto out_freedpwq; } else { } intel_irq_init(dev_priv); intel_uncore_sanitize(dev); intel_setup_mchbar(dev); intel_setup_gmbus(dev); intel_opregion_setup(dev); intel_setup_bios(dev); i915_gem_load(dev); __p___5 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned short )__p___5->info.device_id) != 10098U) { __p___6 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___6 + 44UL) == 0U) { pci_enable_msi_exact(dev->pdev, 1); } else { } } else { } intel_device_info_runtime_init(dev); __p___8 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___8 + 38UL) != 0U) { __p___7 = to_i915((struct drm_device const *)dev); ret = drm_vblank_init(dev, (int )__p___7->info.num_pipes); if (ret != 0) { goto out_gem_unload; } else { } } else { } intel_power_domains_init(dev_priv); ret = i915_load_modeset_init(dev); if (ret < 0) { drm_err("failed to init modeset\n"); goto out_power_well; } else { } tmp___4 = intel_vgpu_active(dev); if ((int 
)tmp___4) { (*(dev_priv->uncore.funcs.mmio_writel))(dev_priv, 493572L, 1U, 1); } else { } i915_setup_sysfs(dev); __p___9 = to_i915((struct drm_device const *)dev); if ((unsigned int )*((unsigned char *)__p___9 + 38UL) != 0U) { intel_opregion_init(dev); acpi_video_register(); } else { } __p___10 = to_i915((struct drm_device const *)dev); if ((unsigned int )((unsigned char )__p___10->info.gen) == 5U) { intel_gpu_ips_init(dev_priv); } else { } intel_runtime_pm_enable(dev_priv); i915_audio_component_init(dev_priv); return (0); out_power_well: intel_power_domains_fini(dev_priv); drm_vblank_cleanup(dev); out_gem_unload: tmp___5 = unregister_oom_notifier(& dev_priv->mm.oom_notifier); __ret_warn_on = tmp___5 != 0; tmp___6 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___6 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_dma.c", 1021, "WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); unregister_shrinker(& dev_priv->mm.shrinker); if ((unsigned int )*((unsigned char *)dev->pdev + 2530UL) != 0U) { pci_disable_msi(dev->pdev); } else { } intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); pm_qos_remove_request(& dev_priv->pm_qos); ldv_destroy_workqueue_1068(dev_priv->gpu_error.hangcheck_wq); out_freedpwq: ldv_destroy_workqueue_1069(dev_priv->dp_wq); out_freewq: ldv_destroy_workqueue_1070(dev_priv->wq); out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); io_mapping_free(dev_priv->gtt.mappable); out_gtt: i915_global_gtt_cleanup(dev); out_freecsr: intel_csr_ucode_fini(dev); intel_uncore_fini(dev); pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: pci_dev_put(dev_priv->bridge_dev); free_priv: ; if ((unsigned long )dev_priv->requests != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(dev_priv->requests); } 
else { } if ((unsigned long )dev_priv->vmas != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(dev_priv->vmas); } else { } if ((unsigned long )dev_priv->objects != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(dev_priv->objects); } else { } kfree((void const *)dev_priv); return (ret); } } int i915_driver_unload(struct drm_device *dev ) { struct drm_i915_private *dev_priv ; int ret ; int __ret_warn_on ; int tmp ; long tmp___0 ; { dev_priv = (struct drm_i915_private *)dev->dev_private; i915_audio_component_cleanup(dev_priv); ret = i915_gem_suspend(dev); if (ret != 0) { drm_err("failed to idle hardware: %d\n", ret); return (ret); } else { } intel_power_domains_fini(dev_priv); intel_gpu_ips_teardown(); i915_teardown_sysfs(dev); tmp = unregister_oom_notifier(& dev_priv->mm.oom_notifier); __ret_warn_on = tmp != 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--08_1a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/10168/dscv_tempdir/dscv/ri/08_1a/drivers/gpu/drm/i915/i915_dma.c", 1076, "WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier))"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); unregister_shrinker(& dev_priv->mm.shrinker); io_mapping_free(dev_priv->gtt.mappable); arch_phys_wc_del(dev_priv->gtt.mtrr); acpi_video_unregister(); intel_fbdev_fini(dev); drm_vblank_cleanup(dev); intel_modeset_cleanup(dev); if ((unsigned long )dev_priv->vbt.child_dev != (unsigned long )((union child_device_config *)0) && dev_priv->vbt.child_dev_num != 0) { kfree((void const *)dev_priv->vbt.child_dev); dev_priv->vbt.child_dev = (union child_device_config *)0; dev_priv->vbt.child_dev_num = 0; } else { } vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, (void *)0, (void (*)(void * , bool ))0, (unsigned int (*)(void * , bool ))0); ldv_cancel_delayed_work_sync_1071(& 
dev_priv->gpu_error.hangcheck_work); i915_destroy_error_state(dev); if ((unsigned int )*((unsigned char *)dev->pdev + 2530UL) != 0U) { pci_disable_msi(dev->pdev); } else { } intel_opregion_fini(dev); ldv_flush_workqueue_1072(dev_priv->wq); mutex_lock_nested(& dev->struct_mutex, 0U); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); mutex_unlock(& dev->struct_mutex); i915_gem_cleanup_stolen(dev); intel_csr_ucode_fini(dev); intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); ldv_destroy_workqueue_1073(dev_priv->dp_wq); ldv_destroy_workqueue_1074(dev_priv->wq); ldv_destroy_workqueue_1075(dev_priv->gpu_error.hangcheck_wq); pm_qos_remove_request(& dev_priv->pm_qos); i915_global_gtt_cleanup(dev); intel_uncore_fini(dev); if ((unsigned long )dev_priv->regs != (unsigned long )((void *)0)) { pci_iounmap(dev->pdev, dev_priv->regs); } else { } if ((unsigned long )dev_priv->requests != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(dev_priv->requests); } else { } if ((unsigned long )dev_priv->vmas != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(dev_priv->vmas); } else { } if ((unsigned long )dev_priv->objects != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(dev_priv->objects); } else { } pci_dev_put(dev_priv->bridge_dev); kfree((void const *)dev_priv); return (0); } } int i915_driver_open(struct drm_device *dev , struct drm_file *file ) { int ret ; { ret = i915_gem_open(dev, file); if (ret != 0) { return (ret); } else { } return (0); } } void i915_driver_lastclose(struct drm_device *dev ) { { intel_fbdev_restore_mode(dev); vga_switcheroo_process_delayed_switch(); return; } } void i915_driver_preclose(struct drm_device *dev , struct drm_file *file ) { { mutex_lock_nested(& dev->struct_mutex, 0U); i915_gem_context_close(dev, file); i915_gem_release(dev, file); mutex_unlock(& dev->struct_mutex); intel_modeset_preclose(dev, file); return; } } void i915_driver_postclose(struct drm_device *dev , struct drm_file *file ) 
{ struct drm_i915_file_private *file_priv ; { file_priv = (struct drm_i915_file_private *)file->driver_priv; if ((unsigned long )file_priv != (unsigned long )((struct drm_i915_file_private *)0) && (unsigned long )file_priv->bsd_ring != (unsigned long )((struct intel_engine_cs *)0)) { file_priv->bsd_ring = (struct intel_engine_cs *)0; } else { } kfree((void const *)file_priv); return; } } static int i915_gem_reject_pin_ioctl(struct drm_device *dev , void *data , struct drm_file *file ) { { return (-19); } } struct drm_ioctl_desc const i915_ioctls[54U] = { {1078223936U, 7, & drm_noop, "I915_INIT"}, {25665U, 1, & drm_noop, "I915_FLUSH"}, {25666U, 1, & drm_noop, "I915_FLIP"}, {1075864643U, 1, & drm_noop, "I915_BATCHBUFFER"}, {3221775428U, 1, & drm_noop, "I915_IRQ_EMIT"}, {1074029637U, 1, & drm_noop, "I915_IRQ_WAIT"}, {3222299718U, 33, & i915_getparam, "I915_GETPARAM"}, {1074291783U, 7, & i915_setparam, "I915_SETPARAM"}, {3222824008U, 1, & drm_noop, "I915_ALLOC"}, {1074291785U, 1, & drm_noop, "I915_FREE"}, {1074553930U, 7, & drm_noop, "I915_INIT_HEAP"}, {1075864651U, 1, & drm_noop, "I915_CMDBUFFER"}, {1074029644U, 7, & drm_noop, "I915_DESTROY_HEAP"}, {1074029645U, 7, & drm_noop, "I915_SET_VBLANK_PIPE"}, {2147771470U, 1, & drm_noop, "I915_GET_VBLANK_PIPE"}, {3222037583U, 1, & drm_noop, "I915_VBLANK_SWAP"}, {0U, 0, 0, 0}, {1074816081U, 7, & drm_noop, "I915_HWS_ADDR"}, {0U, 0, 0, 0}, {1074816083U, 23, & drm_noop, "I915_GEM_INIT"}, {1076388948U, 17, & i915_gem_execbuffer, "I915_GEM_EXECBUFFER"}, {3222824021U, 21, & i915_gem_reject_pin_ioctl, "I915_GEM_PIN"}, {1074291798U, 21, & i915_gem_reject_pin_ioctl, "I915_GEM_UNPIN"}, {3221775447U, 49, & i915_gem_busy_ioctl, "I915_GEM_BUSY"}, {25688U, 49, & i915_gem_throttle_ioctl, "I915_GEM_THROTTLE"}, {25689U, 23, & drm_noop, "I915_GEM_ENTERVT"}, {25690U, 23, & drm_noop, "I915_GEM_LEAVEVT"}, {3222299739U, 48, & i915_gem_create_ioctl, "I915_GEM_CREATE"}, {1075864668U, 48, & i915_gem_pread_ioctl, "I915_GEM_PREAD"}, {1075864669U, 48, & 
i915_gem_pwrite_ioctl, "I915_GEM_PWRITE"}, {3223872606U, 48, & i915_gem_mmap_ioctl, "I915_GEM_MMAP"}, {1074553951U, 48, & i915_gem_set_domain_ioctl, "I915_GEM_SET_DOMAIN"}, {1074029664U, 48, & i915_gem_sw_finish_ioctl, "I915_GEM_SW_FINISH"}, {3222299745U, 48, & i915_gem_set_tiling, "I915_GEM_SET_TILING"}, {3222299746U, 48, & i915_gem_get_tiling, "I915_GEM_GET_TILING"}, {2148557923U, 48, & i915_gem_get_aperture_ioctl, "I915_GEM_GET_APERTURE"}, {3222299748U, 48, & i915_gem_mmap_gtt_ioctl, "I915_GEM_MMAP_GTT"}, {3221775461U, 16, & intel_get_pipe_from_crtc_id, "I915_GET_PIPE_FROM_CRTC_ID"}, {3222037606U, 48, & i915_gem_madvise_ioctl, "I915_GEM_MADVISE"}, {1076651111U, 26, & intel_overlay_put_image, "I915_OVERLAY_PUT_IMAGE"}, {3224134760U, 26, & intel_overlay_attrs, "I915_OVERLAY_ATTRS"}, {1077961833U, 49, & i915_gem_execbuffer2, "I915_GEM_EXECBUFFER2"}, {3222561898U, 26, & drm_noop, "I915_GET_SPRITE_COLORKEY"}, {3222561899U, 26, & intel_sprite_set_colorkey, "I915_SET_SPRITE_COLORKEY"}, {3222299756U, 49, & i915_gem_wait_ioctl, "I915_GEM_WAIT"}, {3221775469U, 48, & i915_gem_context_create_ioctl, "I915_GEM_CONTEXT_CREATE"}, {1074291822U, 48, & i915_gem_context_destroy_ioctl, "I915_GEM_CONTEXT_DESTROY"}, {1074291823U, 48, & i915_gem_set_caching_ioctl, "I915_GEM_SET_CACHING"}, {3221775472U, 48, & i915_gem_get_caching_ioctl, "I915_GEM_GET_CACHING"}, {3222299761U, 48, & i915_reg_read_ioctl, "I915_REG_READ"}, {3222824050U, 48, & i915_get_reset_stats_ioctl, "I915_GET_RESET_STATS"}, {3222824051U, 48, & i915_gem_userptr_ioctl, "I915_GEM_USERPTR"}, {3222824052U, 48, & i915_gem_context_getparam_ioctl, "I915_GEM_CONTEXT_GETPARAM"}, {3222824053U, 48, & i915_gem_context_setparam_ioctl, "I915_GEM_CONTEXT_SETPARAM"}}; int i915_max_ioctl = 54; int i915_driver_device_is_agp(struct drm_device *dev ) { { return (1); } } void ldv_initialize_vga_switcheroo_client_ops_23(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); i915_switcheroo_ops_group0 = (struct pci_dev *)tmp; return; } } 
/* LDV environment model for the vga_switcheroo_client_ops interface (group 23):
 * nondeterministically invokes set_state or can_switch while the state machine
 * (ldv_state_variable_23, defined elsewhere in this file) permits it. The
 * default: ldv_stop() cuts off infeasible branches for the verifier. */
void ldv_main_exported_23(void) { enum vga_switcheroo_state ldvarg207 ; int tmp ; { ldv_memset((void *)(& ldvarg207), 0, 4UL); tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_23 == 1) { i915_switcheroo_set_state(i915_switcheroo_ops_group0, ldvarg207); ldv_state_variable_23 = 1; } else { } goto ldv_53303; case 1: ; if (ldv_state_variable_23 == 1) { i915_switcheroo_can_switch(i915_switcheroo_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_53303; default: ldv_stop(); } ldv_53303: ; return; } }
/* Instrumented wrapper around queue_work_on(): forwards the call, then tells
 * the LDV work-item model the item is pending (activate_work_18, state 2). */
bool ldv_queue_work_on_1063(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* Instrumented wrapper around queue_delayed_work_on(): forwards the call and
 * marks the embedded work_struct active in the LDV model. */
bool ldv_queue_delayed_work_on_1064(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Same model as ldv_queue_work_on_1063; CIL emits one wrapper per call site. */
bool ldv_queue_work_on_1065(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_18(ldv_func_arg3, 2); return (ldv_func_res); } }
/* Instrumented flush_workqueue(): after flushing, runs and disables all
 * modelled work items (call_and_disable_all_18). */
void ldv_flush_workqueue_1066(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* Per-call-site clone of the queue_delayed_work_on() model above. */
bool ldv_queue_delayed_work_on_1067(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_18(& ldv_func_arg3->work, 2); return (ldv_func_res); } }
/* Instrumented destroy_workqueue(): destroys, then drains the work model.
 * Functions _1068.._1070 and _1073.._1075 are identical per-call-site clones. */
void ldv_destroy_workqueue_1068(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
void ldv_destroy_workqueue_1069(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
void ldv_destroy_workqueue_1070(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
/* Instrumented cancel_delayed_work_sync(): forwards, then marks the embedded
 * work item inactive in the LDV model. */
bool ldv_cancel_delayed_work_sync_1071(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = cancel_delayed_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_18(& ldv_func_arg1->work); return (ldv_func_res); } }
/* Per-call-site clone of the flush_workqueue() model above. */
void ldv_flush_workqueue_1072(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
void ldv_destroy_workqueue_1073(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
void ldv_destroy_workqueue_1074(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
void ldv_destroy_workqueue_1075(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_18(2); return; } }
extern void *memset(void * , int , size_t ) ;
/* LDV error sink: reaching the ERROR label means a checked property was
 * violated; __VERIFIER_error() is noreturn. */
__inline static void ldv_error(void) { { ERROR: ; __VERIFIER_error(); } }
/* Model of the kernel's IS_ERR(): treats addresses above 2012 as encoded
 * error pointers. 2012 stands in for the kernel's (unsigned long)-MAX_ERRNO
 * threshold in this simplified model. */
bool ldv_is_err(void const *ptr ) { { return ((unsigned long )ptr > 2012UL); } }
/* Model of ERR_PTR(): encodes a negative errno as a pointer above the 2012
 * threshold so ldv_is_err() recognizes it. */
void *ldv_err_ptr(long error ) { { return ((void *)(2012L - error)); } }
/* Model of PTR_ERR(): inverse of ldv_err_ptr(), recovering the errno value. */
long ldv_ptr_err(void const *ptr ) { { return ((long )(2012UL - (unsigned long )ptr)); } }
/* Model of IS_ERR_OR_NULL(): true for NULL or any encoded error pointer. */
bool ldv_is_err_or_null(void const *ptr ) { bool tmp ; int tmp___0 ; { if ((unsigned long )ptr == (unsigned long )((void const *)0)) { tmp___0 = 1; } else { tmp = ldv_is_err(ptr); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } }
/* LDV module-refcount model. Starts at 1 (the implicit self-reference);
 * ldv_check_final_state() requires it to return to exactly 1 on exit. */
int ldv_module_refcounter = 1;
/* Model of __module_get(): unconditionally takes a reference for a non-NULL module. */
void ldv_module_get(struct module *module ) { { if ((unsigned long )module != (unsigned long )((struct module *)0)) { ldv_module_refcounter = ldv_module_refcounter + 1; } else { } return; } }
/* Model of try_module_get(): nondeterministically succeeds (returns 1 and
 * bumps the counter) or fails (returns 0), so the verifier covers both paths. */
int ldv_try_module_get(struct module *module ) { int module_get_succeeded ; { if ((unsigned long )module != (unsigned long )((struct module *)0)) { module_get_succeeded = ldv_undef_int(); if (module_get_succeeded == 1) { ldv_module_refcounter = ldv_module_refcounter + 1; return (1); } else { return (0); } } else { } return (0); } }
/* Model of module_put(): dropping below the baseline refcount of 1 is a
 * verification error (put without a matching get). */
void ldv_module_put(struct module *module ) { { if ((unsigned long )module != (unsigned long )((struct module *)0)) { if (ldv_module_refcounter <= 1) { ldv_error(); } else { } ldv_module_refcounter = ldv_module_refcounter - 1; } else { } return; } }
/* Model of module_put_and_exit(): releases one reference, then spins forever
 * (the self-loop models the noreturn exit for the verifier). */
void ldv_module_put_and_exit(void) { { ldv_module_put((struct module *)1); LDV_STOP: ; goto LDV_STOP; } }
/* Model of module_refcount(): external references, i.e. counter minus the baseline 1. */
unsigned int ldv_module_refcount(void) { { return ((unsigned int )(ldv_module_refcounter + -1)); } }
/* Final check run at harness exit: any leaked or over-released module
 * reference (counter != 1) is reported via ldv_error(). */
void ldv_check_final_state(void) { { if (ldv_module_refcounter != 1) { ldv_error(); } else { } return; } }