\\xa2\\xce\\xbc7\\xd7\\xb9\\xbd1K\\xbf=\\x98d\\xcf\\xbdt\\xa2\\xb7=\\'\\x18\\xa9=\\x92\\xe7\\xb7\\xbd\\xfe\\xb7\\xcf\\xbd\\x9bn\\x85\\xbd\\xcd\\x0ca=\\xa1Cy=\\x84\\xb5\\xe3<\\xc7X_<\\x8ey%=bK\\xbe=\\xab\\x01\\xb6=h\\x88\\xd4\\xbc2\\x92I\\xbd\\xfc\\xee<=62\\xef\\x83\\xbd\\xa1\\xd2N\\xbdVD\\x84\\xbc\\xe2\\x1c\\xc8=\\xa6\\xab\\xec<\\x1a\\xd4\\x81=\\xe0\\x99\\xdb<\\xeb,\\xc6\\xbd1\\xef\\xcf=\\x1f\\x1f\\xbf\\xbd\\xc2\\x84\\x19<\\xd0\\x8an=\\xdbg\\x86=fs\\x97=\\xe0=\\x02=\\x99\\xe3\\n\\xbdX\\x13\\xe1<)\\xe1u\\xbc\\xba\\x9f\\x03<\\x8a\\xc8Z\\xbd^[\\xbd=\\xdfx\\x9a\\xbdR\\x18\\x9e=\\xc7\\x8f\\x9c=\\x98P\\x80\\xbd\\xa65\\x07\\xbb\\xbe|\\xa1=\\x04{\\xc8=\\x06\\x94\\x84\\xbd\\x86p4\\xbc\\xa7l9\\xbc\\x89\\x0e\\xa9\\xbdh\\x0f\\xb8\\xbc\\x02\\x9a$<\\xfb6\\x84\\xbd\\t\\xbe\\xc1=F\\xbe\\x14\\xbc@\\xb2\\xb0=\\x11s+=\\n\\xfb[=4\\xdfM\\xbb\\xf1\\x8f\\x89\\xbd\\xad\\xc1\\x9a<3\\xa1/\\xbdz\\xf8\\x9a\\xbc^\\x96\\xd9\\xbc\\x8c{\\xa8=\\x02\\xff\\xa3={\\x9a\\xa4=Z\\xc5\\x05=:\\x81\\x0b\\xbd\\x02\\x95\\xcd=a\\xcc\\xfc=A\\xf6\\xcc\\xbdA\\xa0\\xd7=""\\xa4\\x9d=\\xdd\\xee\\xcb\\xbd\\x8d\\x0e\\xce;Y\\xc7\\xfb\\xbc\\x1e\\xa0\\x8b=|\\xa6\\xf6\\xbdMS\\xa3\\xbdE7\\x10\\xbd\\xd5\\xc0\\x80\\xbd\\xf5\\x8a\\x8a=\\xb7\\'\\x9d=\\xb8\\xea\\xe1\\xbcJF\\xc0|i\\xbc>J\\xf1\\xbd\\xd3\\xe0\\xb8\\xbd\\x90lz\\xbd_\\xae\\xe2\\xbd\\xee\\x84\\xab=\\xcc@\\x83<\\x99\\xaf\\xa5=\\xe0Y\\xcf\\xbdH\\xaf\\x1e=\\x16\\x12\\xd2\\xbd\\xfd\\xcc\\xcc=\\xd17\\xa3\\xbcG\\xf7\\xfb=\\xd1\\xe9\\x9b\\xbc\\xdc\\x16\\xf6:\\xf4\\xb4\\xb3\\xbd\\xd4f\\x81=\\xf6\\x98\\x9a;\\xf2I\\x14\\xbda\\x98D\\xbd\\x16eS=\\x9cB\\x92\\xbd\\xd5\\x07m\\xbd)j\\x1f=\\xa0\\x1aW=\\xb1\\xe8_=\\x06\\x84\\x9b8n\\xe7H=\\x86\\x83T\\xbd\\x85H\\xbc\\xbd\\xdb\\xda1<\\x81N\\x1f\\xbd\\x9ff\\x1b=\\x96\\xa07=vCB\\xbdL\\xad\\xbd\\xbdjc\\xce\\xbd\\xb3))<\\x16$\\xdd\\xbd\\xde~\\x19\\xbd\\xd4\\xb5\\xa0\\xb2\\xbd\\xe9\\xbe\\xba=^\\x0c\\x9b=\\xef-M\\xbd\\x11\\x8f\\xb9\\xbd\\xb9>\\x8c\\xbb8\\xbf\\x1d=\\xd5\\xdc!\\xbd\\x1a\\xb0\\xae\\xbd\\xb7E\\xe5;.\\x8fN\\xbd\\xaf&\\xfc\\xbd\\xd2}
\\xfa;u\\xa4;\\xbc\\xb9-\\x93\\xbd\\x0880\\xbc]\\xba\\xc5=[\\xbc\\x87\\xbd8]\\xe5\\xbdz\\xa4\\x08\\xbc\\xe4\\x87\\x95=\\x8f\\xf7\\xa6=3\\xc9y=\\x1a&b\\xbd\\xc4\\x88\\xd8=t\\xde\\xbb\\xbb_m\\xd1<\\r\\xa5\\x9b=\\xf8x\\xc7=\\xc4\\xc1\\x0c=w\\xcf\\xe0\\xbd9\\xc3P\\xbb\\xf5J\\x8c\\xbd\\'\\xc82=:\\xe3\\xb8=\\rp\\x7f\\xbd<\\xf0T\\xbd\\xdb3\\xcd=\\xfd\\r\\xcd=\\x9d\\x7f\\xad=\\xea\\xc6\\xe1\\xbd-\\x98\\xa1\\xbd\\xde\\xc6\\xce=\\xcc\\x8b|\\xbd\\xd6\\xb4\\xc3<\\x1ax/\\xbdV,\\xce=\\xb9o\\x1d\\xbd\\x1b\\xac\\x84\\xbdtF\\xd0;\\xe2\\x1f\\xb0=\\xf7\\r\\xb7\\xbcb\\xf6\\xa4\\xbdt/\\xd3\\xbd\\x9f\\xb4\\xad\\xbd\\xd9\\x06\\x1c\\xbdt\\n\\xce=\\xa6\\xc5\\r=}t\\xf1<;]\\x93\\xbd\\xbb5\\xee\\xbd\\x14\\x8b\\x8d=I2\\xa9\\xbd\\xa7\\xd4\\t\\xbc\\xb7\\n\\xe2\\xbb\\xaca\\xc6=\\xc9!\\xb7=\\xef\\x9c\\xe2\\xbc_,\\xd2;\\x1c\\xbe\\'<\\x9d\\xc5\\x8b=\\xd57\\x1d\\xbd\\x9b\\xd2\\xbb=1X\\xb6\\xbd\\xcc\\xe6Q\\xbd]E\\xcb=\\x87\\xab(=g\\xdf\\xb0\\xbd\\xcb\\x1d\\x82\\xbd\\x0fb\\xae\\xbb\\xb34\\xb3=C&`\\xbdw+a\\xbd\\x96m\\x19\\xbd)s5=\\xfdF\\xc3=\\xf5F\\xb5=\\xa46\\x9f\\xbc\\xc7\\x96\\x8b\\'\\xabM\\xbd\\xc5+r\\xbc\\xd8\\xd7\\xa6=\\xa6\\x0c\\xb5=\\x95\\t\\xb0=\\xd5\\xc1\\xdb;\\xc3\\xbb\\x86\\xbd\\x89\\xe6\\xde=\\x87\\xc0\\x1e\\xbd\\x80A\\xef=\\x87\\xfb\\xc3\\xbd\\x06k\\n\\xbd""R8=\\x99\\xddK\\xbd\\r\\xbf\\xf9\\xbda\\xc9\\xd2=\\xca\\xfb\\x17=\\xa5\\xac\\xdb\\xbd\\x12\\xfd9\\xbd2\\xb4%=\\x8c\\x93\\x92\\xbdX\\xb3\\xa7\\xbd\\xf2\\xef\\x97\\xbd\\xbf\\xad\\xf0\\xbd\\x8f\\x96\\xda=\\xe8\\xc8\\xf6\\xbb\\xafY\\x11\\xbd\\x9d\\x12\\x07>\\x9fE\\xa2\\xbc\\xf4\\x8c\\r\\xbd4\\xf3\\xab\\xbdd\\x95\\xde<\\xcb`\\n\\xbe\\\\\\xb38=\\x81\\xef\\xa5\\xbdq\\xd2\\x82\\xbd<\\xb2\\xb9;\\xb3\\x82\\x1a\\xbd]\\xad#=\\x10\\xb3\\xbc\\xbd7\\xee\\x84\\xbd\\xf037=\\xfd\\xaa[\\xbd\\x86\\x92\\xbb=\\xc6\\xcf==\\xb6b;\\xbd\\xec\\x01\\xcdJ/\\xcb=\\xb0\\x8a\\xaf\\xbd\\xd2\\x18M\\xbbm\\xa4\\x1b=\\x99\\xc8\\xe8\\xbds\\x0b-\\xbd\\x8c\\xed@=)\\xe0\\x8c\\xbd\\xef\\x17\\xed=\\xdf4\\x84\\xbd\\xb4\\x08\\x8b\\xbc\\'m[;\\xc85\\xbe\\xbd\\x82_\\xd7a\\xe0b=\\xed\\
xbe\\xa3\\xbc\\xb2a\\xa1=e\\x19T=NX\\x95\\xbd\\xd3J\\x8b=3\\xf4\\xd0<\\xe6I\\x96\\xbdg4\\x80\\xbd\\xf4\\x82\\xf7\\xbd\\xc1\\x9d\\xd8\\xbc\\xa1\\x0e{\\xbd\\x8e\\xf4\\xfe\\xbd\\xa3\\x16\\xf3=3\\xbc\\x8b\\xbc\\xeb\\xac\\xb6=\\x8f\\xec\\x99\\xc2v=\\x8a\\x97\\x06\\xbcj\\xc8\\xc0<\\xfd*\\x1f\\xbc\\xa8\\xd2#=5|\\x15\\xbd\\x05\\x10\\xcd\\xbc\\xf1\\xa8\\xb6\\xbd\\x85\\x94z\\xbd=\\xf0x\\xbdj[\\x92\\xbci\\x03(=\\xfeI~\\xbd\\xbdh\\xa4=\\x0e=\\x06>\\xdd\\xf4\\xba\\xbd\\x10\\xa6\\x9d=j\\xe0\\x96\\xbd\\xcb\\x03\\x91=C\\xb3\\xb1\\xbdE\\x8c \\xbb0\\x14\\xee;\\xc7\\xf9&\\xbb\\\\\\xea\\xf6\\xbd\\xdd\\x1e\\xe5\\xbb\\xd2\\xd6\\xa1\\xbc\\x96k\\x06\\xbd\\xa2\\xc5+:\\x845\\x1b\\xbd{\\xe0&\\xbd\\xb1\\x83Q=L\\xa5\\xb4\\xbd\\xa2\\x82\\xcf=\\x19\\'\\xce:\\x9e\\x96\\xc8=\\xec\\xa7\\xb4\\xbbW\\x0f\\xc3=\\n\\x00\\x9f\\xbc\\xb7-\\xbc=hj\\x98=\\xd1s\\xb1=d`s=\\xbb\\xd6 \\xbc\\xee\\xd9\\xa3\\xbc\\x16\\x9d\\xa7=\\xc2\\xfa\\x05=2\\x1b\\xff\\xba\\xca\\xd6g=\\x92\\xec\\x92\\xbdX\\xb5)\\xbbB\\xb8\\xc7\\xbd\\xacf\\x95=A\\xbf\\xc2\\xbc\\xb7\\xb5\\xb7=\\xa5\\xa2>\\xbd\\xb2\\x04\\x9f=\\x06m\\xbb<\\xe2\\xc6\\t\\xbd\\xc9\\x19\\xfd\\xbd!\\xa1;=~M7\\xbc{\\xe4\\x03\\xbc+\\xdd\\xbd=B\\xaf 
\\xbc\\x82\\x95\\xcf;c\\tS=\\x1eU\\xe5\\xbd\\xff\\x86l\\xbd\\xf3\\x06\\xa5;w\\xd6\\x16\\xbd\\x900\\x89<\\xd5;?\\xbd\\x9d[\\x98\\xbd\\xf2\\xff\\x08=\\x84\\x92\\\\;\\xe3\\xa5\\x87\\xbc\\x84\\xf4@=\\xe0\\x03&\\xbb\\xb9Q\\xbe\\xbd\\x9d0!=\\xc8_\\x88\\xbd{\\xa5\\xc8=\\xaafw\\xbdP\\x15\\xe5\\xbc\\x99\\x03\\x89\\xbd\\xe1b,==\\x7f9\\xbd\\xccK\\xe4\\xbdp<\\x95=\\xf0\\xc2\\xcd=\\xf5:\\x88=}FF\\xbdF4\\x1f=\\x80\\xe4b\\xbd\\xec\\x93\\xc8\\xbc\\x0e\\t\\xff;Ga\\xd7\\xbd\\xba\\x03\\xb8\\x04\\xb9H\\xf3/\\xba\\xaf\\xf3\\x10\\xba\\xbe3\\xad\\xb9%&\\xe09\\xc7A29\\x7f\\xb4\\x8b\\xb8\\xeb8\\x1e\\xbaN&\\xe8\\xb9D\\xb2,\\xbaw\\xa3r\\xb9!\\xa5\\x85\\xb9B\\xf6u\\xb9\\xe7\\xcf.\\xba\\x95#\\xd5\\xb9Ps\\xae8\\x0c|\\xce9U\\x05\\xa88g\\xe8\\x1b\\xba\\xab\\x8e\\x83\\xb7\\x07[\\xb6\\xb9@\\x0e^8\\xad\\x1b\\xad\\xb9\\xd5\\x90""\\xba\\x99(\\xe39a\\x01\\xea9/=P\\xb8\\xf9\\xde<:P\\xcf\\x1e:\\xcc\\xde?\\xba\\xe4\\xf0\\xfc\\xb9Ho\\x1c\\xb8I\\n\\x14\\xba\\x04\\xd2\\xa08`\\x9f\\xec8\\xca\\x1b\\x1c\\xba\\x9ea\\xf39\\xc7\\xdfM:\\xbf\\x9f\\xc58*\\xfc\\x12\\xb9\\xbf\\xe1\\x84\\xb8zF#\\xba@\\t\\':k\\xe9\\xbd\\xb9Z+\\xe49\\xbe\\x11\\xce\\xb9.\\xe8$:\\x88:\\xf1\\xb9\\xa3\\xef\\x10\\xba\\xcar\\xe0\\xb9\\xa4I;\\xb9\\xae\\xa3Z\\xb9x\\x0f\\x1b:O4\\x08\\xba:x\\x17\\xba\\xdb! 
:]{""\\xba\\xeaC\\x9d\\xb9\\xae\\xdc\\xbe\\xb9?i3\\xba /\\xc7\\xb9\\n\\xb95:\\x95\\x01\\x0e\\xba\\xe1M\\xa09\\xa6C(:\\xdcD\\xeb9\\xf7$\\xf58/[%\\xbak\\xb12\\xba\\x96r\\xc49#D\\xd79\\x17\\xd5<:.\\x1cA7\\xbf\\xc6\\xdf\\xb6r\\xe0\\x089\\xe7\\xc7\\x05:\\x11\\xa4/\\xba\\xf8\\x8c?:\\x02B@:ky\\x969\\xaa\\xa6=:J\\xad\\xae9>H\\xa2\\xb9sl\\x18:\\xc6N\\n\\xbaq~;\\xba9\\x98}8\\xfe\\xf9""9\\xf3d:\\xba\\xe3\\t\\xd49\\xf6\\xcc\\xd0\\xb9zs3\\xba\\\\\\x02\\x0b\\xbaS\\xa4\\x95\\xb9G\\xa8\\xb09\\xd7\\xdd,:O\\xfc\\x0c\\xba\\x94\\xc2\\xe2\\xb9<\\x97R9\\xa5\\xf6\\xa1\\xb6\\xf1\\x8b\\xe0\\xb9\\xf1\\x89\\xd2\\xb9+f\\xdf8\\x12g\\x04\\xb9\\xda\\xd2\\xda9~\\x14\\xf37\\x8c<$:\\xf53\\x19:\\xfcKx7\\xae\\x9c9\\xba\\x1f\\x8d\\xee\\xb8\\x0f\\x19\\x1e\\xb9\\x12\\xa5\\t:\\xe6\\xb0z9\\xab&\\xa6\\xb7\\xb5\\xbcX\\xb9\\xa2\\x9c\\x129\\x12G\\x80\\xb9\\x91\\x92\\xf69\\x92\\x03\\x01\\xba\\xfb\\xfd\\x0b:\\xb4\\xf4f9\\xcf\\x80\\x148M\\x0b\\xb8\\xb9\\xeb\\xebW8,@\\xf8\\xb9f\\x00\\xe78\\xc64\\xfd\\xb9+[\\x8c8\\xec\\xca\\xd89\\x89\\xa6%\\xb8\\xeff\\x13\\xba\\x11_\\xf8\\xb9)\\xd0\\xc29\\xda4\\x1f\\xb9O<\\x10:\\xe3P\\x0c:%f\\xb8\\xb9\\x18\\xe6\\xfa9\\x0b/|9\\xd8\\x8b\\xcb9\\x8e_\\'\\xba\\x8f\\xb5\\xbf91r\\xf5\\xb9\\xdf\\xfb)\\xbaYb\\x17\\xb8O=\\xdf8Q\\xea`\\xb9\\x88+\\x058u\\xba\\x13\\xbaG\\x1f\\x18981\\xd09\\xb3\\x96\\xc29\\x99I\\x1e\\xb8G\\xb3\\x0e\\xba|\\x0e\\x879\\xb06\\x049\\xb1F,:o\\x99@\\xba\\x1b\\xa5#9\\xf4\\x00\\xc7\\xb7\\xa6\\x17Q\\xba\\x07Hp9Z\\xc7&:\\xe8\\xa4\\x02:k\\xbd\\xb19\\xffU 
8\\x0c:R\\xba\\xb2\\xcc\\xe49\\xfef\\x9c9\\xf3p6\\xba\\xe4\\x10\\xc1\\xb9\\x1cq9\\xb8$\\xa9\\x06\\xba)w\\x189%\\x18\\x00:\\xeat\\xc9\\xb8\\x86+!\\xb7\\xe2\\x10\\x06\\xbaMx48\\x06\\xf0\\xf2\\xb9S\\x186\\xb9\\xdc\\xc0\\x16:\\xb6\\xb9O\\xb9\\n\\n2\\xbaKP\\x8e9\\xaf\\x88\\xc39\\xb4\\x9e6:\\xba\\xc8\\x1a\\xb9\\x80\\xb5\\x0f:\\xf1\\xb8\\xc39\\xccy\\x1f:\\xbc\\xbf\\x1f\\xba\\xc8\\xaf\\xed9\\x12\\x82\\x19:*\\xfe\\xff8\\x10\\x02\\xfa9-\\x11a90}5:\\x82\\x96\\xa49\\xf8N\\x0f:I\\xde:\\xb9p\\xf3\\xff\\xfft\\xf3\\xff\\xff\\x0f\\x00\\x00\\x00MLIR Converted.\\x00\\x01\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x0e\\x00\\x18\\x00\\x14\\x00\\x10\\x00\\x0c\\x00\\x08\\x00\\x04\\x00\\x0e\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x94\\x02\\x00\\x00\\x98\\x02\\x00\\x00\\x9c\\x02\\x00\\x00\\x04\\x00\\x00\\x00main\\x00\\x00\\x00\\x00\\t\\x00\\x00\\x000\\x02\\x00\\x00\\xcc\\x01\\x00\\x00|\\x01\\x00\\x008\\x01\\x00\\x00\\xf8\\x00\\x00\\x00\\xb4\\x00\\x00\\x00\\x8c\\x00\\x00\\x00<\\x00\\x00\\x00\\x04\\x00\\x00\\x00b\\xfe\\xff\\xff\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x08\\x10\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x08\\xf4\\xff\\xff\\x01\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x13\\x00\\x00\\x00\\x0b\\x00\\x00\\x00\\x08\\x00\\x00\\x00\\x96\\xfe\\xff\\xff\\x1c\\x00\\x00\\x00\\x00\\x00\\x00\\x08\\x1c\\x00\\x00\\x00 
\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x08\\x00\\x07\\x00\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x13\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x12\\x00\\x00\\x00\\n\\x00\\x00\\x00\\t\\x00\\x00\\x00\\x00\\x00\\n\\x00\\x10\\x00\\x0c\\x00\\x08\\x00\\x04\\x00\\n\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x12\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x11\\x00\\x00\\x00\\x07\\x00\\x00\\x00\\x06\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x05$\\x00\\x00\\x00(\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\xf6\\xfe\\xff\\xff\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x11\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\xe6\\xfe\\xff\\xff\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x18\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\xd8\\xfe\\xff\\xff\\x00\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x0f\\x00\\x00\\x00\\x06\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x82\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x05$\\x00\\x00\\x00(\\x00\\x00\\x00\\x01\\x00\\x00\\x00r\\xff\\xff\\xff\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x0f\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x0e\\x00\\x00\\x00b\\xff\\xff\\xff\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x18\\x00\\x00\\x00\\x1c\\x00\\x00\\x00T\\xff\\xff\\xff\\x00\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x0e\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\r\\x00\\x00\\x00\\x05\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x0e\\x00\\x1a\\x00\\x14\\x00\\x10\\x00\\x0c\\x00\\x0b\\x00\\x04\\x00\\x0e\\x00\\x00\\x00$\\x00\\x00\\x00\\x00\\x00\\x00\\x054\\x00\\x00\\x008\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x0e\\x00\\x18\\x00\\x17\\x00\\x10\\x00\\x0c\\x00\\x08\\x00\\x04\\x00\\x0e\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00
\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\r\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x00\\x00\\x0e\\x00\\x14\\x00\\x00\\x00\\x10\\x00\\x0c\\x00\\x0b\\x00\\x04\\x00\\x0e\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x00\\x01$\\x00\\x00\\x00(\\x00\\x00\\x00\\x0c\\x00\\x10\\x00\\x00\\x00\\x0c\\x00\\x08\\x00\\x07\\x00\\x0c\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x15\\x00\\x00\\x00x\\t\\x00\\x00\\xac\\x08\\x00\\x00<\\x08\\x00\\x00\\xe0\\x07\\x00\\x00\\x84\\x07\\x00\\x00,\\x07\\x00\\x00\\xd4\\x06\\x00\\x00\\x88\\x06\\x00\\x00\\x18\\x06\\x00\\x00\\xc0\\x05\\x00\\x00t\\x05\\x00\\x00(\\x05\\x00\\x00\\\\\\x04\\x00\\x00\\xe8\\x03\\x00\\x00\\x14\\x03\\x00\\x00\\x9c\\x02\\x00\\x00\\xc8\\x01\\x00\\x00P\\x01\\x00\\x00\\xf0\\x00\\x00\\x00`\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xf2\\xf6\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x15\\x00\\x00\\x004\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\xff\\xff\\xff\\xff\\x0b\\x00\\x00\\x00\\xd4\\xf6\\xff\\xff\\x19\\x00\\x00\\x00StatefulPartitionedCall:0\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x0b\\x00\\x00\\x00J\\xf7\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x14\\x00\\x00\\x00h\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\xff\\xff\\xff\\xff\\x80\\x00\\x00\\x00,\\xf7\\xff\\xffL\\x00\\x00\\x00sequential_1/dense/MatMul;sequential_1/dense/Relu;sequential_1/dense/BiasAdd\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\xd6\\xf7\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x13\\x00\\x00\\x008\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\xff\\xff\\xff\\xff\\x00y\\x00\\x00\\xb8\\xf7\\xff\\xff\\x1c\\x
00\\x00\\x00sequential_1/flatten/Reshape\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00y\\x00\\x002\\xf8\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00$\\x00\\x00\\x00$\\x00\\x00\\x00\\x12\\x00\\x00\\x00H\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xff\\xff\\xff\\xff\\x16\\x00\\x00\\x00\\x16\\x00\\x00\\x00@\\x00\\x00\\x00\\x1c\\xf8\\xff\\xff$\\x00\\x00\\x00sequential_1/max_pooling2d_2/MaxPool\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x16\\x00\\x00\\x00\\x16\\x00\\x00\\x00@\\x00\\x00\\x00\\xa6\\xf8\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00$\\x00\\x00\\x00$\\x00\\x00\\x00\\x11\\x00\\x00\\x00\\xa4\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xff\\xff\\xff\\xff-\\x00\\x00\\x00-\\x00\\x00\\x00@\\x00\\x00\\x00\\x90\\xf8\\xff\\xff\\x82\\x00\\x00\\x00sequential_1/conv2d_2/Relu;sequential_1/conv2d_2/BiasAdd;sequential_1/conv2d_2/Conv2D;sequential_1/conv2d_2/BiasAdd/ReadVariableOp\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00-\\x00\\x00\\x00-\\x00\\x00\\x00@\\x00\\x00\\x00v\\xf9\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00$\\x00\\x00\\x00$\\x00\\x00\\x00\\x10\\x00\\x00\\x00H\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xff\\xff\\xff\\xff-\\x00\\x00\\x00-\\x00\\x00\\x00 \\x00\\x00\\x00`\\xf9\\xff\\xff$\\x00\\x00\\x00sequential_1/max_pooling2d_1/MaxPool\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00-\\x00\\x00\\x00-\\x00\\x00\\x00 \\x00\\x00\\x00\\xea\\xf9\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00$\\x00\\x00\\x00$\\x00\\x00\\x00\\x0f\\x00\\x00\\x00\\xa4\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xff\\xff\\xff\\xffZ\\x00\\x00\\x00Z\\x00\\x00\\x00 \\x00\\x00\\x00\\xd4\\xf9\\xff\\xff\\x82\\x00\\x00\\x00sequential_1/conv2d_1/Relu;sequential_1/conv2d_1/BiasAdd;sequential_1/conv2d_1/Conv2D;sequential_1/conv2d_1/BiasAdd/ReadVariableOp\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00Z\\x00\\x00\\x00Z\\x00\\x00\\x00 
\\x00\\x00\\x00\\xba\\xfa\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00$\\x00\\x00\\x00$\\x00\\x00\\x00\\x0e\\x00\\x00\\x00D\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xff\\xff\\xff\\xffZ\\x00\\x00\\x00Z\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\xa4\\xfa\\xff\\xff""\\x00\\x00\\x00sequential_1/max_pooling2d/MaxPool\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00Z\\x00\\x00\\x00Z\\x00\\x00\\x00\\x10\\x00\\x00\\x00*\\xfb\\xff\\xff\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00$\\x00\\x00\\x00$\\x00\\x00\\x00\\r\\x00\\x00\\x00\\x9c\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xff\\xff\\xff\\xff\\xb4\\x00\\x00\\x00\\xb4\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x14\\xfb\\xff\\xff{\\x00\\x00\\x00sequential_1/conv2d/Relu;sequential_1/conv2d/BiasAdd;sequential_1/conv2d/Conv2D;sequential_1/conv2d/BiasAdd/ReadVariableOp1\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\xb4\\x00\\x00\\x00\\xb4\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\xba\\xfc\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x0c\\x00\\x00\\x00(\\x00\\x00\\x00\\xc4\\xfb\\xff\\xff\\x1b\\x00\\x00\\x00sequential_1/dense_1/MatMul\\x00\\x02\\x00\\x00\\x00\\x0b\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x02\\xfd\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x0b\\x00\\x00\\x00(\\x00\\x00\\x00\\x0c\\xfc\\xff\\xff\\x19\\x00\\x00\\x00sequential_1/dense/MatMul\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00y\\x00\\x00J\\xfd\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\n\\x00\\x00\\x008\\x00\\x00\\x00T\\xfc\\xff\\xff)\\x00\\x00\\x00sequential_1/dense/BiasAdd/ReadVariableOp\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x9e\\xfd\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\t\\x00\\x00\\x008\\x00\\x00\\x00\\xa8\\xfc\\xff\\xff+\\x00\\x00\\x00sequential_1/dense_1/BiasAdd/ReadVariableOp\\x00\\x01\\x00\\x00\\x00\\x0b\\x00\\x00\\x00\\x00\\x00\\x16\\x00\\x1c\\x00\\x18\\x00\\x17\\x00\\x10\\x00\\x0c\\x00\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x07\\x00\\x16\
\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x02(\\x00\\x00\\x00\\x18\\xfd\\xff\\xff\\x1a\\x00\\x00\\x00sequential_1/flatten/Const\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00R\\xfe\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x07\\x00\\x00\\x00,\\x00\\x00\\x00\\\\\\xfd\\xff\\xff\\x1c\\x00\\x00\\x00sequential_1/conv2d_2/Conv2D\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00@\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x03\\x00\\x00\\x00 \\x00\\x00\\x00\\xa6\\xfe\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x06\\x00\\x00\\x00,\\x00\\x00\\x00\\xb0\\xfd\\xff\\xff\\x1c\\x00\\x00\\x00sequential_1/conv2d_1/Conv2D\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00 \\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\xfa\\xfe\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x05\\x00\\x00\\x00<\\x00\\x00\\x00\\x04\\xfe\\xff\\xff,\\x00\\x00\\x00sequential_1/conv2d_2/BiasAdd/ReadVariableOp\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00@\\x00\\x00\\x00R\\xff\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x04\\x00\\x00\\x00<\\x00\\x00\\x00\\\\\\xfe\\xff\\xff,\\x00\\x00\\x00sequential_1/conv2d_1/BiasAdd/ReadVariableOp\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00 \\x00\\x00\\x00\\xaa\\xff\\xff\\xff\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x03\\x00\\x00\\x008\\x00\\x00\\x00\\xb4\\xfe\\xff\\xff*\\x00\\x00\\x00sequential_1/conv2d/BiasAdd/ReadVariableOp\\x00\\x00\\x01\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x16\\x00\\x18\\x00\\x14\\x00\\x00\\x00\\x10\\x00\\x0c\\x00\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x07\\x00\\x16\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x10\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x88\\x00\\x00\\x00 
\\xff\\xff\\xffz\\x00\\x00\\x00sequential_1/conv2d/Relu;sequential_1/conv2d/BiasAdd;sequential_1/conv2d/Conv2D;sequential_1/conv2d/BiasAdd/ReadVariableOp\\x00\\x00\\x04\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x16\\x00\\x1c\\x00\\x18\\x00\\x00\\x00\\x14\\x00\\x10\\x00\\x0c\\x00\\x00\\x00\\x00\\x00\\x08\\x00\\x07\\x00\\x16\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x14\\x00\\x00\\x00(\\x00\\x00\\x00(\\x00\\x00\\x00\\x01\\x00\\x00\\x00H\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xff\\xff\\xff\\xff\\xb4\\x00\\x00\\x00\\xb4\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x04\\x00\\x04\\x00\\x04\\x00\\x00\\x00""\\x00\\x00\\x00serving_default_sequential_input:0\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\xb4\\x00\\x00\\x00\\xb4\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x04\\x00\\x00\\x00@\\x00\\x00\\x00$\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xdc\\xff\\xff\\xff\\t\\x00\\x00\\x00\\x00\\x00\\x00\\t\\xe8\\xff\\xff\\xff\\x16\\x00\\x00\\x00\\x00\\x00\\x00\\x16\\xf4\\xff\\xff\\xff\\x11\\x00\\x00\\x00\\x00\\x00\\x00\\x11\\x0c\\x00\\x0c\\x00\\x0b\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x03'', does not exist.
```
"
tensorflow/tensorflow,2023-09-07 21:31:59,bug,tf.data.Dataset.list_files(): You must feed a value for placeholder tensor,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.12.0-rc1-12-g0db597d0d75 2.12.0
### Custom code
No
### OS platform and distribution
Ubuntu 20.04.5 LTS
### Mobile device
_No response_
### Python version
3.8.10
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Reading a dataset obtained with `tf.data.Dataset.list_files()` prints incomprehensible warnings.
Create two files:
```bash
touch a.txt
touch b.txt
```
Run this python program:
```python
import tensorflow as tf
dataset = tf.data.Dataset.list_files(['a.txt', 'b.txt'])
for f in dataset:
print(f)
```
Prints some incomprehensible warnings:
```
tf.Tensor(b'b.txt', shape=(), dtype=string)
tf.Tensor(b'a.txt', shape=(), dtype=string)
2023-09-07 17:19:04.634978: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_0' with dtype string and shape [2]
[[{{node Placeholder/_0}}]]
2023-09-07 17:19:04.635273: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_0' with dtype string and shape [2]
[[{{node Placeholder/_0}}]]
```
This is a ~duplicate of https://github.com/tensorflow/tensorflow/issues/41648 that was marked as resolved 3 years ago.
### Standalone code to reproduce the issue
```shell
With tensorflow==2.12.0:
https://colab.research.google.com/drive/1_kjUH6BzcLnlM4rc8mY7JcnhE5NdaRGy?usp=sharing
No warning with tensorflow==2.11.1
https://colab.research.google.com/drive/1QhatrE7hdJIxIUAIrYw5yDPQ50SeqFrI?usp=sharing
```
### Relevant log output
```shell
2023-09-07 17:19:04.635273: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_0' with dtype string and shape [2]
[[{{node Placeholder/_0}}]]
```
"
tensorflow/tensorflow,2023-09-05 13:06:02,bug,Cannot create interpreter when using GPU-Delegate or NNAPI-Delegate,"**System information**
- Android Device information: samsung/a14mnseea/a14m:13/TP1A.220624.014/A145RXXU2AWG3:user/release-keys
- TensorFlow Lite in Play Services SDK version (found in `build.gradle`):
- com.google.android.gms:play-services-tflite-java:16.1.0
- com.google.android.gms:play-services-tflite-support:16.1.0
- com.google.android.gms:play-services-tflite-gpu:16.2.0
- Google Play Services version: 23.33.16
**Standalone code to reproduce the issue**
var useGpu = Tasks.await(TfLiteGpu.isGpuDelegateAvailable(context));
var optionsBuilder = TfLiteInitializationOptions.builder();
optionsBuilder.setEnableGpuDelegateSupport(useGpu);
Tasks.await(TfLite.initialize(context, optionsBuilder.build()));
var options = new InterpreterApi.Options();
if(useGpu){
options.addDelegateFactory(new GpuDelegateFactory());
}
/*delegate = new NnApiDelegate();
options.addDelegate(delegate);
options.setUseNNAPI(true);*/
options.setRuntime(InterpreterApi.Options.TfLiteRuntime.FROM_SYSTEM_ONLY);
//load Model from App assets
interpreter = InterpreterApi.create(new File(modelPath), options);
**Any other info / logs**
I oriented my code on the official documentation [on here](https://www.tensorflow.org/lite/android/delegates/gpu)
Logcat-Output:
java.lang.IllegalArgumentException: Internal error: Cannot create interpreter:
at com.google.android.gms.tflite.NativeInterpreterWrapper.createInterpreter(Native Method)
at com.google.android.gms.tflite.NativeInterpreterWrapper.zzl(com.google.android.gms:play-services-tflite-java@@16.1.0:34)
at com.google.android.gms.tflite.NativeInterpreterWrapper.(com.google.android.gms:play-services-tflite-java@@16.1.0:6)
at com.google.android.gms.tflite.zzd.(com.google.android.gms:play-services-tflite-java@@16.1.0:1)
at com.google.android.gms.tflite.InterpreterFactoryImpl.create(com.google.android.gms:play-services-tflite-java@@16.1.0:2)
at org.tensorflow.lite.InterpreterApi.create(InterpreterApi.java:336)
at com.example.tfliteaudio.TFLiteEngine.initialize(TFLiteEngine.java:83)
at com.example.tfliteaudio.MainActivity.lambda$transcribeAudio$5(MainActivity.java:143)
at com.example.tfliteaudio.MainActivity.$r8$lambda$1xqJ9hAvPXTc26gXgWfy8QcV0VE(Unknown Source:0)
at com.example.tfliteaudio.MainActivity$$ExternalSyntheticLambda2.run(Unknown Source:2)
at java.lang.Thread.run(Thread.java:1012)
"
tensorflow/tensorflow,2023-09-03 14:28:22,bug,The model does not save and load correctly when containing `tf.keras.layers.experimental.preprocessing.StringLookup` layer,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.14.0-rc1
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
The model does not save and load correctly when containing `tf.keras.layers.experimental.preprocessing.StringLookup` layer.
It seems that the `vocabulary` is not saved or loaded correctly, which is empty when loading the model.
This behavior may relate to #61369, but different API endpoint.
### Standalone code to reproduce the issue
```python
import pickle
import tensorflow as tf
print(tf.version.GIT_VERSION, tf.version.VERSION, flush=True)
model_input = tf.keras.Input(shape=(1,), dtype=tf.int64)
lookup = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=['a', 'b'])(model_input)
output = tf.keras.layers.Dense(10)(lookup)
full_model = tf.keras.Model(model_input, output)
# this part works
try:
model_bytes = pickle.dumps(full_model)
model_recovered = pickle.loads(model_bytes)
except Exception as e:
print(""Failed! Error:"", e, flush=True)
else:
print(""Success!"", flush=True)
# this part throws an error
try:
full_model.save(""/tmp/temp_model"")
full_model_loaded = tf.keras.models.load_model(""/tmp/temp_model"")
model_bytes = pickle.dumps(full_model_loaded)
model_recovered = pickle.loads(model_bytes)
except Exception as e:
print(""Failed! Error:"", e, flush=True)
else:
print(""Success!"", flush=True)
```
### Relevant log output
```text
v2.14.0-rc0-34-gdd01672d9a9 2.14.0-rc1
Success!
WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.
WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.
Failed! Error: Error when deserializing class 'StringLookup' using config={'name': 'string_lookup', 'trainable': True, 'dtype': 'int64', 'invert': False, 'max_tokens': None, 'num_oov_indices': 1, 'oov_token': '[UNK]', 'mask_token': None, 'output_mode': 'int', 'sparse': False, 'pad_to_max_tokens': False, 'idf_weights': None, 'vocabulary': [], 'vocabulary_size': 3, 'encoding': 'utf-8'}.
Exception encountered: Cannot set an empty vocabulary, you passed [].
```
"
tensorflow/tensorflow,2023-08-28 20:15:53,bug,rnn with initial_state model can't be loaded with load_model ,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
A simple RNN with LSTMcell model.
I want to initialize the states with `initial_state_h` and `initial_state_c`.
```
batch_size= 16
inputs = tf.keras.layers.Input(shape=(20,5),batch_size=batch_size)
units = 8
lstm_cell_fw = tf.keras.layers.LSTMCell(units)
initial_state_h = tf.random.normal(shape = (batch_size,units), mean=0., stddev=10., dtype=tf.dtypes.float32)
initial_state_c = tf.random.normal(shape = (batch_size,units), mean=0., stddev=10., dtype=tf.dtypes.float32)
lstm_layer_fw = tf.keras.layers.RNN(lstm_cell_fw, stateful=True, return_state=True, return_sequences=False)
outputs,states_h_fw, states_c_fw= lstm_layer_fw(inputs,initial_state = [initial_state_h,initial_state_c])
lstm_dense1 = tf.keras.layers.Dense(16, activation = 'relu')
lstm_dense2 = tf.keras.layers.Dense(2, activation = 'softmax')
out=lstm_dense2(lstm_dense1(outputs))
model = tf.keras.models.Model(inputs, out)
```
After compile and train, the model is saved with `model.save('my_model_test.keras')`.
```
model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
xTrain = np.random.rand(96,20,5)
yTrain = np.random.rand(96,2)
for i in range(10):
model.fit(xTrain, yTrain,batch_size=batch_size)
model.save('my_model_test.keras')
```
But when I try to load it with `load_model = tf.keras.models.load_model('my_model_test.keras')`, it gives error:
```
13 frames
[/usr/local/lib/python3.10/dist-packages/keras/src/backend.py](https://localhost:8080/#) in int_shape(x)
1530 """"""
1531 try:
-> 1532 shape = x.shape
1533 if not isinstance(shape, tuple):
1534 shape = tuple(shape.as_list())
AttributeError: 'float' object has no attribute 'shape'
```
I tried to save in other format, `.h5`, `.json`, etc. All give the same error.
But, if I don't use `initial_state` in `outputs,states_h_fw, states_c_fw= lstm_layer_fw(inputs)`, everything goes well. No problem with `load_model`.
### Standalone code to reproduce the issue
```shell
https://colab.research.google.com/drive/1uKEpnddzeYSRG_1vKjtcQ4OLNwLuQeqy?usp=sharing
```
### Relevant log output
```shell
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
in ()
----> 1 load_model = tf.keras.models.load_model('my_model_test.keras')
13 frames
/usr/local/lib/python3.10/dist-packages/keras/src/backend.py in int_shape(x)
1530 """"""
1531 try:
-> 1532 shape = x.shape
1533 if not isinstance(shape, tuple):
1534 shape = tuple(shape.as_list())
AttributeError: 'float' object has no attribute 'shape'
```
```
"
tensorflow/tensorflow,2023-08-26 15:17:24,bug,Issues with Running Custom TensorFlow Lite Model in C++,"### 1. System information
- Platform and Linux distribution kubuntu 22.04:
- TensorFlow is built from C++ source code:
- Tensorflow 2.11:
### 2. Code
- Link to models that I trained and tried but they don't work in C++ - https://github.com/asuemg1/models_hub/tree/main/Tensorflow%20Lite/Object%20Detection/my_ssd_mobnet/Optimized%20Models
- Link to the model that works in C++ - https://github.com/ankdesh/tflite/blob/master/Android-TensorFlow-Lite-Example/app/src/main/assets/mobilenet_quant_v1_224.tflite
- Link to C++ code (mainwindow.cpp file):
https://drive.google.com/file/d/1u87yK-1qqKeHBjUKq-Lkxi0LMQKkdrPg/view?usp=sharing
### 3. Crash after conversion
- The model does not work in C++.
Please tell me how you can run the Tensorflow Lite model (tflite format) for object detection or image classification in C ++.
My steps:
- Trained the model for object detection using Tensorflow 2 API object detection.
- After training, I converted the model to the savedmodel format, and then to tflite.
- Next, I needed to embed this model into a C++ project. In order to use it in the future on low-power devices such as rasberry pi
My actions:
- Compiled the Tensorflow Lite library for C++.
- Found a test case using the mobilenet_quant_v1_224.tflite model. In this test case, the model runs successfully. However, when trying to use my own model, it does not work, although it has been tested and works in Python.
What was found out:
- The mobilenet_quant_v1_224.tflite model was quantized and had no metadata and no internal labelmap.txt file.
- TensorFlow Lite API 2 for C++ does not currently support metadata.
If you have any information on how to get my tflite model to work in C++ please share."
tensorflow/tensorflow,2023-08-25 07:37:33,bug,JIT yields inconsistent results using tf.math.top_k,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.15.0-dev20230824
### Custom code
Yes
### OS platform and distribution
Ubuntu 22.04
### Mobile device
_No response_
### Python version
3.11
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
cuda_11.8.r11.8/compiler.31833905_0 / cuDNN version 8700
### GPU model and memory
NVIDIA GeForce RTX 2080 Ti
### Current behavior?
JIT yields inconsistent results using `tf.math.top_k` when `index_type=tf.int32` (no issue with `index_type=tf.int64`).
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
@tf.function(jit_compile=True)
def tf_func(shape):
x = tf.random.stateless_normal(shape, seed=(1, 2))
x = tf.transpose(x, perm=[1, 0])
topk_max, indices = tf.math.top_k(x, 1, sorted=False, index_type=tf.int32)
reduce_max = tf.reduce_max(x, axis=1, keepdims=True)
return topk_max - reduce_max
def check(shape):
should_be_all_zero = tf_func(shape)
print(f""should_be_all_zero shape {shape}:\\n{should_be_all_zero}"")
check((1024, 3))
check((1023, 3))
check((1024, 2))
```
### Relevant log output
```shell
should_be_all_zero shape (1024, 3):
[[-0.7787831 ]
[ 0.47324872]
[ 0.30553436]]
should_be_all_zero shape (1023, 3):
[[0.]
[0.]
[0.]]
should_be_all_zero shape (1024, 2):
[[0.]
[0.]]
```
"
tensorflow/tensorflow,2023-08-22 09:38:14,bug,Please help ! ,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
source
### TensorFlow version
1.13.1
### Custom code
Yes
### OS platform and distribution
Linux Ubuntu 20.0
### Mobile device
_No response_
### Python version
3.6
### Bazel version
_No response_
### GCC/compiler version
10.5.0
### CUDA/cuDNN version
10
### GPU model and memory
RTX 3060 12G
### Current behavior?
I am running the Octopus repo (https://github.com/thmoa/octopus) which uses tensorflow-gpu version 1.13.1. When I run that model with Python, I get some errors from TensorFlow. Please help me.
### Standalone code to reproduce the issue
```shell
import os
import argparse
import tensorflow as tf
import keras.backend as K
from glob import glob
from lib.io import openpose_from_file, read_segmentation, write_mesh
from model.octopus import Octopus
def main(weights, name, segm_dir, pose_dir, out_dir, opt_pose_steps, opt_shape_steps):
segm_files = sorted(glob(os.path.join(segm_dir, '*.png')))
pose_files = sorted(glob(os.path.join(pose_dir, '*.json')))
if len(segm_files) != len(pose_files) or len(segm_files) == len(pose_files) == 0:
exit('Inconsistent input.')
K.set_session(tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))))
model = Octopus(num=len(segm_files))
model.load(weights)
segmentations = [read_segmentation(f) for f in segm_files]
joints_2d, face_2d = [], []
for f in pose_files:
j, f = openpose_from_file(f)
assert(len(j) == 25)
assert(len(f) == 70)
joints_2d.append(j)
face_2d.append(f)
if opt_pose_steps:
print('Optimizing for pose...')
model.opt_pose(segmentations, joints_2d, opt_steps=opt_pose_steps)
if opt_shape_steps:
print('Optimizing for shape...')
model.opt_shape(segmentations, joints_2d, face_2d, opt_steps=opt_shape_steps)
print('Estimating shape...')
pred = model.predict(segmentations, joints_2d)
write_mesh('{}/{}.obj'.format(out_dir, name), pred['vertices'][0], pred['faces'])
print('Done.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'name',
type=str,
help=""Sample name"")
parser.add_argument(
'segm_dir',
type=str,
help=""Segmentation images directory"")
parser.add_argument(
'pose_dir',
type=str,
help=""2D pose keypoints directory"")
parser.add_argument(
'--opt_steps_pose', '-p', default=5, type=int,
help=""Optimization steps pose"")
parser.add_argument(
'--opt_steps_shape', '-s', default=15, type=int,
help=""Optimization steps"")
parser.add_argument(
'--out_dir', '-od',
default='out',
help='Output directory')
parser.add_argument(
'--weights', '-w',
default='weights/octopus_weights.hdf5',
help='Model weights file (*.hdf5)')
args = parser.parse_args()
main(args.weights, args.name, args.segm_dir, args.pose_dir, args.out_dir, args.opt_steps_pose, args.opt_steps_shape)
```
### Relevant log output
```shell
Processing sample...
> Optimizing for pose...
0%| | 0/10 [00:00, ?it/s]2023-08-22 16:24:18.296359: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally
2023-08-22 16:25:50.156420: I tensorflow/core/kernels/cuda_solvers.cc:159] Creating CudaSolver handles for stream 0x55fa094fdcf0
2023-08-22 16:26:08.284736: E tensorflow/stream_executor/cuda/cuda_blas.cc:698] failed to run cuBLAS routine cublasGemmBatchedEx: CUBLAS_STATUS_EXECUTION_FAILED
2023-08-22 16:26:08.284773: E tensorflow/stream_executor/cuda/cuda_blas.cc:2620] Internal: failed BLAS call, see log for details
2023-08-22 16:26:08.326578: I tensorflow/stream_executor/stream.cc:5014] [stream=0x55fa0950bb90,impl=0x55fa093dbf20] did not memcpy device-to-host; source: 0x813bc6700
2023-08-22 16:26:08.326623: F tensorflow/core/framework/op_kernel.cc:1408] Check failed: nullptr == ctx->op_kernel().AsAsync() (nullptr vs. 0x55fa38108400)Use OP_REQUIRES_ASYNC in AsyncOpKernel implementations.
Aborted
```
"
tensorflow/tensorflow,2023-08-22 07:36:21,bug,python3.11.3 mismatch with tensorflow 2.13.0,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
No
### OS platform and distribution
macOS Ventura 13.4
### Mobile device
_No response_
### Python version
3.11.3
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
![image](https://github.com/tensorflow/tensorflow/assets/11846497/3459b246-e377-499d-9d9f-e5f666fb7957)
### Standalone code to reproduce the issue
```shell
import tensorflow
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-08-21 15:11:42,bug,Unable to serialize VariableSpec,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
source
### TensorFlow version
tf 2.13.0
### Custom code
Yes
### OS platform and distribution
windows 11
### Mobile device
_No response_
### Python version
3.11.4
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
A graph
### Standalone code to reproduce the issue
```shell
from models import *
from conftest import DDPGAgent
import matplotlib as plt
import pytest
import time
# Just disables the warning, doesn't take advantage of AVX/FMA to run faster
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# setting for hidden layers
Layer1 = 400
Layer2 = 300
class MecTer(object):
""""""
MEC terminal parent class
""""""
def __init__(self, user_config, train_config):
self.rate = user_config['rate']
self.dis = user_config['dis']
self.id = user_config['id']
self.state_dim = user_config['state_dim']
self.action_dim = user_config['action_dim']
self.action_bound = user_config['action_bound']
self.data_buf_size = user_config['data_buf_size']
self.t_factor = user_config['t_factor']
self.penalty = user_config['penalty']
self.sigma2 = train_config['sigma2']
self.init_path = ''
self.isUpdateActor = True
self.init_seqCnt = 0
if 'model' not in user_config:
self.channelModel = MarkovModel(self.dis, seed=train_config['random_seed'])
else:
n_t = 1
n_r = user_config['num_r']
self.channelModel = ARModel(self.dis, n_t, n_r, seed=train_config['random_seed'])
self.DataBuf = 0
self.Channel = self.channelModel.getCh()
self.SNR = 0
self.Power = np.zeros(self.action_dim)
self.Reward = 0
self.State = []
# some pre-defined parameters
self.k = 1e-27
self.t = 0.001
self.L = 500
def localProc(self, p):
return np.power(p / self.k, 1.0 / 3.0) * self.t / self.L / 1000
def localProcRev(self, b):
return np.power(b * 1000 * self.L / self.t, 3.0) * self.k
def offloadRev(self, b):
return (np.power(2.0, b) - 1) * self.sigma2 / np.power(np.linalg.norm(self.Channel), 2)
def offloadRev2(self, b):
return self.action_bound if self.SNR <= 1e-12 else (np.power(2.0, b) - 1) / self.SNR
def getCh(self):
return self.Channel
def setSNR(self, snr):
self.SNR = snr
self.sampleCh()
channel_gain = np.power(np.linalg.norm(self.Channel), 2) / self.sigma2
self.State = np.array([self.DataBuf, snr, channel_gain])
def sampleData(self):
data_t = np.log2(1 + self.Power[0] * self.SNR)
data_p = self.localProc(self.Power[1])
over_power = 0
self.DataBuf -= data_t + data_p
if self.DataBuf < 0:
over_power = self.Power[1] - self.localProcRev(np.fmax(0, self.DataBuf + data_p))
self.DataBuf = 0
data_r = np.random.poisson(self.rate)
self.DataBuf += data_r
return data_t, data_p, data_r, over_power
def sampleCh(self):
# self.Channel = self.channelModel.sampleCh()
# Calculate channel gain using channel quantization
raw_channel_gain = np.linalg.norm(self.channelModel.sampleCh())
min_val = np.min(self.Channel)
max_val = np.max(self.Channel)
# Quantize the channel gain into 10 levels
quantized_channel_gain = min_val + (max_val - min_val) * (raw_channel_gain - min_val) / (max_val - min_val)
quantized_channel_gain = np.clip(quantized_channel_gain, min_val, max_val)
self.Channel = quantized_channel_gain
return self.Channel
def reset(self, rate, seqCount):
self.rate = rate
self.DataBuf = np.random.randint(0, self.data_buf_size - 1) / 2.0
self.sampleCh()
if seqCount >= self.init_seqCnt:
self.isUpdateActor = True
return self.DataBuf
class MecTermRL(MecTer):
""""""
MEC terminal class using RL
""""""
# rate:packet poisson arrival, dis: distance in meters
def __init__(self, user_config, train_config):
MecTer.__init__(self, user_config, train_config)
self.agent = DDPGAgent(user_config, train_config)
if 'init_path' in user_config and len(user_config['init_path']) > 0:
self.init_path = user_config['init_path']
self.init_seqCnt = user_config['init_seqCnt']
self.isUpdateActor = False
def feedback(self, snr, done):
isOverflow = 0
self.SNR = snr
# update the data buffer
[data_t, data_p, data_r, over_power] = self.sampleData()
# get the reward for the current slot
self.Reward = -self.t_factor * np.sum(self.Power) * 10 - (1 - self.t_factor) * self.DataBuf
# estimate the channel for next slot
self.sampleCh()
# update the actor and critic network
channel_gain = np.power(np.linalg.norm(self.Channel), 2) / self.sigma2
next_state = np.array([self.DataBuf, snr, channel_gain])
self.agent.update(self.State, self.Power, self.Reward, done, next_state, self.isUpdateActor)
# update system state
self.State = next_state
# return the reward in this slot
sum_power = np.sum(self.Power) - over_power
return self.Reward, sum_power, over_power, data_t, data_p, data_r, self.DataBuf, channel_gain, isOverflow
def predict(self, isRandom):
power, noise = self.agent.predict(self.State, self.isUpdateActor)
self.Power = np.fmax(0, np.fmin(self.action_bound, power))
return self.Power, noise
class MecSvrEnv(object):
""""""
Simulation environment
""""""
def __init__(self, user_list, num_att, sigma2, max_len):
self.user_list = user_list
self.num_user = len(user_list)
self.num_att = num_att
self.sigma2 = sigma2
self.count = 0
self.seqCount = 0
self.max_len = max_len
# specially designed for Greedy agent training
# self.data_set = []
def init_target_network(self):
for user in self.user_list:
user.critic.init_target_network(path='data_set_OGD.npz')
def plot_channel_gains_histogram(self):
# Get the channel gains for all users
channel_gains = [np.abs(user.getCh()) for user in self.user_list]
# Flatten the channel gains to a 1D array
flat_channel_gains = np.concatenate(channel_gains)
# plot a histogram for the channel gains
plt.hist(np.abs(flat_channel_gains), bins=20, edgecolor='black')
plt.title(""Channel Gains Histogram"")
plt.xlabel(""Channel Gain Magnitude"")
plt.ylabel(""Frequency"")
plt.show()
def step_transmit(self, isRandom=True):
# get the channel vectors
channels = np.transpose([user.getCh() for user in self.user_list])
# get the transmit powers
powers = []
noises = []
for i in range(self.num_user):
p, n = self.user_list[i].predict(isRandom)
powers.append(p.copy())
noises.append(n.copy())
# compute the snr for each user
powers = np.array(powers)
noises = np.array(noises)
snr_list = self.compute_snr(channels, powers[:, 0])
rewards = np.zeros(self.num_user)
powers = np.zeros(self.num_user)
over_powers = np.zeros(self.num_user)
data_ts = np.zeros(self.num_user)
data_ps = np.zeros(self.num_user)
data_rs = np.zeros(self.num_user)
data_buf_sizes = np.zeros(self.num_user)
next_channels = np.zeros(self.num_user)
isOverflows = np.zeros(self.num_user)
self.count += 1
# feedback the snr to each user
for i in range(self.num_user):
[rewards[i], powers[i], over_powers[i], data_ts[i], data_ps[i], data_rs[i], data_buf_sizes[i],
next_channels[i], isOverflows[i]] = self.user_list[i].feedback(snr_list[i], self.count >= self.max_len)
return rewards, self.count >= self.max_len, powers, over_powers, noises, data_ts, data_ps, data_rs, data_buf_sizes, next_channels, isOverflows
def compute_snr(self, channels, powers):
# FDD - Computing SNR
H_inv = np.linalg.pinv(channels)
total_signal_power = np.power(np.linalg.norm(channels, axis=1), 2)
noise = np.power(np.linalg.norm(H_inv, axis=1), 2) * self.sigma2
snr_list = total_signal_power / noise
return snr_list
def reset(self, isTrain=True):
self.count = 0
if isTrain:
init_data_buf_size = [user.reset(user.rate, self.seqCount) for user in self.user_list]
# get the channel vectors
channels = np.transpose([user.getCh() for user in self.user_list])
# get the transmit powers to start
powers = [np.random.uniform(0, user.action_bound) for user in self.user_list]
# compute the snr for each user
snr_list = self.compute_snr(channels, powers)
else:
init_data_buf_size = [0 for user in self.user_list]
snr_list = [0 for user in self.user_list]
for i in range(self.num_user):
self.user_list[i].setSNR(snr_list[i])
self.seqCount += 1
return init_data_buf_size
# Create the environment
# def env():
# envi = MecSvrEnv(user_list, NUM_R, SIGMA2, MAX_EPISODE_LEN)
# return envi
# env = MecSvrEnv(user_list, NUM_R, SIGMA2, MAX_EPISODE_LEN)
# env.init_target_network()
train_config = {
'sigma2': 0.01,
'minibatch_size': 64,
'actor_lr': 0.0001,
'tau': 0.001,
'critic_lr': 0.001,
'gamma': 0.99,
'buffer_size': 250000,
'random_seed': int(time.perf_counter() * 1000 % 1000),
'noise_sigma': 0.12
}
# Define user_list_info with user information
user_list_info = [
{'state_dim': 3,
'action_dim': 1,
'id': '1',
'action_bound': 1,
'model': 'AR',
'num_r': 4,
'rate': 3.0,
'dis': 100,
'data_buf_size': 100,
't_factor': 1.0,
'penalty': 1000, }
]
# sess = tf.compat.v1.Session()
# Create instances of the User class from the dictionary in user_list
user_list = [
MecTermRL(user_config=user_info, train_config=train_config)
for user_info in user_list_info
]
# Initialize variables
for user in user_list:
user.agent.init_target_network()
# (
# path=""C:/Users/USER/PycharmProjects/mec_drl-masterr/mec_drl-master/mec_drl-master/data_set_OGD.npz""
# )
@pytest.fixture
def env():
# Create and return the environment object
# Make sure to adjust this to properly create your environment instance
return MecSvrEnv(user_list, NUM_R, SIGMA2, MAX_EPISODE_LEN)
```
### Relevant log output
```shell
WARNING:tensorflow:The following Variables were used in a Lambda layer's call (tf.__operators__.add_1), but are not present in its tracked objects: . This is a strong indication that the Lambda layer should be rewritten as a subclassed Layer.
Traceback (most recent call last):
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\saving\\legacy\\saved_model\\json_utils.py"", line 207, in get_json_type
type_spec_name = type_spec_registry.get_name(type(obj))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\tensorflow\\python\\framework\\type_spec_registry.py"", line 75, in get_name
raise ValueError(""TypeSpec %s.%s has not been registered."" %
ValueError: TypeSpec tensorflow.python.ops.resource_variable_ops.VariableSpec has not been registered.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File ""C:\\Users\\USER\\PycharmProjects\\mec_drl-masterr\\mec_drl-master\\mec_drl-master\\test.py"", line 303, in
user_list = [
^
File ""C:\\Users\\USER\\PycharmProjects\\mec_drl-masterr\\mec_drl-master\\mec_drl-master\\test.py"", line 304, in
MecTermRL(user_config=user_info, train_config=train_config)
File ""C:\\Users\\USER\\PycharmProjects\\mec_drl-masterr\\mec_drl-master\\mec_drl-master\\test.py"", line 125, in __init__
self.agent = DDPGAgent(user_config, train_config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\PycharmProjects\\mec_drl-masterr\\mec_drl-master\\mec_drl-master\\conftest.py"", line 24, in __init__
self.critic = CriticNetwork(self.state_dim, self.action_dim, float(train_config['critic_lr']),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\PycharmProjects\\mec_drl-masterr\\mec_drl-master\\mec_drl-master\\ddpg.py"", line 102, in __init__
self.target_model = tf.keras.models.clone_model(self.model)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\models\\cloning.py"", line 539, in clone_model
return _clone_functional_model(
^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\models\\cloning.py"", line 222, in _clone_functional_model
model_configs, created_layers = _clone_layers_and_model_config(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\models\\cloning.py"", line 298, in _clone_layers_and_model_config
config = functional.get_network_config(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\engine\\functional.py"", line 1583, in get_network_config
node_data = node.serialize(
^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\engine\\node.py"", line 219, in serialize
kwargs = tf.nest.map_structure(_serialize_keras_tensor, kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\tensorflow\\python\\util\\nest.py"", line 624, in map_structure
return nest_util.map_structure(
^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\tensorflow\\python\\util\\nest_util.py"", line 1054, in map_structure
return _tf_core_map_structure(func, *structure, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\tensorflow\\python\\util\\nest_util.py"", line 1094, in _tf_core_map_structure
[func(*x) for x in entries],
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\tensorflow\\python\\util\\nest_util.py"", line 1094, in
[func(*x) for x in entries],
^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\engine\\node.py"", line 215, in _serialize_keras_tensor
return (_COMPOSITE_TYPE, json_utils.Encoder().encode(t))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\saving\\legacy\\saved_model\\json_utils.py"", line 55, in encode
return super().encode(_encode_tuple(obj))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\json\\encoder.py"", line 200, in encode
chunks = self.iterencode(o, _one_shot=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\json\\encoder.py"", line 258, in iterencode
return _iterencode(o, 0)
^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\saving\\legacy\\saved_model\\json_utils.py"", line 52, in default
return get_json_type(obj)
^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\saving\\legacy\\saved_model\\json_utils.py"", line 225, in get_json_type
""spec"": get_json_type(spec),
^^^^^^^^^^^^^^^^^^^
File ""C:\\Users\\USER\\anaconda3\\Lib\\site-packages\\keras\\src\\saving\\legacy\\saved_model\\json_utils.py"", line 214, in get_json_type
raise ValueError(
ValueError: Unable to serialize VariableSpec(shape=(300,), dtype=tf.float32, trainable=True, alias_id=None) to JSON, because the TypeSpec class has not been registered.
```
"
tensorflow/tensorflow,2023-08-20 18:45:47,bug,Activation function of a Dense hidden layer not getting invoked.,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
v2.13.0-rc2-7-g1cb1a030a62 2.13.0
### Custom code
Yes
### OS platform and distribution
MacOS 13.4, MacBook Pro M2 Max
### Mobile device
_No response_
### Python version
3.8.17
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Issue:
In the given auto-encoder setup, the encoder layers activation function (relu) is not getting invoked.
1. We create a simple auto-encoder, with Input size 3, hidden size 2, and output back to 3.
2. The activation function of the encoder layer is set a relu.
3. The weights of the encoder layers are all made negative. Idea is, if input is +ve, all the neurons will have negative value and relu will o/p zero.
4. Give input as [1, 0, 0].
5. We expect the final decoder o/p layer, which has sigmoid activation, to o/p all [0.5, 0.5, 0.5] as the input to this layer from the encoder should have been [0, 0, 0].
6. But we find that is not the case, which clearly shows that 'relu' activation of the hidden layer is not getting invoked.
Installation:
pip install tensorflow-macos
pip install tensorflow-metal
### Standalone code to reproduce the issue
```shell
# https://colab.research.google.com/drive/14KKrdiBg8FT2cdUC5pjqJHi5S3BkTOHi?usp=sharing
# The above colab will run fine, but the same code on Mac with the said config has issue.
# Copying the code here for quick reference.
import tensorflow as tf
import tensorflow.keras
import tensorflow as tf
import platform
import sys
from tensorflow.keras.layers import Input, Dense, Layer
from tensorflow.keras.models import Model
# Print versions:
print(f""Python {sys.version}"")
print(f""Python Platform: {platform.platform()}"")
print(f""Tensor Flow Version: {tf.__version__}"")
gpu = len(tf.config.list_physical_devices('GPU'))>0
print(""GPU is"", ""available"" if gpu else ""NOT AVAILABLE"")
# Setup input
import numpy as np
X_check = np.array([[1, 0, 0]])
# Setup autoencoder model
input_layer = Input(shape=(X_check.shape[1]))
bottleneck = Dense(2, activation='relu', name='bottleneck')(input_layer)
output = Dense(X_check.shape[1], activation='sigmoid', name='output')(bottleneck)
autoencoder = Model(input_layer, output)
# Set encoder layer weights to all negative.
layer = autoencoder.layers[1]
weights = np.array([[-1, -1],[-1, -1], [-1, -1]])
biases = np.array([0, 0])
layer.set_weights([weights, biases])
# create encoder model.
encoder = Model(input_layer, bottleneck)
# create decoder model.
decoder_input = Input(shape=(2,), name='decoder_input')
decoder_layer = autoencoder.layers[-1]
decoder = Model(decoder_input, decoder_layer(decoder_input))
# Run auto-encoder, with [1, 0, 0], since encoder has all negative weights,
# and has 'relu' activation, o/p of encoder should all be zeros. And that being
# the input of next sigmoid we should get output [0.5, 0.5, 0.5]
output_data = autoencoder.predict(X_check)
print(output_data)
```
### Relevant log output
```shell
Python 3.8.17 (default, Jul 5 2023, 15:45:03)
[Clang 14.0.6 ]
Python Platform: macOS-13.4-arm64-arm-64bit
Tensor Flow Version: 2.13.0
GPU is available
1/1 [==============================] - 0s 38ms/step
[[0.287966 0.85427195 0.28276426]]
```
"
tensorflow/tensorflow,2023-08-19 23:04:06,bug,Check failure when running tf.config.experimental_connect_to_host,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
GTX 1660 TI
### Current behavior?
Due to feeding NaN input Argument
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import numpy as np
try:
try:
with tf.device('/CPU'):
arg_0 = ""nan""
out = tf.config.experimental_connect_to_host(arg_0,)
except Exception as e:
print(""Error:""+str(e))
try:
with tf.device('/GPU:0'):
tf.config.experimental_connect_to_host(arg_0,)
except Exception as e:
print(""Error:""+str(e))
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-19 19:02:09.775956: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-19 19:02:10.305057: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
2023-08-19 19:02:10.742608: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.761041: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.761185: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.762359: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.762491: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.762611: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.826641: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.826771: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.826888: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.826971: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1639] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 3389 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-19 19:02:10.829417: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.829515: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.829601: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.829701: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.829789: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-08-19 19:02:10.829854: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1639] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 3389 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-19 19:02:10.838878: E tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:600] INVALID_ARGUMENT: Could not interpret ""nan"" as a host-port pair.
E0819 19:02:10.839114974 187448 completion_queue.cc:244] assertion failed: queue.num_items() == 0
Aborted
```
```
"
tensorflow/tensorflow,2023-08-18 00:56:31,bug,Overflow when running tf.compat.v1.manip.tile,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large element in the input list
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
input_tensor = tf.random.uniform([1, 355, 768], dtype=tf.float32)
input = tf.identity(input_tensor)
multiples_0 = 125091515651
multiples_1 = True
multiples_2 = 125091515651
multiples = [multiples_0,multiples_1,multiples_2,]
name = None
out = tf.compat.v1.manip.tile(input=input,multiples=multiples,name=name,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__Tile_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 44407488056105 with 96070284019968, result: -1
[[{{node Tile}}]] [Op:Tile]
```
```
"
tensorflow/tensorflow,2023-08-18 00:52:39,bug,Overflow bug when running tf.compat.v1.image.resize,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large elements in input list
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
images_tensor = tf.constant(-15621075306911, shape=[218, 178, 3, 1], dtype=tf.int64,)
images = tf.identity(images_tensor)
size_0 = 8968073515812833920
size_1 = 536870912
size = [size_0,size_1,]
method = ""nearest""
align_corners = False
preserve_aspect_ratio = False
name = None
out = tf.compat.v1.image.resize(images=images,size=size,method=method,align_corners=align_corners,preserve_aspect_ratio=preserve_aspect_ratio,name=name,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__ResizeNearestNeighbor_device_/job:localhost/replica:0/task:0/device:CPU:0}} Encountered overflow when multiplying 20527214848 with 536870912, result: -7426279517443850240 [Op:ResizeNearestNeighbor] name:
```
```
"
tensorflow/tensorflow,2023-08-18 00:37:29,bug,Overflow bug when running tf.compat.v1.manip.tile,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large elements in the input tensor
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
input_tensor = tf.random.uniform([1, 355, 768], dtype=tf.float32)
input = tf.identity(input_tensor)
multiples_0 = 125091515651
multiples_1 = True
multiples_2 = 125091515651
multiples = [multiples_0,multiples_1,multiples_2,]
name = None
out = tf.compat.v1.manip.tile(input=input,multiples=multiples,name=name,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__Tile_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 44407488056105 with 96070284019968, result: -1
[[{{node Tile}}]] [Op:Tile]
```
```
"
tensorflow/tensorflow,2023-08-18 00:29:46,bug,Overflow bug when running tf.compat.v1.keras.layers.ZeroPadding2D,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large elements in the input list
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
padding_0_0 = 125091515651
padding_0_1 = 125091515651
padding_0 = [padding_0_0,padding_0_1,]
padding_1_0 = 125091515651
padding_1_1 = 125091515651
padding_1 = [padding_1_0,padding_1_1,]
padding = [padding_0,padding_1,]
data_format = None
arg_class = tf.compat.v1.keras.layers.ZeroPadding2D(padding=padding,data_format=data_format,)
arg_input_0_tensor = tf.random.uniform([3, 14, 14, 576], dtype=tf.float32)
arg_input_0 = tf.identity(arg_input_0_tensor)
arg_input = [arg_input_0,]
out = arg_class(*arg_input)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
{{function_node __wrapped__Pad_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 750549093948 with 250183031316, result: -1
[[{{node Pad}}]] [Op:Pad]
Call arguments received by layer 'zero_padding2d' (type ZeroPadding2D):
• inputs=tf.Tensor(shape=(3, 14, 14, 576), dtype=float32)
```
```
"
tensorflow/tensorflow,2023-08-17 23:59:05,bug,Overflow bug when running tf.raw_ops.Tile,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large elements in input list
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
input_tensor = tf.random.uniform([4, 1, 1, 20], dtype=tf.float32)
input = tf.identity(input_tensor)
multiples_0 = 125091515651
multiples_1 = True
multiples_2 = 125091515651
multiples_3 = 125091515651
multiples = [multiples_0,multiples_1,multiples_2,multiples_3,]
name = None
out = tf.raw_ops.Tile(input=input,multiples=multiples,name=name,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__Tile_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 500366062604 with 125091515651, result: -1
[[{{node Tile}}]] [Op:Tile]
```
```
"
tensorflow/tensorflow,2023-08-17 23:52:52,bug,Overflow bug when running tf.keras.layers.ZeroPadding3D,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large integer value
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
padding = 1610612736
arg_class = tf.keras.layers.ZeroPadding3D(padding=padding,)
arg_input_0_tensor = tf.random.uniform([1, 1, 2, 2, 3], dtype=tf.float32)
arg_input_0 = tf.identity(arg_input_0_tensor)
arg_input = [arg_input_0,]
out = arg_class(*arg_input)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:Exception encountered when calling layer 'zero_padding3d' (type ZeroPadding3D).
{{function_node __wrapped__Pad_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 3221225473 with 3221225474, result: -8070450522584252414 [Op:Pad]
Call arguments received by layer 'zero_padding3d' (type ZeroPadding3D):
• inputs=tf.Tensor(shape=(1, 1, 2, 2, 3), dtype=float32)
```
```
"
tensorflow/tensorflow,2023-08-17 23:32:12,bug,Colab session crashes for unknown reasons when running tf.raw_ops.ResizeBilinear on colab,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large list element
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
images_tensor = tf.random.uniform([1, 5, 5, 1], minval=-256, maxval=257, dtype=tf.int32)
images = tf.identity(images_tensor)
size_0 = 125091515651
size_1 = True
size = [size_0,size_1,]
align_corners = False
half_pixel_centers = False
name = None
out = tf.raw_ops.ResizeBilinear(images=images,size=size,align_corners=align_corners,half_pixel_centers=half_pixel_centers,name=name,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.592 NotebookApp] Searching ['/root/.jupyter', '/root/.local/etc/jupyter', '/usr/etc/jupyter', '/usr/local/etc/jupyter', '/etc/jupyter'] for config files"",""time"":""2023-08-17T21:55:14.593Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.592 NotebookApp] Looking for jupyter_config in /etc/jupyter"",""time"":""2023-08-17T21:55:14.598Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.594 NotebookApp] Looking for jupyter_config in /usr/local/etc/jupyter"",""time"":""2023-08-17T21:55:14.600Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.601 NotebookApp] Looking for jupyter_config in /usr/etc/jupyter"",""time"":""2023-08-17T21:55:14.603Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.602 NotebookApp] Looking for jupyter_config in /root/.local/etc/jupyter"",""time"":""2023-08-17T21:55:14.603Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.602 NotebookApp] Looking for jupyter_config in /root/.jupyter"",""time"":""2023-08-17T21:55:14.604Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.604 NotebookApp] Looking for jupyter_notebook_config in /etc/jupyter"",""time"":""2023-08-17T21:55:14.605Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.604 NotebookApp] Loaded config file: /etc/jupyter/jupyter_notebook_config.py"",""time"":""2023-08-17T21:55:14.608Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.605 NotebookApp] Looking for jupyter_notebook_config in /usr/local/etc/jupyter"",""time"":""2023-08-17T21:55:14.608Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.597 NotebookApp] Searching ['/root/.jupyter', '/root/.local/etc/jupyter', '/usr/etc/jupyter', '/usr/local/etc/jupyter', '/etc/jupyter'] for config files"",""time"":""2023-08-17T21:55:14.598Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.609 NotebookApp] Loaded config file: /usr/local/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:14.610Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.610 NotebookApp] Looking for jupyter_notebook_config in /usr/etc/jupyter"",""time"":""2023-08-17T21:55:14.611Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.610 NotebookApp] Looking for jupyter_notebook_config in /root/.local/etc/jupyter"",""time"":""2023-08-17T21:55:14.611Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.610 NotebookApp] Looking for jupyter_notebook_config in /root/.jupyter"",""time"":""2023-08-17T21:55:14.611Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.612 NotebookApp] Loaded config file: /root/.jupyter/jupyter_notebook_config.py"",""time"":""2023-08-17T21:55:14.613Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.597 NotebookApp] Looking for jupyter_config in /etc/jupyter"",""time"":""2023-08-17T21:55:14.599Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.597 NotebookApp] Looking for jupyter_config in /usr/local/etc/jupyter"",""time"":""2023-08-17T21:55:14.601Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.598 NotebookApp] Looking for jupyter_config in /usr/etc/jupyter"",""time"":""2023-08-17T21:55:14.601Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.598 NotebookApp] Looking for jupyter_config in /root/.local/etc/jupyter"",""time"":""2023-08-17T21:55:14.602Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.598 NotebookApp] Looking for jupyter_config in /root/.jupyter"",""time"":""2023-08-17T21:55:14.602Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.599 NotebookApp] Looking for jupyter_notebook_config in /etc/jupyter"",""time"":""2023-08-17T21:55:14.610Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.600 NotebookApp] Loaded config file: /etc/jupyter/jupyter_notebook_config.py"",""time"":""2023-08-17T21:55:14.612Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.601 NotebookApp] Looking for jupyter_notebook_config in /usr/local/etc/jupyter"",""time"":""2023-08-17T21:55:14.612Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.603 NotebookApp] Loaded config file: /usr/local/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:14.612Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.603 NotebookApp] Looking for jupyter_notebook_config in /usr/etc/jupyter"",""time"":""2023-08-17T21:55:14.613Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.603 NotebookApp] Looking for jupyter_notebook_config in /root/.local/etc/jupyter"",""time"":""2023-08-17T21:55:14.614Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.603 NotebookApp] Looking for jupyter_notebook_config in /root/.jupyter"",""time"":""2023-08-17T21:55:14.614Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""[D 21:55:14.606 NotebookApp] Loaded config file: /root/.jupyter/jupyter_notebook_config.py"",""time"":""2023-08-17T21:55:14.614Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.051Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/usr/local/etc/jupyter/jupyter_notebook_config.d/panel-client-jupyter.json"",""time"":""2023-08-17T21:55:15.056Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/usr/local/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.056Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/usr/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.059Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/root/.local/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.060Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/root/.jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.061Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Writing notebook server cookie secret to /root/.local/share/jupyter/runtime/notebook_cookie_secret"",""time"":""2023-08-17T21:55:15.074Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Authentication of /metrics is OFF, since other authentication is disabled."",""time"":""2023-08-17T21:55:15.076Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""google.colab serverextension initialized."",""time"":""2023-08-17T21:55:15.159Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.173Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/usr/local/etc/jupyter/jupyter_notebook_config.d/panel-client-jupyter.json"",""time"":""2023-08-17T21:55:15.175Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/usr/local/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.177Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/usr/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.179Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/root/.local/etc/jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.180Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" \\t/root/.jupyter/jupyter_notebook_config.json"",""time"":""2023-08-17T21:55:15.183Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Writing notebook server cookie secret to /root/.local/share/jupyter/runtime/notebook_cookie_secret"",""time"":""2023-08-17T21:55:15.192Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Authentication of /metrics is OFF, since other authentication is disabled."",""time"":""2023-08-17T21:55:15.193Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""google.colab serverextension initialized."",""time"":""2023-08-17T21:55:15.213Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Serving notebooks from local directory: /"",""time"":""2023-08-17T21:55:19.313Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Serving notebooks from local directory: /"",""time"":""2023-08-17T21:55:19.314Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Jupyter Notebook 6.5.5 is running at:"",""time"":""2023-08-17T21:55:19.314Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""http://172.28.0.2:9000/"",""time"":""2023-08-17T21:55:19.314Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)."",""time"":""2023-08-17T21:55:19.315Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Jupyter Notebook 6.5.5 is running at:"",""time"":""2023-08-17T21:55:19.314Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""http://172.28.0.12:9000/"",""time"":""2023-08-17T21:55:19.316Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)."",""time"":""2023-08-17T21:55:19.316Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""Kernel started: 92d4365c-be07-4243-a024-4094c7317470, name: python3"",""time"":""2023-08-17T21:55:37.205Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""Got events for closed stream "",""time"":""2023-08-17T21:55:52.871Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 21:55:56.928312: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations."",""time"":""2023-08-17T21:55:56.928Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags."",""time"":""2023-08-17T21:55:56.928Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 21:55:58.601992: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT"",""time"":""2023-08-17T21:55:58.602Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 21:56:04.276761: W tensorflow/core/framework/op_kernel.cc:1830] OP_REQUIRES failed at tile_ops.cc:193 : INVALID_ARGUMENT: Encountered overflow when multiplying 500366062604 with 125091515651, result: -1"",""time"":""2023-08-17T21:56:04.276Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""Task exception was never retrieved"",""time"":""2023-08-17T22:10:59.200Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""future: .wrapper() done, defined at /usr/local/lib/python3.10/dist-packages/tornado/websocket.py:1085> exception=WebSocketClosedError()>"",""time"":""2023-08-17T22:10:59.200Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""Traceback (most recent call last):"",""time"":""2023-08-17T22:10:59.200Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" File \\""/usr/local/lib/python3.10/dist-packages/tornado/websocket.py\\"", line 1087, in wrapper"",""time"":""2023-08-17T22:10:59.200Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" await fut"",""time"":""2023-08-17T22:10:59.200Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""tornado.iostream.StreamClosedError: Stream is closed"",""time"":""2023-08-17T22:10:59.200Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""During handling of the above exception, another exception occurred:"",""time"":""2023-08-17T22:10:59.200Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""Traceback (most recent call last):"",""time"":""2023-08-17T22:10:59.201Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" File \\""/usr/lib/python3.10/asyncio/tasks.py\\"", line 232, in __step"",""time"":""2023-08-17T22:10:59.201Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" result = coro.send(None)"",""time"":""2023-08-17T22:10:59.201Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" File \\""/usr/local/lib/python3.10/dist-packages/tornado/websocket.py\\"", line 1089, in wrapper"",""time"":""2023-08-17T22:10:59.201Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":"" raise WebSocketClosedError()"",""time"":""2023-08-17T22:10:59.201Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""tornado.websocket.WebSocketClosedError"",""time"":""2023-08-17T22:10:59.201Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""Got events for closed stream "",""time"":""2023-08-17T23:11:18.034Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 23:17:17.631931: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 19327655268 exceeds 10% of free system memory."",""time"":""2023-08-17T23:17:17.634Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""KernelRestarter: restarting kernel (1/5), keep random ports"",""time"":""2023-08-17T23:17:28.209Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""WARNING:root:kernel 92d4365c-be07-4243-a024-4094c7317470 restarted"",""time"":""2023-08-17T23:17:28.209Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 23:19:54.827450: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations."",""time"":""2023-08-17T23:19:54.827Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags."",""time"":""2023-08-17T23:19:54.827Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 23:19:58.360271: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT"",""time"":""2023-08-17T23:19:58.360Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 23:20:03.457377: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 15342764032 exceeds 10% of free system memory."",""time"":""2023-08-17T23:20:03.457Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""KernelRestarter: restarting kernel (1/5), keep random ports"",""time"":""2023-08-17T23:20:40.231Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""WARNING:root:kernel 92d4365c-be07-4243-a024-4094c7317470 restarted"",""time"":""2023-08-17T23:20:40.234Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 23:25:19.461222: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations."",""time"":""2023-08-17T23:25:19.461Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags."",""time"":""2023-08-17T23:25:19.461Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 23:25:21.643520: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT"",""time"":""2023-08-17T23:25:21.643Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""2023-08-17 23:25:25.628780: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 2149856268 exceeds 10% of free system memory."",""time"":""2023-08-17T23:25:25.629Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":30,""msg"":""KernelRestarter: restarting kernel (1/5), keep random ports"",""time"":""2023-08-17T23:25:46.242Z"",""v"":0}
{""pid"":7,""type"":""jupyter"",""level"":40,""msg"":""WARNING:root:kernel 92d4365c-be07-4243-a024-4094c7317470 restarted"",""time"":""2023-08-17T23:25:46.242Z"",""v"":0}
```
```
"
tensorflow/tensorflow,2023-08-17 22:06:34,bug,Overflow bug when running tf.raw_ops.Pad,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to the large list of elements
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
input_tensor = tf.random.uniform([16, 16, 16, 512], dtype=tf.float32)
input = tf.identity(input_tensor)
paddings_0_0 = 125091515651
paddings_0_1 = 125091515651
paddings_0 = [paddings_0_0,paddings_0_1,]
paddings_1_0 = 125091515651
paddings_1_1 = False
paddings_1 = [paddings_1_0,paddings_1_1,]
paddings_2_0 = 125091515651
paddings_2_1 = 125091515651
paddings_2 = [paddings_2_0,paddings_2_1,]
paddings_3_0 = 125091515651
paddings_3_1 = 125091515651
paddings_3 = [paddings_3_0,paddings_3_1,]
paddings = [paddings_0,paddings_1,paddings_2,paddings_3,]
name = None
out = tf.raw_ops.Pad(input=input,paddings=paddings,name=name,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__Pad_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 250183031318 with 125091515667, result: -1
[[{{node Pad}}]] [Op:Pad]
```
```
"
tensorflow/tensorflow,2023-08-17 22:01:13,bug,Overflow bug when running tf.tile,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large list elements
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
arg_0_tensor = tf.random.uniform([452, 1, 768], dtype=tf.float32)
arg_0 = tf.identity(arg_0_tensor)
arg_1_0 = 125091515651
arg_1_1 = 125091515651
arg_1_2 = 125091515651
arg_1 = [arg_1_0,arg_1_1,arg_1_2,]
out = tf.tile(arg_0,arg_1,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__Tile_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 56541365074252 with 125091515651, result: -1
[[{{node Tile}}]] [Op:Tile]
```
```
"
tensorflow/tensorflow,2023-08-17 21:56:54,bug,Overflow bug when running tf.raw_ops.Tile on colab,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large list elements
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
input_tensor = tf.random.uniform([4, 1, 1, 20], dtype=tf.float32)
input = tf.identity(input_tensor)
multiples_0 = 125091515651
multiples_1 = True
multiples_2 = 125091515651
multiples_3 = 125091515651
multiples = [multiples_0,multiples_1,multiples_2,multiples_3,]
name = None
out = tf.raw_ops.Tile(input=input,multiples=multiples,name=name,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__Tile_device_/job:localhost/replica:0/task:0/device:CPU:0}} Encountered overflow when multiplying 500366062604 with 125091515651, result: -1 [Op:Tile]
```
```
"
tensorflow/tensorflow,2023-08-17 21:51:46,bug,Overflow bug when running tf.keras.layers.ZeroPadding3D,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to the large integer value
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
padding = 1610612736
arg_class = tf.keras.layers.ZeroPadding3D(padding=padding,)
arg_input_0_tensor = tf.random.uniform([1, 1, 2, 2, 3], dtype=tf.float32)
arg_input_0 = tf.identity(arg_input_0_tensor)
arg_input = [arg_input_0,]
out = arg_class(*arg_input)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:Exception encountered when calling layer 'zero_padding3d' (type ZeroPadding3D).
{{function_node __wrapped__Pad_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 3221225473 with 3221225474, result: -8070450522584252414 [Op:Pad]
Call arguments received by layer 'zero_padding3d' (type ZeroPadding3D):
• inputs=tf.Tensor(shape=(1, 1, 2, 2, 3), dtype=float32)
{}
```
```
"
tensorflow/tensorflow,2023-08-17 18:37:54,bug,Issue with nightly-gpu docker image,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.15.0-dev20230816
### Custom code
Yes
### OS platform and distribution
Ubuntu 20.04
### Mobile device
_No response_
### Python version
3.11
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
When using the tensorflow/tensorflow:nightly-gpu docker image I get an error saying the ""DNN library is not found""
However, when I change the base image to tensorflow/tensorflow:latest-gpu my code works fine.
Perhaps the nightly image broke something with the cuda / cudnn library paths?
### Standalone code to reproduce the issue
```shell
It seems that using a Conv1D layer is what causes the issue... see the log output below.
```
### Relevant log output
```shell
Detected at node 'peak_conv_1/Conv1D' defined at (most recent call last):
Node: 'peak_conv_1/Conv1D'
DNN library is not found.
[[{{node peak_conv_1/Conv1D}}]] [Op:__inference_train_step_224831]
```
"
tensorflow/tensorflow,2023-08-17 15:19:19,bug,Integer overflow when running tf.compat.v1.matrix_diag on colab,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large elements in the input lists
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
diagonal_0_0_0_0 = 1111
diagonal_0_0_0_1 = 1112
diagonal_0_0_0 = [diagonal_0_0_0_0,diagonal_0_0_0_1,]
diagonal_0_0_1_0 = 1121
diagonal_0_0_1_1 = 1122
diagonal_0_0_1 = [diagonal_0_0_1_0,diagonal_0_0_1_1,]
diagonal_0_0 = [diagonal_0_0_0,diagonal_0_0_1,]
diagonal_0_1_0_0 = 1211
diagonal_0_1_0_1 = 1212
diagonal_0_1_0 = [diagonal_0_1_0_0,diagonal_0_1_0_1,]
diagonal_0_1_1_0 = 1221
diagonal_0_1_1_1 = 1222
diagonal_0_1_1 = [diagonal_0_1_1_0,diagonal_0_1_1_1,]
diagonal_0_1 = [diagonal_0_1_0,diagonal_0_1_1,]
diagonal_0 = [diagonal_0_0,diagonal_0_1,]
diagonal_1_0_0_0 = 2111
diagonal_1_0_0_1 = 2112
diagonal_1_0_0 = [diagonal_1_0_0_0,diagonal_1_0_0_1,]
diagonal_1_0_1_0 = 2121
diagonal_1_0_1_1 = 2122
diagonal_1_0_1 = [diagonal_1_0_1_0,diagonal_1_0_1_1,]
diagonal_1_0 = [diagonal_1_0_0,diagonal_1_0_1,]
diagonal_1_1_0_0 = 2211
diagonal_1_1_0_1 = 2212
diagonal_1_1_0 = [diagonal_1_1_0_0,diagonal_1_1_0_1,]
diagonal_1_1_1_0 = 2221
diagonal_1_1_1_1 = 2222
diagonal_1_1_1 = [diagonal_1_1_1_0,diagonal_1_1_1_1,]
diagonal_1_1 = [diagonal_1_1_0,diagonal_1_1_1,]
diagonal_1 = [diagonal_1_0,diagonal_1_1,]
diagonal = [diagonal_0,diagonal_1,]
name = ""None""
k = 1610637938
padding_value = 0
align = ""RIGHT_LEFT""
out = tf.compat.v1.matrix_diag(diagonal=diagonal,name=name,k=k,padding_value=padding_value,align=align,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__MatrixDiagV3_device_/job:localhost/replica:0/task:0/device:CPU:0}} Encountered overflow when multiplying 12885103520 with 1610637940, result: -1 [Op:MatrixDiagV3]
{}
```
```
"
tensorflow/tensorflow,2023-08-17 14:55:02,bug,Integer overflow when running tf.raw_ops.MatrixDiagV3 on colab,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to large elements in input lists
### Standalone code to reproduce the issue
```shell
results = dict()
import tensorflow as tf
import os
import numpy as np
try:
diagonal_0_0_0_0 = 1111
diagonal_0_0_0_1 = 1112
diagonal_0_0_0 = [diagonal_0_0_0_0,diagonal_0_0_0_1,]
diagonal_0_0_1_0 = 1121
diagonal_0_0_1_1 = 1122
diagonal_0_0_1 = [diagonal_0_0_1_0,diagonal_0_0_1_1,]
diagonal_0_0 = [diagonal_0_0_0,diagonal_0_0_1,]
diagonal_0_1_0_0 = 1211
diagonal_0_1_0_1 = 1212
diagonal_0_1_0 = [diagonal_0_1_0_0,diagonal_0_1_0_1,]
diagonal_0_1_1_0 = 1221
diagonal_0_1_1_1 = 1222
diagonal_0_1_1 = [diagonal_0_1_1_0,diagonal_0_1_1_1,]
diagonal_0_1 = [diagonal_0_1_0,diagonal_0_1_1,]
diagonal_0 = [diagonal_0_0,diagonal_0_1,]
diagonal_1_0_0_0 = 2111
diagonal_1_0_0_1 = 2112
diagonal_1_0_0 = [diagonal_1_0_0_0,diagonal_1_0_0_1,]
diagonal_1_0_1_0 = 2121
diagonal_1_0_1_1 = 2122
diagonal_1_0_1 = [diagonal_1_0_1_0,diagonal_1_0_1_1,]
diagonal_1_0 = [diagonal_1_0_0,diagonal_1_0_1,]
diagonal_1_1_0_0 = 2211
diagonal_1_1_0_1 = 35.0
diagonal_1_1_0 = [diagonal_1_1_0_0,diagonal_1_1_0_1,]
diagonal_1_1_1_0 = 2221
diagonal_1_1_1_1 = 2222
diagonal_1_1_1 = [diagonal_1_1_1_0,diagonal_1_1_1_1,]
diagonal_1_1 = [diagonal_1_1_0,diagonal_1_1_1,]
diagonal_1 = [diagonal_1_0,diagonal_1_1,]
diagonal = [diagonal_0,diagonal_1,]
k = 3046875451
num_rows = -1
num_cols = -1
padding_value = 0
align = ""RIGHT_LEFT""
name = ""diag_part""
out = tf.raw_ops.MatrixDiagV3(diagonal=diagonal,k=k,num_rows=num_rows,num_cols=num_cols,padding_value=padding_value,align=align,name=name,)
except Exception as e:
print(""Error:""+str(e))
print(results)
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__MatrixDiagV3_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 9984734776 with 1248091847, result: -5984878005326580344
[[{{node MatrixDiagV3}}]] [Op:MatrixDiagV3]
{}
```
```
"
tensorflow/tensorflow,2023-08-17 14:22:48,bug,Integer overflow when running tf.linalg.diag,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
PRETTY_NAME=""Ubuntu 22.04.2 LTS"" NAME=""Ubuntu"" VERSION_ID=""22.04"" VERSION=""22.04.2 LTS (Jammy Jellyfish)"" VERSION_CODENAME=jammy ID=ubuntu ID_LIKE=debian HOME_URL=""https://www.ubuntu.com/"" SUPPORT_URL=""https://help.ubuntu.com/"" BUG_REPORT_URL=""https://bugs.launchpad.net/ubuntu/"" PRIVACY_POLICY_URL=""https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"" UBUNTU_CODENAME=jammy
### Mobile device
_No response_
### Python version
3.10.12 (main, Jun 11 2023, 05:26:28)
### Bazel version
_No response_
### GCC/compiler version
[GCC 11.4.0]
### CUDA/cuDNN version
[nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0](nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2022 NVIDIA Corporation Built on Wed_Sep_21_10:33:58_PDT_2022 Cuda compilation tools, release 11.8, V11.8.89 Build cuda_11.8.r11.8/compiler.31833905_0)
### GPU model and memory
T4
### Current behavior?
Due to the large list of elements
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
try:
diagonal_0_0_0 = []
diagonal_0_0_1_0 = True
diagonal_0_0_1_1 = 1122
diagonal_0_0_1 = [diagonal_0_0_1_0,diagonal_0_0_1_1,]
diagonal_0_0 = [diagonal_0_0_0,diagonal_0_0_1,]
diagonal_0_1_0_0 = 1211
diagonal_0_1_0_1 = 1212
diagonal_0_1_0 = [diagonal_0_1_0_0,diagonal_0_1_0_1,]
diagonal_0_1_1_0 = 1221
diagonal_0_1_1_1 = 1222
diagonal_0_1_1 = [diagonal_0_1_1_0,diagonal_0_1_1_1,]
diagonal_0_1 = [diagonal_0_1_0,diagonal_0_1_1,]
diagonal_0 = [diagonal_0_0,diagonal_0_1,]
diagonal_1_0_0_0 = True
diagonal_1_0_0_1 = """"
diagonal_1_0_0 = [diagonal_1_0_0_0,diagonal_1_0_0_1,]
diagonal_1_0_1_0 = 2121
diagonal_1_0_1_1 = 2122
diagonal_1_0_1 = [diagonal_1_0_1_0,diagonal_1_0_1_1,]
diagonal_1_0 = [diagonal_1_0_0,diagonal_1_0_1,]
diagonal_1_1_0_0 = 2211
diagonal_1_1_0_1 = 2212
diagonal_1_1_0 = [diagonal_1_1_0_0,diagonal_1_1_0_1,]
diagonal_1_1_1_0 = 30.0
diagonal_1_1_1_1 = 2222
diagonal_1_1_1 = [diagonal_1_1_1_0,diagonal_1_1_1_1,]
diagonal_1_1 = [diagonal_1_1_0,diagonal_1_1_1,]
diagonal_1 = [diagonal_1_0,diagonal_1_1,]
diagonal = [diagonal_0,diagonal_1,]
name = ""None""
k = 1610612736
padding_value = 0
align = ""RIGHT_LEFT""
out = tf.linalg.diag(diagonal=diagonal,name=name,k=k,padding_value=padding_value,align=align,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
Error:{{function_node __wrapped__MatrixDiagV3_device_/job:localhost/replica:0/task:0/device:GPU:0}} Encountered overflow when multiplying 12884901888 with 1610612736, result: -1
[[{{node MatrixDiagV3}}]] [Op:MatrixDiagV3]
```
```
"
tensorflow/tensorflow,2023-08-17 08:51:23,bug,AttributeError: module 'tensorflow.python.pywrap_mlir' has no attribute 'experimental_convert_saved_model_v1',"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
v1.12.1-96406-gfa4d29bfef8 2.14.0-dev20230706
### Custom code
No
### OS platform and distribution
Ubuntu 20.04
### Mobile device
Ubuntu 20.04
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
-
### Standalone code to reproduce the issue
```shell
-
```
### Relevant log output
```shell
File ""/home/fastDisk/jiahao/research/iree/.venv/lib/python3.8/site-packages/iree/tools/tf/scripts/iree_import_tf/__main__.py"", line 54, in main
import_saved_model(
File ""/home/fastDisk/jiahao/research/iree/.venv/lib/python3.8/site-packages/iree/tools/tf/scripts/iree_import_tf/__main__.py"", line 102, in import_saved_model
result = convert_saved_model_v1(
File ""/home/fastDisk/jiahao/research/iree/.venv/lib/python3.8/site-packages/tensorflow/python/compiler/mlir/mlir.py"", line 141, in convert_saved_model_v1
return pywrap_mlir.experimental_convert_saved_model_v1(
AttributeError: module 'tensorflow.python.pywrap_mlir' has no attribute 'experimental_convert_saved_model_v1'
```
"
tensorflow/tensorflow,2023-08-16 16:16:20,bug,TFLite GPU delegate: Broadcast output incorrect,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
Nightly at 09cf1b2a39023e617e003a51be39d419702c2d36
### Custom code
Yes
### OS platform and distribution
Android 12 2023-03-01
### Mobile device
Vivo X80
### Python version
_No response_
### Bazel version
CMake 3.19.0
### GCC/compiler version
Android NDK r25
### CUDA/cuDNN version
_No response_
### GPU model and memory
Mali-G710 MC10
### Current behavior?
TFLite model file: [model.tflite.zip](https://github.com/tensorflow/tensorflow/files/12360271/model.tflite.zip)
The provided TFLite model contains a single broadcast operation, which when executed by the provided C++ program, should produce the following output:
```
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5
6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8
```
However, when using the GPU delegate the following output is produced:
```
1 0 0 1 1 0 0 1 1 0 0 1 1 0 0 1
2 0 0 1 2 0 0 1 2 0 0 1 2 0 0 1
3 0 0 1 3 0 0 1 3 0 0 1 3 0 0 1
4 0 0 1 4 0 0 1 4 0 0 1 4 0 0 1
5 0 0 1 5 0 0 1 5 0 0 1 5 0 0 1
6 0 0 1 6 0 0 1 6 0 0 1 6 0 0 1
7 0 0 1 7 0 0 1 7 0 0 1 7 0 0 1
8 0 0 1 8 0 0 1 8 0 0 1 8 0 0 1
```
The correct output is produced when using the CPU and not the GPU delegate. We are concerned that this could be a security issue if memory is being accessed incorrectly.
Content of `model.tflite`:
```
Your TFLite model has '1' subgraph(s). In the subgraph description below,
T# represents the Tensor numbers. For example, in Subgraph#0, the MUL op takes
tensor #0 and tensor #1 as input and produces tensor #2 as output.
Subgraph#0 main(T#0) -> [T#2]
Op#0 MUL(T#0, T#1) -> [T#2]
Tensors of Subgraph#0
T#0(serving_default_input:0) shape:[8, 1], type:FLOAT32
T#1(BroadcastTo) shape:[8, 16], type:FLOAT32 RO 512 bytes, buffer: 2, data:[1, 1, 1, 1, 1, ...]
T#2(PartitionedCall:0) shape:[8, 16], type:FLOAT32
---------------------------------------------------------------
Your TFLite model has '1' signature_def(s).
Signature#0 key: 'serving_default'
- Subgraph: Subgraph#0
- Inputs:
'input' : T#0
- Outputs:
'output' : T#2
---------------------------------------------------------------
Model size: 1400 bytes
Non-data buffer size: 780 bytes (55.71 %)
Total data buffer size: 620 bytes (44.29 %)
(Zero value buffers): 0 bytes (00.00 %)
```
### Standalone code to reproduce the issue
```shell
#include
#include
#include ""tensorflow/lite/kernels/register.h""
#include ""tensorflow/lite/model.h""
#include ""tensorflow/lite/delegates/gpu/delegate.h""
int main() {
std::unique_ptr model =
tflite::FlatBufferModel::BuildFromFile(""model.tflite"");
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder builder(*model, resolver);
std::unique_ptr interpreter;
builder(&interpreter);
TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
auto* delegate = TfLiteGpuDelegateV2Create(&options);
if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) return 1;
const TfLiteTensor *inTensor = interpreter->input_tensor(0);
const TfLiteIntArray *inShape = inTensor->dims;
float *input = inTensor->data.f;
for (int i = 0; i < inShape->data[0]; i++) {
input[i] = i + 1;
}
if (interpreter->Invoke() != kTfLiteOk) return 1;
std::vector outShapeVec;
const TfLiteTensor *outTensor = interpreter->output_tensor(0);
const TfLiteIntArray *outShape = outTensor->dims;
const float *output = outTensor->data.f;
const float *outPtr = output;
for (size_t i = 0; i < outShape->data[0]; i++) {
for (size_t j = 0; j < outShape->data[1]; j++) {
printf(""%.2g "", *(outPtr++));
}
printf(""\\n"");
}
}
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-08-15 09:33:59,bug,Build failure on AARCH64 - undeclared identifier 'memset',"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
git HEAD
### Custom code
No
### OS platform and distribution
Ubuntu 20.04
### Mobile device
n/a
### Python version
3.9.16
### Bazel version
6.1.0
### GCC/compiler version
16.0.6
### CUDA/cuDNN version
n/a
### GPU model and memory
n/a
### Current behavior?
Build fails since commit https://github.com/tensorflow/tensorflow/commit/4993fb9fe4e4dbe26657b3bb88dab152ab397b8c
### Standalone code to reproduce the issue
```shell
bazel build --config=mkl_aarch64_threadpool --copt=-flax-vector-conversions --test_env=TF_ENABLE_ONEDNN_OPTS=1 --test_env=TF2_BEHAVIOR=1 --define=tf_api_version=2 -- //tensorflow/tools/pip_package:build_pip_package
```
### Relevant log output
```shell
ERROR: /workspace/tensorflow/lite/kernels/internal/BUILD:448:11: Compiling tensorflow/lite/kernels/internal/optimized/4bit/neon_fully_connected.cc failed: (Exit 1): clang failed: error executing command (from target //tensorflow/lite/kernels/internal:optimized_4bit)
(cd /home/andrew/src/tf_test/tensorflow-git/bazel-ci_build-cache/.cache/bazel/_bazel_andrew/eab0d61a99b6696edb3d2aff87b585e8/execroot/org_tensorflow && \\
exec env - \\
CACHEBUSTER=20220325 \\
PATH=/home/andrew/src/tf_test/tensorflow-git/bazel-ci_build-cache/.cache/bazelisk/downloads/bazelbuild/bazel-6.1.0-linux-arm64/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin \\
PWD=/proc/self/cwd \\
TF2_BEHAVIOR=1 \\
/usr/lib/llvm-16/bin/clang -MD -MF bazel-out/aarch64-opt/bin/tensorflow/lite/kernels/internal/_objs/optimized_4bit/neon_fully_connected.pic.d '-frandom-seed=bazel-out/aarch64-opt/bin/tensorflow/lite/kernels/internal/_objs/optimized_4bit/neon_fully_connected.pic.o' -DFC_4BIT_NEON '-DBAZEL_CURRENT_REPOSITORY=""""' -iquote . -iquote bazel-out/aarch64-opt/bin -iquote external/cpuinfo -iquote bazel-out/aarch64-opt/bin/external/cpuinfo -isystem external/cpuinfo/include -isystem bazel-out/aarch64-opt/bin/external/cpuinfo/include -isystem external/cpuinfo/src -isystem bazel-out/aarch64-opt/bin/external/cpuinfo/src -fmerge-all-constants -Wno-builtin-macro-redefined '-D__DATE__=""redacted""' '-D__TIMESTAMP__=""redacted""' '-D__TIME__=""redacted""' -fPIC -U_FORTIFY_SOURCE '-D_FORTIFY_SOURCE=1' -fstack-protector -Wall -Wno-invalid-partial-specialization -fno-omit-frame-pointer -no-canonical-prefixes -DNDEBUG -g0 -O2 -ffunction-sections -fdata-sections -Wno-all -Wno-extra -Wno-deprecated -Wno-deprecated-declarations -Wno-ignored-attributes -Wno-array-bounds -Wunused-result '-Werror=unused-result' -Wswitch '-Werror=switch' '-Wno-error=unused-but-set-variable' -DAUTOLOAD_DYNAMIC_KERNELS -Wno-gnu-offsetof-extensions '-mtune=generic' '-march=armv8-a' -O3 -flax-vector-conversions '-std=c++17' -DFARMHASH_NO_CXX_STRING -Wno-sign-compare -O3 -fno-exceptions -O3 '--sysroot=/dt10' -c tensorflow/lite/kernels/internal/optimized/4bit/neon_fully_connected.cc -o bazel-out/aarch64-opt/bin/tensorflow/lite/kernels/internal/_objs/optimized_4bit/neon_fully_connected.pic.o)
# Configuration: 70a2ceb8c9b79ab96bab8f0b73bbfb70969f7e2a66f605b1d1332a62f7eef342
# Execution platform: @local_execution_config_platform//:platform
tensorflow/lite/kernels/internal/optimized/4bit/neon_fully_connected.cc:284:3: error: use of undeclared identifier 'memset'
memset(*dest, static_cast(119), sizeof(uint8_t) * size);
^
tensorflow/lite/kernels/internal/optimized/4bit/neon_fully_connected.cc:313:3: error: use of undeclared identifier 'memset'
memset(data, 0, sizeof(int8_t) * size);
^
tensorflow/lite/kernels/internal/optimized/4bit/neon_fully_connected.cc:314:3: error: use of undeclared identifier 'memset'
memset(input_offsets, 0, sizeof(int32_t) * layout_rows);
^
3 errors generated.
Target //tensorflow/tools/pip_package:build_pip_package failed to build
INFO: Elapsed time: 23.741s, Critical Path: 7.00s
INFO: 439 processes: 339 internal, 100 local.
FAILED: Build did NOT complete successfully
```
"
tensorflow/tensorflow,2023-08-15 02:03:40,bug,Abort when running tensorflow.python.ops.nn_ops.pool,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to Large list element
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import nn_ops
try:
input_tensor = tf.constant(-8968073515812833920, shape=[2, 9, 10, 2], dtype=tf.float32,)
input = tf.identity(input_tensor)
window_shape_0 = 1e+38
window_shape_1 = 536870912
window_shape = [window_shape_0,window_shape_1,]
padding = ""SAME""
pooling_type = ""MAX""
dilation_rate_0 = 1
dilation_rate_1 = 1
dilation_rate = [dilation_rate_0,dilation_rate_1,]
strides_0 = 1
strides_1 = 1
strides = [strides_0,strides_1,]
out = nn_ops.pool(input=input,window_shape=window_shape,padding=padding,pooling_type=pooling_type,dilation_rate=dilation_rate,strides=strides,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 22:03:21.255791: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.273228: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.273370: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.273661: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 22:03:21.274910: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.275021: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.275124: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.328191: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.328333: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.328435: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 22:03:21.328519: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4361 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-14 22:03:21.392635: I tensorflow/stream_executor/cuda/cuda_dnn.cc:384] Loaded cuDNN version 8600
2023-08-14 22:03:21.392690: F tensorflow/stream_executor/cuda/cuda_dnn.cc:886] Check failed: cudnnSetPoolingNdDescriptor( handle_.get(), (pooling_descriptor.mode() == dnn::PoolingMode::kMaximum ? cudnn_max_pooling_mode : CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING), propagate_nans ? CUDNN_PROPAGATE_NAN : CUDNN_NOT_PROPAGATE_NAN, nd, shape.data(), padding.data(), strides.data()) == CUDNN_STATUS_SUCCESS (3 vs. 0)
Aborted
```
```
"
tensorflow/tensorflow,2023-08-15 01:40:14,bug,Crash when running tensorflow.python.ops.gen_image_ops.resize_area,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.13.0
### Custom code
No
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to negative large tensor
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import gen_image_ops
try:
arg_0_tensor = tf.constant(-1610612736, shape=[0, 6, 6, 1], dtype=tf.bfloat16,)
arg_0 = tf.identity(arg_0_tensor)
arg_1_tensor = tf.constant(-45932682421089, shape=[2], dtype=tf.int32,)
arg_1 = tf.identity(arg_1_tensor)
align_corners = False
out = gen_image_ops.resize_area(arg_0,arg_1,align_corners=align_corners,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 21:39:36.565382: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-14 21:39:37.126658: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.144298: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.144439: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.144729: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 21:39:37.146083: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.146208: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.146312: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.200697: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.200846: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.200954: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:39:37.201045: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4036 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
Segmentation fault
```
```
"
tensorflow/tensorflow,2023-08-15 01:34:29,bug,Abort when running tensorflow.python.ops.nn_ops.conv3d_transpose_v2,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.10.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to invalid list elements
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import nn_ops
try:
arg_0_tensor = tf.random.uniform([2, 4, 4, 4, 3], dtype=tf.float32)
arg_0 = tf.identity(arg_0_tensor)
arg_1_tensor = tf.random.uniform([2, 2, 2, 5, 3], dtype=tf.float32)
arg_1 = tf.identity(arg_1_tensor)
arg_2_0 = 2
arg_2_1 = 8
arg_2_2 = 8
arg_2_3 = 8
arg_2_4 = False
arg_2 = [arg_2_0,arg_2_1,arg_2_2,arg_2_3,arg_2_4,]
arg_3 = 2
out = nn_ops.conv3d_transpose_v2(arg_0,arg_1,arg_2,arg_3,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 21:33:38.109289: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-14 21:33:38.668776: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.686385: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.686530: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.686817: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 21:33:38.687712: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.687825: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.687920: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.749104: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.749246: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.749344: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 21:33:38.749426: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 3978 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-14 21:33:38.812494: I tensorflow/stream_executor/cuda/cuda_dnn.cc:384] Loaded cuDNN version 8600
2023-08-14 21:33:38.825580: F tensorflow/stream_executor/cuda/cuda_dnn.cc:804] Check failed: cudnnSetConvolutionGroupCount( handle_.get(), convolution_descriptor.group_count()) == CUDNN_STATUS_SUCCESS (3 vs. 0)
Aborted
```
```
"
tensorflow/tensorflow,2023-08-15 01:29:57,bug,Crash when running tensorflow.python.framework.kernels.get_registered_kernels_for_op,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to feeding None argument.
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.framework import kernels
try:
arg_0 = None
out = kernels.get_registered_kernels_for_op(arg_0,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 21:28:52.279973: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
Segmentation fault
```
```
"
tensorflow/tensorflow,2023-08-14 23:40:39,bug,Abort when running tensorflow.python.ops.gen_array_ops.mirror_pad_grad,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.10.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
due to NEGATIVE LARGE TENSOR
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import gen_array_ops
try:
arg_0_tensor = tf.constant(-17, shape=[1, 4, 7, 1], dtype=tf.int64,)
arg_0 = tf.identity(arg_0_tensor)
arg_1_tensor = tf.constant(-43871863081293, shape=[4, 2], dtype=tf.int32,)
arg_1 = tf.identity(arg_1_tensor)
arg_2 = ""REFLECT""
out = gen_array_ops.mirror_pad_grad(arg_0,arg_1,arg_2,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 19:39:35.212387: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.230641: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.230799: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.231097: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 19:39:35.231945: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.232079: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.232187: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.287820: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.287970: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.288088: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:39:35.288181: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4039 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-14 19:39:35.341105: F tensorflow/core/framework/tensor_shape.cc:404] Check failed: 0 <= new_num_elements (0 vs. -1)
Aborted
```
```
"
tensorflow/tensorflow,2023-08-14 23:16:28,bug,Abort when running tensorflow.python.ops.gen_math_ops.sobol_sample,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.10.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Due to an empty input argument
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import gen_math_ops
try:
arg_0 = 2
arg_1 = 4
arg_2 = [()]
dtype = None
out = gen_math_ops.sobol_sample(arg_0,arg_1,arg_2,dtype=dtype,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 19:14:54.117266: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.134520: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.134663: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.135032: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 19:14:54.135857: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.135969: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.136093: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.189645: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.189788: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.189886: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:14:54.189967: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4373 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-14 19:14:54.237059: F tensorflow/core/framework/tensor.cc:733] Check failed: 1 == NumElements() (1 vs. 0)Must have a one element tensor
Aborted
```
```
"
tensorflow/tensorflow,2023-08-14 23:08:43,bug,Abort when running tensorflow.python.ops.gen_sparse_ops.sparse_slice,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.10.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to Large List Elements
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import gen_sparse_ops
try:
indices_0 = []
indices = [indices_0,]
values_0 = 0
values = [values_0,]
shape_0 = 1
shape_1 = 1
shape = [shape_0,shape_1,]
start_0 = 4611686018427387904
start_1 = -1
start = [start_0,start_1,]
size_0 = 4611686018427387904
size_1 = 4611686018427387904
size = [size_0,size_1,]
out = gen_sparse_ops.sparse_slice(indices=indices,values=values,shape=shape,start=start,size=size,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 19:04:18.473689: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.490933: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.491133: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.491455: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 19:04:18.492102: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.492212: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.492308: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.560188: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.560326: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.560424: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 19:04:18.560508: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4029 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-14 19:04:18.619230: E tensorflow/stream_executor/cuda/cuda_event.cc:29] Error polling for event status: failed to query event: CUDA_ERROR_ILLEGAL_ADDRESS: an illegal memory access was encountered
2023-08-14 19:04:18.619470: F tensorflow/core/common_runtime/device/device_event_mgr.cc:221] Unexpected Event status: 1
Aborted
```
```
"
tensorflow/tensorflow,2023-08-14 22:25:16,bug,Crash when running tensorflow.python.ops.gen_data_flow_ops.record_input,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to very large integer values
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import gen_data_flow_ops
try:
file_pattern = ""/tmp/record_input_testzsuyf9ap/tmpsqjnp5o1/basic.*""
file_buffer_size = 1
file_parallelism = 1676240524292489355
file_shuffle_shift_ratio = 125091515651
batch_size = 1
file_random_seed = 125091515651
compression_type = ""GZIP""
out = gen_data_flow_ops.record_input(file_pattern=file_pattern,file_buffer_size=file_buffer_size,file_parallelism=file_parallelism,file_shuffle_shift_ratio=file_shuffle_shift_ratio,batch_size=batch_size,file_random_seed=file_random_seed,compression_type=compression_type,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 18:25:00.245344: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-14 18:25:01.275751: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.292863: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.293001: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.294465: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 18:25:01.295740: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.295849: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.295947: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.362610: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.363085: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.363185: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:980] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 18:25:01.363266: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4105 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
Segmentation fault
```
```
"
tensorflow/tensorflow,2023-08-14 21:19:25,bug,Segmentation fault when running tensorflow.python.ops.gen_math_ops._histogram_fixed_width,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to negative float argument
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import gen_math_ops
try:
try:
with tf.device('/CPU'):
arg_0_0_0 = -1.0
arg_0_0_1 = 0.0
arg_0_0_2 = 1.5
arg_0_0 = [arg_0_0_0,arg_0_0_1,arg_0_0_2,]
arg_0_1_0 = 2.0
arg_0_1_1 = 5.0
arg_0_1_2 = 15
arg_0_1 = [arg_0_1_0,arg_0_1_1,arg_0_1_2,]
arg_0 = [arg_0_0,arg_0_1,]
arg_1_0 = -1.7976931348623157e+308
arg_1_1 = -1.4013e-45
arg_1 = [arg_1_0,arg_1_1,]
arg_2 = 5
dtype = tf.int32
out = gen_math_ops._histogram_fixed_width(arg_0,arg_1,arg_2,dtype=dtype,)
except Exception as e:
print(""Error:""+str(e))
try:
with tf.device('/GPU:0'):
arg_0_0 = [arg_0_0_0,arg_0_0_1,arg_0_0_2,]
arg_0_1 = [arg_0_1_0,arg_0_1_1,arg_0_1_2,]
arg_0 = [arg_0_0,arg_0_1,]
arg_1 = [arg_1_0,arg_1_1,]
dtype = tf.int32
gen_math_ops._histogram_fixed_width(arg_0,arg_1,arg_2,dtype=dtype,)
except Exception as e:
print(""Error:""+str(e))
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-14 17:17:24.916189: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-14 17:17:25.460939: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.478482: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.478631: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.478922: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 17:17:25.479696: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.479802: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.479896: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.548142: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.548279: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.548375: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-14 17:17:25.548456: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1613] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 3932 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
Segmentation fault
(fuzzer_tf_2.11.0) n
```
```
"
tensorflow/tensorflow,2023-08-14 19:17:35,bug,Avoid partially saved ckpt from preempted device (e.g. TPU),"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
source
### TensorFlow version
tf.2.11
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
I accidentally discovered from a TPU preemption that my ckpt cannot be used to resume training (the preemption occurs during the process of saving ckpt). The error is that some weights cannot be matched. This phenomenon is happening for the first time and has never happened before.
### Standalone code to reproduce the issue
```shell
It is very hard to reproduce because it must be preemption while saving the ckpt.
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-08-13 21:34:43,bug,I need TensorFlow 2.2.0 but it is removed how to find it?,"I need to install TensorFlow 2.2.0
why? because this repo (https://github.com/GantMan/nsfw_model) is requesting it and now matter what I tried can't make it work with newer TensorFlows
How can I install TensorFlow 2.2.0 on Windows 10 and Python 3.10?
The error I am getting is and I am not able to fix it
```
(venv) G:\\nsfw_model>python a.py
2023-08-14 00:33:39.437427: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-14 00:33:40.148302: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21643 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:01:00.0, compute capability: 8.6
2023-08-14 00:33:40.149891: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 9603 MB memory: -> device: 1, name: NVIDIA GeForce RTX 3060, pci bus id: 0000:05:00.0, compute capability: 8.6
Traceback (most recent call last):
File ""G:\\nsfw_model\\a.py"", line 13, in
print(predict.classify(model, 'test'))
File ""G:\\nsfw_model\\nsfw_detector\\predict.py"", line 67, in classify
probs = classify_nd(model, images, predict_args)
File ""G:\\nsfw_model\\nsfw_detector\\predict.py"", line 77, in classify_nd
model_preds = model.predict(nd_images, **predict_args)
File ""G:\\nsfw_model\\venv\\lib\\site-packages\\keras\\utils\\traceback_utils.py"", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File ""G:\\nsfw_model\\venv\\lib\\site-packages\\keras\\engine\\training.py"", line 1997, in predict
raise ValueError('Unexpected result of `predict_function` '
ValueError: Unexpected result of `predict_function` (Empty batch_outputs). Please use `Model.compile(..., run_eagerly=True)`, or `tf.config.run_functions_eagerly(True)` for more information of where went wrong, or file a issue/bug to `tf.keras`.
```
predict.py
```
#! python
import argparse
import json
from os import listdir
from os.path import isfile, join, exists, isdir, abspath
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
IMAGE_DIM = 299 # required/default image dimensionality
def load_images(image_paths, image_size, verbose=True):
'''
Function for loading images into numpy arrays for passing to model.predict
inputs:
image_paths: list of image paths to load
image_size: size into which images should be resized
verbose: show all of the image path and sizes loaded
outputs:
loaded_images: loaded images on which keras model can run predictions
loaded_image_indexes: paths of images which the function is able to process
'''
loaded_images = []
loaded_image_paths = []
if isdir(image_paths):
parent = abspath(image_paths)
image_paths = [join(parent, f) for f in listdir(image_paths) if isfile(join(parent, f))]
elif isfile(image_paths):
image_paths = [image_paths]
for img_path in image_paths:
try:
if verbose:
print(img_path, ""size:"", image_size)
image = keras.preprocessing.image.load_img(img_path, target_size=image_size)
image = keras.preprocessing.image.img_to_array(image)
image /= 255
loaded_images.append(image)
loaded_image_paths.append(img_path)
except Exception as ex:
print(""Image Load Failure: "", img_path, ex)
return np.asarray(loaded_images), loaded_image_paths
def load_model(model_path):
if model_path is None or not exists(model_path):
raise ValueError(""saved_model_path must be the valid directory of a saved model to load."")
model = tf.keras.models.load_model(model_path, custom_objects={'KerasLayer': hub.KerasLayer},compile=False)
return model
def classify(model, input_paths, image_dim=IMAGE_DIM, predict_args={}):
""""""
Classify given a model, input paths (could be single string), and image dimensionality.
Optionally, pass predict_args that will be passed to tf.keras.Model.predict().
""""""
images, image_paths = load_images(input_paths, (image_dim, image_dim))
probs = classify_nd(model, images, predict_args)
return dict(zip(image_paths, probs))
def classify_nd(model, nd_images, predict_args={}):
""""""
Classify given a model, image array (numpy)
Optionally, pass predict_args that will be passed to tf.keras.Model.predict().
""""""
model_preds = model.predict(nd_images, **predict_args)
# preds = np.argsort(model_preds, axis = 1).tolist()
categories = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']
probs = []
for i, single_preds in enumerate(model_preds):
single_probs = {}
for j, pred in enumerate(single_preds):
single_probs[categories[j]] = float(pred)
probs.append(single_probs)
return probs
def main(args=None):
parser = argparse.ArgumentParser(
description=""""""A script to perform NFSW classification of images"""""",
epilog=""""""
Launch with default model and a test image
python nsfw_detector/predict.py --saved_model_path mobilenet_v2_140_224 --image_source test.jpg
"""""", formatter_class=argparse.RawTextHelpFormatter)
submain = parser.add_argument_group('main execution and evaluation functionality')
submain.add_argument('--image_source', dest='image_source', type=str, required=True,
help='A directory of images or a single image to classify')
submain.add_argument('--saved_model_path', dest='saved_model_path', type=str, required=True,
help='The model to load')
submain.add_argument('--image_dim', dest='image_dim', type=int, default=IMAGE_DIM,
help=""The square dimension of the model's input shape"")
if args is not None:
config = vars(parser.parse_args(args))
else:
config = vars(parser.parse_args())
if config['image_source'] is None or not exists(config['image_source']):
raise ValueError(""image_source must be a valid directory with images or a single image to classify."")
model = load_model(config['saved_model_path'])
image_preds = classify(model, config['image_source'], config['image_dim'])
print(json.dumps(image_preds, indent=2), '\\n')
if __name__ == ""__main__"":
main()
```
"
tensorflow/tensorflow,2023-08-13 06:08:12,bug,Abort when running tensorflow.python.ops.gen_sparse_ops.sparse_split,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.11.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to zero integer argument. It would be best if you ran multiple times to see the abort.
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import gen_sparse_ops
try:
arg_0 = 0
arg_1_tensor = tf.random.uniform([14, 2], minval=-256, maxval=257, dtype=tf.int64)
arg_1 = tf.identity(arg_1_tensor)
arg_2_tensor = tf.random.uniform([14], minval=-256, maxval=257, dtype=tf.int64)
arg_2 = tf.identity(arg_2_tensor)
arg_3_tensor = tf.random.uniform([2], minval=-256, maxval=257, dtype=tf.int64)
arg_3 = tf.identity(arg_3_tensor)
arg_4 = 2
out = gen_sparse_ops.sparse_split(arg_0,arg_1,arg_2,arg_3,arg_4,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-13 02:06:08.283954: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-13 02:06:09.159348: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.181201: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.181463: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.181927: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-13 02:06:09.182527: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.182681: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.182809: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.234678: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.234880: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.235018: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 02:06:09.235129: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1613] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 151 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-13 02:06:09.251615: I tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:735] failed to allocate 151.69M (159055872 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2023-08-13 02:06:09.251958: I tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:735] failed to allocate 136.52M (143150336 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2023-08-13 02:06:09.273191: E tensorflow/compiler/xla/stream_executor/cuda/cuda_event.cc:29] Error polling for event status: failed to query event: CUDA_ERROR_MISALIGNED_ADDRESS: misaligned address
2023-08-13 02:06:09.273688: F tensorflow/core/common_runtime/device/device_event_mgr.cc:221] Unexpected Event status: 1
Aborted
```
```
"
tensorflow/tensorflow,2023-08-13 05:22:51,bug,Abort when running tensorflow.python.eager.remote.connect_to_remote_host,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.11.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
NaN string argument
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import numpy as np
from tensorflow.python.eager import remote
try:
try:
with tf.device('/CPU'):
arg_0 = ""nan""
out = remote.connect_to_remote_host(arg_0,)
except Exception as e:
print(""Error:""+str(e))
try:
with tf.device('/GPU:0'):
remote.connect_to_remote_host(arg_0,)
except Exception as e:
print(""Error:""+str(e))
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-13 01:22:37.499369: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-13 01:22:38.459392: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.480510: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.480708: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.481081: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-13 01:22:38.481707: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.481844: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.481961: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.536637: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.536859: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.536991: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 01:22:38.537094: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1613] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 1725 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-13 01:22:38.546718: E tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:589] INVALID_ARGUMENT: Could not interpret ""nan"" as a host-port pair.
E0813 01:22:38.546961566 1686085 completion_queue.cc:244] assertion failed: queue.num_items() == 0
Aborted
```
```
"
tensorflow/tensorflow,2023-08-13 04:15:00,bug,Abort when running tensorflow.python.ops.gen_nn_ops.conv3d_backprop_input_v2,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.11.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Due to input tensor with zero shape
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import gen_nn_ops
try:
input_sizes_0 = 2
input_sizes_1 = 8
input_sizes_2 = 8
input_sizes_3 = 8
input_sizes_4 = 5
input_sizes = [input_sizes_0,input_sizes_1,input_sizes_2,input_sizes_3,input_sizes_4,]
filter_tensor = tf.random.uniform([0, 1, 2, 5, 3], dtype=tf.float32)
filter = tf.identity(filter_tensor)
out_backprop_tensor = tf.random.uniform([2, 4, 4, 4, 3], dtype=tf.float32)
out_backprop = tf.identity(out_backprop_tensor)
strides_0 = 1
strides_1 = 2
strides_2 = 2
strides_3 = 2
strides_4 = 1
strides = [strides_0,strides_1,strides_2,strides_3,strides_4,]
padding = ""SAME""
data_format = ""NDHWC""
dilations_0 = 1
dilations_1 = 1
dilations_2 = 1
dilations_3 = 1
dilations_4 = 1
dilations = [dilations_0,dilations_1,dilations_2,dilations_3,dilations_4,]
out = gen_nn_ops.conv3d_backprop_input_v2(input_sizes=input_sizes,filter=filter,out_backprop=out_backprop,strides=strides,padding=padding,data_format=data_format,dilations=dilations,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-13 00:13:03.668988: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-13 00:13:04.462547: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.483261: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.483427: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.483905: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-13 00:13:04.484753: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.484944: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.485057: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.585176: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.585346: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.585468: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-13 00:13:04.585565: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1613] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 744 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-13 00:13:04.615688: F ./tensorflow/core/util/gpu_launch_config.h:129] Check failed: work_element_count > 0 (0 vs. 0)
Aborted
```
```
"
tensorflow/tensorflow,2023-08-13 03:05:27,bug,Abort when running tensorflow.python.ops.linalg_ops.self_adjoint_eig,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.11.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
This behavior is very strange and should not throw OOM error.
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import linalg_ops
try:
arg_0_tensor = tf.random.uniform([1, 1], dtype=tf.float32)
arg_0 = tf.identity(arg_0_tensor)
out = linalg_ops.self_adjoint_eigvals(arg_0,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-12 23:02:34.613725: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-12 23:02:35.612147: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 23:02:35.634199: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 23:02:35.634612: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 23:02:35.635038: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-12 23:02:35.635637: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 23:02:35.635829: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 23:02:35.635948: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 23:02:35.639564: W tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:370] A non-primary context 0x5a7ae50 for device 0 exists before initializing the StreamExecutor. The primary context is now 0x7ffd00000000. We haven't verified StreamExecutor works with that.
2023-08-12 23:02:35.639662: F tensorflow/tsl/platform/statusor.cc:33] Attempting to fetch value instead of handling error INTERNAL: failed initializing StreamExecutor for CUDA device ordinal 0: INTERNAL: failed call to cuDevicePrimaryCtxRetain: CUDA_ERROR_OUT_OF_MEMORY: out of memory; total memory reported: 6216417280
Aborted
```
```
"
tensorflow/tensorflow,2023-08-12 19:32:57,bug,Abort when running tensorflow.python.ops.nn_ops.conv2d_transpose,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.11.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Probably due to the large input tensor
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.ops import nn_ops
try:
arg_0_tensor = tf.constant(-1048576, shape=[2, 6, 4, 3], dtype=tf.float16,)
arg_0 = tf.identity(arg_0_tensor)
arg_1_tensor = tf.constant(-1250999896764, shape=[0, 3, 2, 3], dtype=tf.float16,)
arg_1 = tf.identity(arg_1_tensor)
arg_2_0 = 2
arg_2_1 = 12
arg_2_2 = 8
arg_2_3 = 2
arg_2 = [arg_2_0,arg_2_1,arg_2_2,arg_2_3,]
strides_0 = 1
strides_1 = 2
strides_2 = 2
strides_3 = 1
strides = [strides_0,strides_1,strides_2,strides_3,]
padding = ""SAME""
out = nn_ops.conv2d_transpose(arg_0,arg_1,arg_2,strides=strides,padding=padding,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-12 15:32:22.021693: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-12 15:32:22.879671: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.899716: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.899926: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.900241: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-12 15:32:22.900789: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.900913: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.901022: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.952156: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.952340: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.952462: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 15:32:22.952557: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1613] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 4 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
2023-08-12 15:32:22.974261: F ./tensorflow/core/util/gpu_launch_config.h:129] Check failed: work_element_count > 0 (0 vs. 0)
Aborted
```
```
"
tensorflow/tensorflow,2023-08-12 18:32:22,bug,segmentation fault when running tensorflow.python.eager.context.add_function,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
22.04
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
nvidia-cudnn-cu11==8.6.0.163, cudatoolkit=11.8.0
### GPU model and memory
_No response_
### Current behavior?
Probably due to the NONE argument
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.eager import context
try:
arg_0 = None
out = context.add_function(arg_0,)
except Exception as e:
print(""Error:""+str(e))
```
```
### Relevant log output
```shell
2023-08-12 14:32:03.270708: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2023-08-12 14:32:04.485549: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.505262: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.505426: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.505739: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-12 14:32:04.506268: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.506389: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.506492: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.556530: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.556701: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.556817: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-08-12 14:32:04.556911: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1613] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 739 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
Segmentation fault
```
```
"
tensorflow/tensorflow,2023-08-11 09:46:28,bug,tflite-rutime: RuntimeError: Encountered unresolved custom op: FarthestPointSample.,"**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 20.04
- TensorFlow installed from (source or binary): source
- TensorFlow version (or github SHA if from source): 2.13.0
**Provide the text output from tflite_convert**
I did some test with pointnet++(https://github.com/charlesq34/pointnet2), and tried to inference with tf.lite or tflite-runtime, but both of them show the error message below:
```
Traceback (most recent call last):
File ""test.py"", line 13, in
predict = PointNetPredict('/kaggle/input/model-sign/model_sign.tflite')
File ""/kaggle/working/pointnet3c1/models/pointnet_predict.py"", line 27, in __init__
self.interpreter = self.init_model()
File ""/kaggle/working/pointnet3c1/models/pointnet_predict.py"", line 39, in init_model
interpreter.allocate_tensors()
File ""/opt/conda/lib/python3.7/site-packages/tensorflow/lite/python/interpreter.py"", line 513, in allocate_tensors
return self._interpreter.AllocateTensors()
RuntimeError: Encountered unresolved custom op: FarthestPointSample.
See instructions: https://www.tensorflow.org/lite/guide/ops_custom Node number 0 (FarthestPointSample) failed to prepare.Encountered unresolved custom op: FarthestPointSample.
See instructions: https://www.tensorflow.org/lite/guide/ops_custom Node number 0 (FarthestPointSample) failed to prepare.
```
I noticed there were some custom ops(tf_ops) in project pointnet2, but how to convert these ops to tflite-runtime operators?
```
# Copy and paste here
```
**Standalone code to reproduce the issue**
Provide a reproducible test case that is the bare minimum necessary to generate
the problem. If possible, please share a link to Colab/Jupyter/any notebook.
Also, please include a link to a GraphDef or the model if possible.
**Any other info / logs**
Include any logs or source code that would be helpful to diagnose the problem.
If including tracebacks, please include the full traceback. Large logs and files
should be attached.
"
tensorflow/tensorflow,2023-08-10 23:41:58,bug,AttributeError: can't set attribute in Plot or @property for example,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.8
### Custom code
No
### OS platform and distribution
Windows
### Mobile device
na
### Python version
3.8
### Bazel version
na
### GCC/compiler version
?
### CUDA/cuDNN version
?
### GPU model and memory
colab notebook
### Current behavior?
I'm running this tensorflow example:
https://github.com/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb
I wanted to add some additional models at the end and tried to create new data windows as done above. I noticed the example already given, and my new code requires the following line to be run before the @property for ""example"" is created:
"" w2.example = example_inputs, example_labels ""
else you get a vague error ""AttributeError: can't set attribute "" but it looks like this has a setter?
This line is found under ""3. Plot"" and if moved to a later section after the
@property
def example
under section 4 this error occurs.
### Standalone code to reproduce the issue
```shell
Move:
w2.example = example_inputs, example_labels
to under section 4 which should be ok. Can't set error occurs.
```
### Relevant log output
```shell
AttributeError: can't set attribute
```
"
tensorflow/tensorflow,2023-08-09 07:58:04,bug,Question about @tf.function,"### Issue type
Others
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.3.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
3.8
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
T4
### Current behavior?
After adding @tf.function, I found that each epoch only executes one batch_size, and this does not happen when @tf.function are removed
### Standalone code to reproduce the issue
```shell
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D,GlobalAveragePooling2D, Flatten, Dense, Dropout
from tensorflow.keras import Model, Sequential
from tensorflow.keras.regularizers import L2
class ResNetBlock(Model):
def __init__(self, filters=64, strides=1):
super(ResNetBlock, self).__init__()
self.strides = strides
self.c1 = Conv2D(filters=filters, kernel_size=(3, 3), strides=strides, padding='same')
self.b1 = BatchNormalization()
self.a1 = Activation('relu')
self.c2 = Conv2D(filters=filters, kernel_size=(3, 3), strides=1, padding='same')
self.b2 = BatchNormalization()
if(strides > 1):
self.c3 = Conv2D(filters=filters, kernel_size=(3, 3), strides=strides, padding='same')
self.b3 = BatchNormalization()
self.a2 = Activation('relu')
def call(self, inputs):
short_x = inputs
x = self.c1(inputs)
x = self.b1(x)
x = self.a1(x)
x = self.c2(x)
y = self.b2(x)
if(self.strides > 1):
short_x = self.c3(short_x)
short_x = self.b3(short_x)
return self.a2(short_x + y)
class ResNet(Model):
def __init__(self, model_lst, cur_filters = 64):
super(ResNet, self).__init__()
self.c1 = Conv2D(filters=cur_filters, kernel_size=(7, 7), strides=2, padding='same')
self.b1 = BatchNormalization()
self.a1 = Activation('relu')
self.p1 = MaxPool2D((2, 2), 2)
self.blocks = Sequential()
for (i, lst) in enumerate(model_lst):
for ids in range(lst):
if(i != 0 and ids == 0):
block = ResNetBlock(cur_filters, strides=2)
else:
block = ResNetBlock(cur_filters, strides=1)
self.blocks.add(block)
cur_filters *= 2
self.g1 = GlobalAveragePooling2D()
self.d1 = Dense(10, activation='softmax', kernel_regularizer=L2())
def call(self, inputs):
x = self.c1(inputs)
x = self.b1(x)
x = self.a1(x)
x = self.p1(x)
x = self.blocks(x)
x = self.g1(x)
y = self.d1(x)
return y
# ---------------------------------------------
# ResNet18
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# import matplotlib
# matplotlib.rcParams['font.family']=['SimHei', 'Arial']
from tensorflow.keras import *
from tensorflow.keras.layers import Conv2D, Dense, BatchNormalization, Activation, MaxPool2D,GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import Mean,SparseCategoricalAccuracy
from tensorflow.keras.datasets.fashion_mnist import load_data
batch_size = 64
epochs = 20
validation_freq = 2
(x_train, y_train), (x_test, y_test) = load_data()
x_train, x_test = x_train/255., x_test/255.
x_train = np.expand_dims(x_train, -1).astype(np.float32)
x_test = np.expand_dims(x_test, -1).astype(np.float32)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(len(x_train)).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).shuffle(len(x_test)).batch(batch_size)
model = ResNet([2, 2, 2, 2])
losses = SparseCategoricalCrossentropy(from_logits=False)
optimizer = Adam()
train_metrics_loss = Mean()
train_metrics_accuracy = SparseCategoricalAccuracy()
test_metrics_loss = Mean()
test_metrics_accuracy = SparseCategoricalAccuracy()
train_losses = []
train_accuracy = []
test_losses = []
test_accuracy = []
@tf.function
def train_step(model, input_images, y_real):
with tf.GradientTape() as tape:
y_pred = model(input_images, training=True)
loss = losses(y_real, y_pred)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_metrics_loss.update_state(loss)
train_metrics_accuracy.update_state(y_real, y_pred)
@tf.function
def test_step(model, input_images, y_real):
with tf.GradientTape() as tape:
y_pred = model(input_images, training=False)
loss = losses(y_real, y_pred)
test_metrics_loss.update_state(loss)
test_metrics_accuracy.update_state(y_real, y_pred)
for epoch in range(epochs):
train_metrics_loss.reset_states()
train_metrics_accuracy.reset_states()
test_metrics_accuracy.reset_states()
test_metrics_loss.reset_states()
for x_batch, y_batch in train_dataset:
train_step(model, x_batch, y_batch)
train_losses.append(train_metrics_loss.result())
train_accuracy.append(train_metrics_accuracy.result())
print(f""epoch={epoch}, train_loss={train_metrics_loss.result()}, train_accuracy={train_metrics_accuracy.result()}"")
if(epoch % validation_freq == 0):
for test_x_batch, test_y_batch in test_dataset:
test_step(model, test_x_batch, test_y_batch)
test_losses.append(test_metrics_loss.result())
test_accuracy.append(test_metrics_accuracy.result())
print(f""epoch={epoch}, test_loss={test_metrics_loss.result()}, test_accuracy={test_metrics_accuracy.result()}"")
plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.title('损失值变化图')
plt.plot(test_losses, 'g-', label=""Test_Loss"")
plt.plot(train_losses, 'r-', label=""Train_Loss"")
plt.legend()
plt.subplot(1, 2, 2)
plt.title(""准确率变化图"")
plt.plot(train_accuracy, 'r-', label=""Train_Accuracy"")
plt.plot(test_accuracy, 'g-', label=""Test_Accuracy"")
plt.legend()
plt.show()
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-08-03 10:02:23,bug,"""load_model"" method causes operating system level user-interface freeze","### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.6.2
### Custom code
Yes
### OS platform and distribution
Windows : 10.0.17763
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
CUDA : 11.2.0_460.89 /CUDNN : 8.1.0.77
### GPU model and memory
NVIDIA RTX A6000
### Current behavior?
There should not be any user interface freeze
### Standalone code to reproduce the issue
```shell
load_m = tf.keras.models.load_model('test.hdf5',custom_objects={'custom_loss':CustomLossFunction})
```
### Relevant log output
```shell
We just see operating system user interface freeze.
When the GPU (NVIDIA RTX A6000 ) mode is TCC we see that whole user interface of operating system ( not just the process which is execution this command ) is frozen for 10 seconds. Can this be fixed so that there is no freeze of user interface ?
Same code when GPU mode is WDDM will not freeze the user interface.
```
"
tensorflow/tensorflow,2023-08-02 12:29:15,bug,tf 2.13 - tflite convert error in topk when k is np.int64 ,"### 1. System information
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): mac and colab
- TensorFlow installation (pip package or built from source): pip
- TensorFlow library (version, if pip package or github SHA, if built from source): 2.13
### 2. Code
Colab code [here](https://colab.research.google.com/drive/163eKr3nkQM4vRqCnFu5U9K3QhgA5PWog?usp=sharing)
### 3. Bug
tf 2.13 model with `tf.math.top_k` error in tflite convert
tf 2.12 - **pass**
`k` is numpy.int64 - **fail**
`k` is numpy.int32 - **pass**
`k` is python int - **pass**
### 4. Error logs
```
---------------------------------------------------------------------------
ConverterError Traceback (most recent call last)
[](https://localhost:8080/#) in ()
1 # error
----> 2 create_model_and_convert(k=np.int64(5))
9 frames
[/usr/local/lib/python3.10/dist-packages/tensorflow/lite/python/convert.py](https://localhost:8080/#) in convert(model_flags, conversion_flags, input_data_str, debug_info_str, enable_mlir_converter)
365 enable_mlir_converter,
366 )
--> 367 raise converter_error
368
369 return _run_deprecated_conversion_binary(
ConverterError: /usr/local/lib/python3.10/dist-packages/tensorflow/python/saved_model/save.py:1313:0: error: 'tf.TopKV2' op is neither a custom op nor a flex op
:0: note: loc(fused[""PartitionedCall:"", ""PartitionedCall""]): called from
/usr/local/lib/python3.10/dist-packages/tensorflow/python/saved_model/save.py:1313:0: note: Error code: ERROR_NEEDS_FLEX_OPS
:0: error: failed while converting: 'main':
Some ops are not supported by the native TFLite runtime, you can enable TF kernels fallback using TF Select. See instructions: https://www.tensorflow.org/lite/guide/ops_select
TF Select ops: TopKV2
Details:
tf.TopKV2(tensor, tensor) -> (tensor, tensor) : {device = """", sorted = true}
```
"
tensorflow/tensorflow,2023-08-01 11:41:30,bug,try self.interpreter!.invoke() App got crashed on this line ,Swift 5
tensorflow/tensorflow,2023-08-01 04:31:13,bug,Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.10.0
### Custom code
Yes
### OS platform and distribution
Windows 11
### Mobile device
N/A
### Python version
3.10(Microsoft Store)
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
Cuda: 11.2
### GPU model and memory
RTX 3070 Ti 8GB
### Current behavior?
I installed CUDA 11.2 as recommended for tf 2.10.0, here's the install:
![Screenshot](https://github.com/tensorflow/tensorflow/assets/1494132/59352a2a-f90f-45bf-b8bd-861dc893a9ff)
At first, I thought it was a path issue, but after restarting my pc, I was able to access exe files in that folder:
![image](https://github.com/tensorflow/tensorflow/assets/1494132/5d9ccfca-4417-4045-ba74-fffde7b8a121)
If the files are in path, why can't tensorflow find them?
Many people say to use miniconda, so I did, but I got the same result. Other resolved issues were resolved as the OP's were using the wrong version of CUDA, I checked on the website and I can confirm that my version is the required one.
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
```
### Relevant log output
```shell
2023-07-31 18:56:25.098058: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2023-07-31 18:56:25.098226: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2023-07-31 18:56:26.164080: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2023-07-31 18:56:26.164320: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cublas64_11.dll'; dlerror: cublas64_11.dll not found
2023-07-31 18:56:26.164540: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cublasLt64_11.dll'; dlerror: cublasLt64_11.dll not found
2023-07-31 18:56:26.164818: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cufft64_10.dll'; dlerror: cufft64_10.dll not found
2023-07-31 18:56:26.368828: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cusparse64_11.dll'; dlerror: cusparse64_11.dll not found
2023-07-31 18:56:26.369092: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudnn64_8.dll'; dlerror: cudnn64_8.dll not found
```
"
tensorflow/tensorflow,2023-07-31 09:26:26,bug,Visual Studio 2022 / MingW64: cant find source files,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.7.0
### Custom code
Yes
### OS platform and distribution
Windows 11
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
8.1.0
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Cant write complete application
### Standalone code to reproduce the issue
```shell
#include
#include
#include
#include
int main() {
// Инициализация TensorFlow
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::ClientSession session(root);
// Входные данные (5 предыдущих OHLC свечей)
std::vector input_data = { /* Ваши значения OHLC свечей */ };
tensorflow::Tensor input_tensor(tensorflow::DT_FLOAT, tensorflow::TensorShape({ 1, 5 }));
auto input_tensor_mapped = input_tensor.tensor();
for (int i = 0; i < 5; ++i) {
input_tensor_mapped(0, i) = input_data[i];
}
// Загружаем модель или определяем свою модель для прогнозирования
// tensorflow::GraphDef graph_def;
// tensorflow::ReadBinaryProto(tensorflow::Env::Default(), ""path/to/model.pb"", &graph_def);
// tensorflow::SessionOptions session_options;
// tensorflow::ClientSession session(root, session_options);
// session.Create(graph_def);
// Выполняем прогноз на основе входных данных
tensorflow::Tensor output_tensor;
tensorflow::Status run_status = session.Run({ { ""input_tensor_name"", input_tensor } },
{ ""output_tensor_name"" }, {}, &output_tensor);
if (!run_status.ok()) {
std::cerr << ""Ошибка выполнения: "" << run_status.error_message() << std::endl;
return 1;
}
// Обрабатываем результат прогноза
auto output_tensor_mapped = output_tensor.tensor();
// Выводим результаты прогноза OHLC свечи будущей
return 0;
}
```
### Relevant log output
```shell
Серьезность Код Описание Проект Файл Строка Состояние подавления
Ошибка (активно) E1696 не удается открыть источник файл ""third_party/eigen3/unsupported/Eigen/CXX11/ThreadPool"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\threadpool_interface.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""absl/status/status.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\framework\\ops.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/str_cat.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\framework\\ops.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/tensor.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\framework\\ops.h 27
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/str_cat.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\framework\\scope.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/array_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/candidate_sampling_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 20
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/control_flow_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/data_flow_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/image_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/io_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/linalg_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/logging_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 27
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/lookup_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 28
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/math_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 29
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/nn_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 30
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/no_op.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 31
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/parsing_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 32
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/random_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 33
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/sparse_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 34
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/state_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 35
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/string_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 36
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/training_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 37
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/cc/ops/user_ops.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\cc\\ops\\standard_ops.h 38
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/graph.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\common_runtime\\graph_constructor.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\allocator.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\allocator.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""absl/base/macros.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\device_base.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\device_base.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/device_attributes.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\device_base.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/full_type.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\full_type_inference_util.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/full_type.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\full_type_util.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/node_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\full_type_util.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/op_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\full_type_util.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/graph_debug_info.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""absl/container/flat_hash_map.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 30
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 31
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/variant.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 32
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/attr_value.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 33
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/function.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 36
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/optimized_function_graph.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 40
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/protobuf/config.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 51
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/tsl/protobuf/error_codes.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 52
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/protobuf/remote_tensor_handle.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\function.h 54
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/node_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\node_def_builder.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/op_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\node_def_builder.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/node_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\node_def_util.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/op_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\node_def_util.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/types.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\node_def_util.h 29
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/node_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\node_properties.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/op_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\node_properties.h 20
Ошибка (активно) E1696 не удается открыть источник файл ""absl/container/flat_hash_map.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/full_type.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/full_type.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_def_builder.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/op_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_def_builder.h 27
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/api_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_def_util.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/op_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_def_util.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""absl/time/time.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/span.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/graph.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 31
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/kernel_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 32
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/node_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 34
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/tensor_shape.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 44
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/types.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 47
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/protobuf/config.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\op_kernel.h 59
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/registration/options.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\registration\\registration.h 38
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/types.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\resource_handle.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""third_party/eigen3/unsupported/Eigen/CXX11/Tensor"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\tensor.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/types.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\tensor.h 30
Ошибка (активно) E1696 не удается открыть источник файл ""third_party/eigen3/unsupported/Eigen/CXX11/Tensor"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\tensor_shape.h 21
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/types.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\tensor_shape.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""third_party/eigen3/unsupported/Eigen/CXX11/Tensor"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\tensor_types.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""third_party/eigen3/unsupported/Eigen/CXX11/Tensor"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\types.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/full_type.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\types.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/types.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\framework\\types.h 28
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph.h 45
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/full_type.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph.h 46
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/node_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph.h 48
Ошибка (активно) E1696 не удается открыть источник файл ""absl/container/flat_hash_map.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph_debug_info_builder.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""absl/status/status.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph_debug_info_builder.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""absl/status/statusor.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph_debug_info_builder.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph_debug_info_builder.h 27
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/span.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph_debug_info_builder.h 28
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/graph_debug_info.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\graph_debug_info_builder.h 29
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/framework/op_def.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\graph\\node_builder.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/span.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\lib\\gtl\\array_slice.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""absl/base/attributes.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\platform\\errors.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/str_join.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\platform\\errors.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\platform\\threadpool.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/core/protobuf/config.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\public\\session_options.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/match.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\util\\managed_stack_trace.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/str_cat.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\util\\managed_stack_trace.h 27
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\util\\managed_stack_trace.h 28
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\core\\util\\tensor_format.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\framework\\allocator.h 25
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\framework\\allocator.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\framework\\device_type.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""Eigen/Core"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\framework\\fixedpoint_types.h 21
Ошибка (активно) E1696 не удается открыть источник файл ""absl/container/inlined_vector.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\lib\\gtl\\inlined_vector.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""third_party/eigen3/Eigen/Core"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\bfloat16.h 20
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/cord.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\default\\cord.h 22
Ошибка (активно) E1696 не удается открыть источник файл ""absl/base/log_severity.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\default\\logging.h 35
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\default\\logging.h 36
Ошибка (активно) E1696 не удается открыть источник файл ""absl/status/statusor.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\default\\statusor.h 18
Ошибка (активно) E1696 не удается открыть источник файл ""absl/functional/any_invocable.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\env.h 27
Ошибка (активно) E1696 не удается открыть источник файл ""absl/base/attributes.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\errors.h 26
Ошибка (активно) E1696 не удается открыть источник файл ""absl/status/status.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\errors.h 27
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/cord.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\errors.h 28
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/str_join.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\errors.h 29
Ошибка (активно) E1696 не удается открыть источник файл ""include/float8.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\float8.h 19
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/descriptor.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 30
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/arena.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 31
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/descriptor.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 32
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/dynamic_message.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 33
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/io/coded_stream.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 34
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/io/tokenizer.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 35
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/io/zero_copy_stream.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 36
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/io/zero_copy_stream_impl_lite.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 37
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/map.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 38
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/message.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 39
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/repeated_field.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 40
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/text_format.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 41
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/util/field_comparator.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 42
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/util/json_util.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 43
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/util/message_differencer.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 44
Ошибка (активно) E1696 не удается открыть источник файл ""google/protobuf/util/type_resolver_util.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\protobuf.h 45
Ошибка (активно) E1696 не удается открыть источник файл ""absl/base/attributes.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\status.h 28
Ошибка (активно) E1696 не удается открыть источник файл ""absl/functional/function_ref.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\status.h 29
Ошибка (активно) E1696 не удается открыть источник файл ""absl/status/status.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\status.h 30
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/cord.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\status.h 31
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\status.h 32
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\status.h 33
Ошибка (активно) E1696 не удается открыть источник файл ""tensorflow/tsl/protobuf/error_codes.pb.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\status.h 39
Ошибка (активно) E1696 не удается открыть источник файл ""absl/base/attributes.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\statusor.h 71
Ошибка (активно) E1696 не удается открыть источник файл ""absl/status/statusor.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\statusor.h 72
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/string_view.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\stringpiece.h 29
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/str_join.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\str_util.h 23
Ошибка (активно) E1696 не удается открыть источник файл ""absl/strings/str_split.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\str_util.h 24
Ошибка (активно) E1696 не удается открыть источник файл ""absl/types/optional.h"" ai C:\\Users\\User\\source\\repos\\ai\\include\\tensorflow\\tsl\\platform\\threadpool.h 22
```
"
tensorflow/tensorflow,2023-07-28 18:24:21,bug,Failed assertion in tf.linalg.sqrtm (and possibly other functions) crashes entire program instead of raising Exception,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.9.1
### Custom code
No
### OS platform and distribution
Macbook 2020 M1 air, Ventura 13.2
### Mobile device
-
### Python version
3.8.13
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Currently, providing a degenerate matrix to tf.linalg.sqrtm crashes the entire program, producing output:
```
Assertion failed: (T(i,i) >= 0), function matrix_sqrt_quasi_triangular_diagonal, file external/eigen_archive/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h, line 128.
Process finished with exit code 134 (interrupted by signal 6: SIGABRT)
```
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
def demo_assertion_error_crashes_program():
degenerate_matrix = tf.ones((3, 3), dtype=tf.float64)
try:
tf.linalg.sqrtm(degenerate_matrix)
# tf.linalg.inv(degenerate_matrix) # <- This also fails, but raises an actual exception
print(""Calculated root"") # This is never run
except Exception as err:
print(""Caught exception: "", err) # Neither is this
if __name__ == '__main__':
demo_assertion_error_crashes_program()
```
### Relevant log output
```shell
Assertion failed: (T(i,i) >= 0), function matrix_sqrt_quasi_triangular_diagonal, file external/eigen_archive/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h, line 128.
Process finished with exit code 134 (interrupted by signal 6: SIGABRT)
```
### Workaround
One option is to add a small regularizing term to make it non-degenerate, but I don't (yet) know how to do this such that it always prevents the crash, and also does not significantly affect results then the matrix-square-root would have worked.
Instead, I now just use the Denmann-Beavers iteration to approximate the matrix square-root
```
def tf_denmann_beavers_sqrtm(matrix: tf.Tensor, n_iter=10):
""""""
Approximate the matrix-square-root by Denmann Beavers iteration
https://en.wikipedia.org/wiki/Square_root_of_a_matrix#By_Denman%E2%80%93Beavers_iteration
Convergence is not guaranteed. Use at your own risk!
This is handy for tflite, which does not yet support tf.linalg.sqrtm
https://github.com/tensorflow/tensorflow/issues/60154
Or for regular tensorflow, which crashes your entire program when input matrix is degenerate
https://github.com/tensorflow/tensorflow/issues/61423
""""""
ym = matrix
zm = tf.eye(tf.shape(matrix[0])[0], dtype=matrix.dtype)
for i in range(n_iter):
ym_ = 0.5 * (ym + tf.linalg.inv(zm))
zm = 0.5 * (zm + tf.linalg.inv(ym))
ym = ym_
return ym
```
... obviously this is not ideal.
"
tensorflow/tensorflow,2023-07-26 21:56:14,bug,"model.fit() occur ""Cudnn graph failed to build: UNKNOWN: CUDNN_STATUS_BAD_PARAM""","### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.11, 2.12, 2.13
### Custom code
Yes
### OS platform and distribution
Linux Ubuntu 22.04
### Mobile device
_No response_
### Python version
3.10
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
11.8 / 8.6 & 11.8 / 8.9.2
### GPU model and memory
RTX 3090 Ti & RTX 4090
### Current behavior?
This is first time experience to have such error message.
When I try
""model.fit()""
server stops with error message below
Tried cuDNN version 8.6 (as [tensorflow.org](https://www.tensorflow.org/install/pip) ) and 8.9.2 (lateset for CUDA 11.8)
Both have problem.
How can I solve the issue?
Thanks!
### Standalone code to reproduce the issue
```shell
gpu_id = ""2"" # 0 or 1
import os
os.environ[""CUDA_VISIBLE_DEVICES""] = gpu_id
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
size_y = 256
size_x = 256
#--- load dataset
dic_path = './seg_dataset/train/dic'
msk_path = './seg_dataset/train/msk'
seed = 1004 # random number in your mind
dic_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
validation_split=0.2
)
msk_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
validation_split=0.2
)
dic_train = \\
dic_datagen.flow_from_directory(
dic_path,
target_size=(size_y, size_x),
class_mode=None,
seed=seed,
subset='training'
)
msk_train = \\
msk_datagen.flow_from_directory(
msk_path,
target_size=(size_y, size_x),
class_mode=None,
color_mode='grayscale',
seed=seed,
subset='training'
)
dic_valid = \\
dic_datagen.flow_from_directory(
dic_path,
target_size=(size_y, size_x),
class_mode=None,
seed=seed,
subset='validation'
)
msk_valid = \\
msk_datagen.flow_from_directory(
msk_path,
target_size=(size_y, size_x),
class_mode=None,
color_mode='grayscale',
seed=seed,
subset='validation'
)
train_ds = zip(dic_train, msk_train)
valid_ds = zip(dic_valid, msk_valid)
f = [16, 32, 64, 128, 256]
kernel_size=(3,3)
padding='same'
strides=1
# number of filters at each level
inputs = layers.Input((size_y, size_x, 1))
p0 = inputs
# downblock 1
x = layers.Conv2D(16, kernel_size, padding=padding, strides=strides, activation=""relu"")(p0)
c1 = layers.Conv2D(16, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
x = layers.MaxPool2D((2, 2), (2, 2))(c1)
# downblock 2
x = layers.Conv2D(32, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
c2 = layers.Conv2D(32, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
x = layers.MaxPool2D((2, 2), (2, 2))(c2)
# downblock 3
x = layers.Conv2D(64, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
c3 = layers.Conv2D(64, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
x = layers.MaxPool2D((2, 2), (2, 2))(c3)
# downblock 4
x = layers.Conv2D(128, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
c4 = layers.Conv2D(128, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
x = layers.MaxPool2D((2, 2), (2, 2))(c4)
# bottle neck
x = layers.Conv2D(256, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
x = layers.Conv2D(256, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
# up block 1
x = layers.UpSampling2D((2, 2))(x)
concat = layers.Concatenate()([x, c4])
x = layers.Conv2D(128, kernel_size, padding=padding, strides=strides, activation=""relu"")(concat)
x = layers.Conv2D(128, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
# up block 1
x = layers.UpSampling2D((2, 2))(x)
concat = layers.Concatenate()([x, c3])
x = layers.Conv2D(64, kernel_size, padding=padding, strides=strides, activation=""relu"")(concat)
x = layers.Conv2D(64, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
# up block 1
x = layers.UpSampling2D((2, 2))(x)
concat = layers.Concatenate()([x, c2])
x = layers.Conv2D(32, kernel_size, padding=padding, strides=strides, activation=""relu"")(concat)
x = layers.Conv2D(32, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
# up block 1
x = layers.UpSampling2D((2, 2))(x)
concat = layers.Concatenate()([x, c1])
x = layers.Conv2D(16, kernel_size, padding=padding, strides=strides, activation=""relu"")(concat)
x = layers.Conv2D(16, kernel_size, padding=padding, strides=strides, activation=""relu"")(x)
# last convolution 1x1
outputs = layers.Conv2D(1, (1, 1), padding=""same"", activation=""sigmoid"")(x)
model = models.Model(inputs, outputs)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
path_checkpoint = './seg_checkpoint'
os.makedirs(path_checkpoint,exist_ok=True)
model_checkpointer = keras.callbacks.ModelCheckpoint(
filepath = path_checkpoint,
save_weights_only=True,
monitor='val_loss',
mode='min',
save_best_only=True,
verbose = 1
)
#--- additional
callbacks = [
model_checkpointer,
keras.callbacks.EarlyStopping(
patience=50*3,
monitor='val_loss',
mode='min',
verbose=1
),
]
#--- train start
EPOCH = 10
history = model.fit(
train_ds,
validation_data=valid_ds,
validation_steps=15,
# Total number of steps (batches of samples)
# to draw before stopping when performing validation at the end of every epoch.
batch_size=16,
steps_per_epoch=50,
epochs=EPOCH,
callbacks=callbacks
)
```
### Relevant log output
```shell
2023-07-26 14:46:27.667380: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:8942] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2023-07-26 14:46:27.667411: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2023-07-26 14:46:27.667426: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2023-07-26 14:46:27.671343: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-07-26 14:46:28.183018: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
WARNING:tensorflow:From /home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distribution.py:259: ReparameterizationType.__init__ (from tensorflow.python.ops.distributions.distribution) is deprecated and will be removed after 2019-01-01.
Instructions for updating:
The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.
WARNING:tensorflow:From /home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bernoulli.py:165: RegisterKL.__init__ (from tensorflow.python.ops.distributions.kullback_leibler) is deprecated and will be removed after 2019-01-01.
Instructions for updating:
The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.
2023-07-26 14:46:28.727203: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:28.741660: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:28.741864: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:28.807551: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:28.807748: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:28.807913: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:28.808057: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1884] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 22168 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:41:00.0, compute capability: 8.6
2023-07-26 14:46:28.809796: I tensorflow/core/common_runtime/direct_session.cc:380] Device mapping:
/job:localhost/replica:0/task:0/device:GPU:0 -> device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:41:00.0, compute capability: 8.6
Found 40000 images belonging to 1 classes.
Found 40000 images belonging to 1 classes.
Found 10000 images belonging to 1 classes.
Found 10000 images belonging to 1 classes.
2023-07-26 14:46:30.293048: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.293252: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.293411: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.293658: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.293824: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.293974: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.294158: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.294315: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:894] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-26 14:46:30.294450: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1884] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 22168 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:41:00.0, compute capability: 8.6
Epoch 1/10
2023-07-26 14:46:31.666051: I tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:440] Loaded cuDNN version 8600
2023-07-26 14:46:31.674917: W tensorflow/core/framework/op_kernel.cc:1839] OP_REQUIRES failed at conv_ops_fused_impl.h:625 : INTERNAL: Cudnn graph failed to build: UNKNOWN: CUDNN_STATUS_BAD_PARAM
in tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc(4340): 'conv_op' CUDNN_BACKEND_OPERATION: cudnnFinalize Failed
Traceback (most recent call last):
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/tensorflow/python/eager/execute.py"", line 53, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InternalError: Graph execution error:
Detected at node model/conv2d/Relu defined at (most recent call last):
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 672, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 672, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 672, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 672, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 672, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/layers/convolutional/base_conv.py"", line 321, in call
return self.activation(outputs)
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 672, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/layers/convolutional/base_conv.py"", line 321, in call
return self.activation(outputs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/activations.py"", line 306, in relu
return backend.relu(
File ""/home/bootcamp/train_unet.py"", line 161, in
history = model.fit(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1783, in fit
tmp_logs = self.train_function(iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1377, in train_function
return step_function(self, iterator)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1360, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1349, in run_step
outputs = model.train_step(data)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 1126, in train_step
y_pred = self(x, training=True)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/training.py"", line 589, in __call__
return super().__call__(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 515, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/functional.py"", line 672, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 65, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/engine/base_layer.py"", line 1149, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py"", line 96, in error_handler
return fn(*args, **kwargs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/layers/convolutional/base_conv.py"", line 321, in call
return self.activation(outputs)
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/activations.py"", line 306, in relu
return backend.relu(
File ""/home/bootcamp/miniconda3/envs/tf/lib/python3.10/site-packages/keras/src/backend.py"", line 5397, in relu
x = tf.nn.relu(x)
Cudnn graph failed to build: UNKNOWN: CUDNN_STATUS_BAD_PARAM
in tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc(4340): 'conv_op' CUDNN_BACKEND_OPERATION: cudnnFinalize Failed
[[{{node model/conv2d/Relu}}]] [Op:__inference_train_function_4359]
```
"
tensorflow/tensorflow,2023-07-25 08:13:21,bug,OneDNN logs are not printing while building TF with --config=mkl_aarch64,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
Ubuntu 22.04.2 LTS
### Mobile device
_No response_
### Python version
3.10.6
### Bazel version
6.3
### GCC/compiler version
11.3.0
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
I am expecting OneDNN logs should print while running deep learning model such as resnet50, if we export ONEDNN_VERBOSE=1
### Standalone code to reproduce the issue
```shell
To reproduce same, we have to build TF on Arm CPU, and use following command to build:
bazel build --config=mkl_aarch64 //tensorflow/tools/pip_package:build_pip_package
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-25 01:30:55,bug,"""ValueError: Cannot take the length of shape with unknown rank."" when using MultiHeadRelativeAttention","### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.13.1
### Custom code
Yes
### OS platform and distribution
mac M2 pro
### Mobile device
mac M2 pro
### Python version
3.10.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
when using MultiHeadRelativeAttention from official.nlp.modeling.layers, I face on this error, ""ValueError: Cannot take the length of shape with unknown rank."" I'm sorry I'm not good at English. Thank you!
### Standalone code to reproduce the issue
```shell
from official.nlp.modeling.layers import MultiHeadRelativeAttention
import tensorflow as tf
vec= tf.constant([[[[0.1]*4]*3]*3])
layers=MultiHeadRelativeAttention(num_heads=4,key_dim=3)
output=layers(vec,vec,content_attention_bias=0.1, positional_attention_bias=0.1)
```
### Relevant log output
```shell
ValueError: Cannot take the length of shape with unknown rank.
```
"
tensorflow/tensorflow,2023-07-24 16:51:23,bug,Unable to save model when using EfficientNetB0,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.12.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
I’m trying to use EfficientNetB0 to create a model and save the model to my local disk. However, when saving it, it throws the error below.
> TypeError: Unable to serialize [2.0896919 2.1128857 2.1081853] to JSON. Unrecognized type .
Also, I tried to downgrade tensorflow from V2.12.0 to V2.9.1, this works as expected. In other words, this is a bug in 2.12.0. Hope it helps and please fix this bug for V2.12.0
### Standalone code to reproduce the issue
```shell
model = tf.keras.applications.EfficientNetB0()
model.save(""model"")
```
### Relevant log output
```shell
TypeError: Unable to serialize [2.0896919 2.1128857 2.1081853] to JSON. Unrecognized type .
```
"
tensorflow/tensorflow,2023-07-22 07:11:34,bug,TFLite Error,"**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):Windows 11
- TensorFlow installed from (source or binary):source
- TensorFlow version (or github SHA if from source):2.15.0
**Provide the text output from tflite_convert**
The below is the code, I am using to convert the deep learning model to tflite
converter = tf.lite.TFLiteConverter.from_keras_model(best_model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
with open('compressed_model.tflite', 'wb') as f:
f.write(tflite_model)
**Standalone code to reproduce the issue**
Provide a reproducible test case that is the bare minimum necessary to generate
the problem. If possible, please share a link to Colab/Jupyter/any notebook.
https://colab.research.google.com/drive/1QlquN0xR94xMdiUNer00Nu0n5UDXdiWQ
![error](https://github.com/tensorflow/tensorflow/assets/107172150/19056701-1422-4ab2-939a-545f3f799f48)
"
tensorflow/tensorflow,2023-07-21 18:21:26,bug,Tensorflow Load Datasets Failure for Python 3.11.4 and Tensorflow 2.13.0,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
2.13.0
### Custom code
No
### OS platform and distribution
Rocky Linux 8.7
### Mobile device
_No response_
### Python version
3.11.4
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
11.8/8.9.0.131-1
### GPU model and memory
_No response_
### Current behavior?
Dataset written and loaded in python 3.11.4 and tensorflow 2.12.1 should load in tensorflow 2.13
However, loading dataset in Tensorflow 2.13 with python 3.11.4 fails on Linux and windows:
TensorFlow version: 2.13.0
Python version: 3.11.4
Download dev dataset...
Extract dev dataset...
Loading dev dataset...
[libprotobuf ERROR external/com_google_protobuf/src/google/protobuf/text_format.cc:337] Error parsing text-format tensorflow.data.experimental.DistributedSnapshotMetadata: 1:1: Invalid control characters encountered in text.
[libprotobuf ERROR external/com_google_protobuf/src/google/protobuf/text_format.cc:337] Error parsing text-format tensorflow.data.experimental.DistributedSnapshotMetadata: 1:3: Expected identifier, got: 14022746025082002701
Download train dataset...
Extract train dataset...
Loading train dataset...
[libprotobuf ERROR external/com_google_protobuf/src/google/protobuf/text_format.cc:337] Error parsing text-format tensorflow.data.experimental.DistributedSnapshotMetadata: 1:1: Invalid control characters encountered in text.
[libprotobuf ERROR external/com_google_protobuf/src/google/protobuf/text_format.cc:337] Error parsing text-format tensorflow.data.experimental.DistributedSnapshotMetadata: 1:3: Expected identifier, got: 10775564831112808841
### Standalone code to reproduce the issue
```shell
import io
import sys
from zipfile import ZipFile
import requests
import tensorflow as tf
print(""TensorFlow version:"", tf.__version__)
print(""Python version:"", sys.version.split()[0])
dev_url = (
""https://drive.google.com/uc?export=download&id=1-MJAgrTNZkaMpyBQLIgqqwM8gP9LKdDL""
)
print(""Download dev dataset..."")
r = requests.get(dev_url)
z = ZipFile(io.BytesIO(r.content))
print(""Extract dev dataset..."")
z.extractall()
print(""\\nLoading dev dataset..."")
ds_dev = tf.data.Dataset.load(""squadv2_dev_tf"")
train_url = (
""https://drive.google.com/uc?export=download&id=1-NWGcJz0ZaFGFeHOPG2PKvn8gmf3MwKn""
)
print(""Download train dataset..."")
r = requests.get(train_url)
z = ZipFile(io.BytesIO(r.content))
print(""Extract train dataset..."")
z.extractall()
print(""\\nLoading train dataset..."")
ds_train = tf.data.Dataset.load(""squadv2_train_tf"")
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-19 12:46:52,bug,tensorflow/core/common_runtime/gpu/gpu_util.cc:293] GPU->CPU Memcpy failed,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
1.15
### Custom code
Yes
### OS platform and distribution
Windows
### Mobile device
_No response_
### Python version
3.7
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
cuda version = 10.0
cudnn=7.6.4
_No response_
### GPU model and memory
Geforce RTX 4070 TI 12 GB
_No response_
### Current behavior?
I am using
gpu geforce rtx 4070 ti 12 gb
i add in my training file
config1 = tf.compat.v1.ConfigProto()
config1.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config1)
But Nothing happen I get this issue
### Standalone code to reproduce the issue
```shell
2023-07-19 13:40:37.744821: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudart64_100.dll
2023-07-19 13:40:37.744955: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cublas64_100.dll
2023-07-19 13:40:37.745047: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cufft64_100.dll
2023-07-19 13:40:37.745166: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library curand64_100.dll
2023-07-19 13:40:37.745290: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cusolver64_100.dll
2023-07-19 13:40:37.745374: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cusparse64_100.dll
2023-07-19 13:40:37.745493: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudnn64_7.dll
2023-07-19 13:40:37.745596: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1746] Adding visible gpu devices: 0
2023-07-19 13:40:37.745706: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1159] Device interconnect StreamExecutor with strength 1 edge matrix:
2023-07-19 13:40:37.745789: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1165] 0
2023-07-19 13:40:37.745866: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1178] 0: N
2023-07-19 13:40:37.746009: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1304] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10400 MB memory) -> physical GPU (device: 0, name: NVIDIA GeForce RTX 4070 Ti, pci bus id: 0000:01:00.0, compute capability: 8.9)
WARNING:tensorflow:From C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\backend\\tensorflow_backend.py:300: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
WARNING:tensorflow:From C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\backend\\tensorflow_backend.py:308: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.
2023-07-19 13:40:40.254879: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudnn64_7.dll
2023-07-19 13:43:12.728939: W tensorflow/stream_executor/cuda/redzone_allocator.cc:312] Internal: Invoking ptxas not supported on Windows
Relying on driver to perform ptx compilation. This message will be only logged once.
2023-07-19 13:43:12.932855: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cublas64_100.dll
2023-07-19 13:43:21.066511: E tensorflow/stream_executor/cuda/cuda_blas.cc:428] failed to run cuBLAS routine: CUBLAS_STATUS_EXECUTION_FAILED
Exception: Blas GEMM launch failed : a.shape=(2, 2048), b.shape=(2, 36), m=2048, n=36, k=2
[[node gradients_1/dense_regress_10/MatMul_grad/MatMul_1 (defined at C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\ops.py:1748) ]]
Original stack trace for 'gradients_1/dense_regress_10/MatMul_grad/MatMul_1':
File ""c:/Users/user/Desktop/Binarios/keras_frcnn-master-atelier-B/keras_frcnn-master/train_frcnn_kitti.py"", line 262, in
train_kitti()
File ""c:/Users/user/Desktop/Binarios/keras_frcnn-master-atelier-B/keras_frcnn-master/train_frcnn_kitti.py"", line 205, in train_kitti
[Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\engine\\training.py"", line 1620, in train_on_batch
self._make_train_function()
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\engine\\training.py"", line 1002, in _make_train_function
self.total_loss)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\optimizers.py"", line 381, in get_updates
grads = self.get_gradients(loss, params)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\optimizers.py"", line 47, in get_gradients
grads = K.gradients(loss, params)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\backend\\tensorflow_backend.py"", line 2138, in gradients
return tf.gradients(loss, variables, colocate_gradients_with_ops=True)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\gradients_impl.py"", line 158, in gradients
unconnected_gradients)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\gradients_util.py"", line 679, in _GradientsHelper
lambda: grad_fn(op, *out_grads))
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\gradients_util.py"", line 350, in _MaybeCompile
return grad_fn() # Exit early
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\gradients_util.py"", line 679, in
lambda: grad_fn(op, *out_grads))
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\math_grad.py"", line 1586, in _MatMulGrad
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\gen_math_ops.py"", line 6136, in mat_mul
name=name)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\op_def_library.py"", line 794, in _apply_op_helper
op_def=op_def)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\util\\deprecation.py"", line 507, in new_func
return func(*args, **kwargs)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\ops.py"", line 3357, in create_op
attrs, op_def, compute_device)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\ops.py"", line 3426, in _create_op_internal
op_def=op_def)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\ops.py"", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
...which was originally created as op 'dense_regress_10/MatMul', defined at:
File ""c:/Users/user/Desktop/Binarios/keras_frcnn-master-atelier-B/keras_frcnn-master/train_frcnn_kitti.py"", line 262, in
train_kitti()
File ""c:/Users/user/Desktop/Binarios/keras_frcnn-master-atelier-B/keras_frcnn-master/train_frcnn_kitti.py"", line 88, in train_kitti
classifier = nn.classifier(shared_layers, roi_input, cfg.num_rois, nb_classes=len(classes_count), trainable=True)
File ""c:\\Users\\user\\Desktop\\Binarios\\keras_frcnn-master-atelier-B\\keras_frcnn-master\\keras_frcnn\\resnet.py"", line 270, in classifier
name='dense_regress_{}'.format(nb_classes))(out)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\engine\\topology.py"", line 578, in __call__
output = self.call(inputs, **kwargs)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\layers\\wrappers.py"", line 177, in call
y = self.layer.call(inputs) # (num_samples * timesteps, ...)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\layers\\core.py"", line 840, in call
output = K.dot(inputs, self.kernel)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\keras\\backend\\tensorflow_backend.py"", line 848, in dot
out = tf.matmul(x, y)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\util\\dispatch.py"", line 180, in wrapper
return target(*args, **kwargs)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\math_ops.py"", line 2754, in matmul
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\ops\\gen_math_ops.py"", line 6136, in mat_mul
name=name)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\op_def_library.py"", line 794, in _apply_op_helper
op_def=op_def)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\util\\deprecation.py"", line 507, in new_func
return func(*args, **kwargs)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\ops.py"", line 3357, in create_op
attrs, op_def, compute_device)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\ops.py"", line 3426, in _create_op_internal
op_def=op_def)
File ""C:\\Users\\user\\AppData\\Roaming\\Python\\Python37\\site-packages\\tensorflow_core\\python\\framework\\ops.py"", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
2023-07-19 13:43:21.297408: I tensorflow/stream_executor/stream.cc:1990] [stream=0000029EFE927EC0,impl=0000029EB937CFB0] did not wait for [stream=0000029EFE926CC0,impl=0000029EB937CF20]
2023-07-19 13:43:21.297815: I tensorflow/stream_executor/stream.cc:4925] [stream=0000029EFE927EC0,impl=0000029EB937CFB0] did not memcpy device-to-host; source: 00000007129B6500
2023-07-19 13:43:21.298246: F tensorflow/core/common_runtime/gpu/gpu_util.cc:293] GPU->CPU Memcpy failed
2023-07-19 13:43:21.298255: I tensorflow/stream_executor/stream.cc:1990] [stream=0000029EFE927EC0,impl=0000029EB937CFB0] did not wait for [stream=0000029EFE926CC0,impl=0000029EB937CF20]
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-18 01:29:12,bug,KerasTensor和tf.tensor之间的转换,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf2.4
### Custom code
Yes
### OS platform and distribution
Window10
### Mobile device
_No response_
### Python version
3.8
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
KerasTensor没有numpy()这种,应该如何转换
### Standalone code to reproduce the issue
```shell
File ""C:/Users/KM Group/Desktop/lmx/SemanticCompression-Speech/DeepSC-S-main/random_mask_training.py"", line 73, in
sem_dec = sem_dec_model(frame_length, stride_length, args)
File ""C:\\Users\\KM Group\\Desktop\\lmx\\SemanticCompression-Speech\\DeepSC-S-main\\model_tfnn.py"", line 191, in sem_dec_model
_output = sem_dec(_intput, batch_mean, batch_var)
File ""C:\\Users\\KM Group\\Desktop\\lmx\\SemanticCompression-Speech\\DeepSC-S-main\\model_tfnn.py"", line 142, in __call__
_input = tf.convert_to_tensor(keras.backend.get_value(_input))
File ""C:\\Users\\KM Group\\Anaconda3\\envs\\speech-SC\\lib\\site-packages\\tensorflow\\python\\keras\\backend.py"", line 3615, in get_value
return x.numpy()
AttributeError: 'KerasTensor' object has no attribute 'numpy'
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-17 12:19:06,bug,tensorflow keras model.predict() is not thread safe,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
binary
### TensorFlow version
tf 2.13.0
### Custom code
Yes
### OS platform and distribution
Linux CentOS 7.9
### Mobile device
_No response_
### Python version
3.11.4
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
We executed model.predict() in multi-thread. And sometimes, the code raised the exception: Functional' object has no attribute 'predict_function.
### Standalone code to reproduce the issue
```shell
def predict(self, x, tf_server=False, port=8501, model_path='', step=0):
if tf_server:
return self.predict_tf_server_grpc(x, port, step)
pred = None
try:
if not self.model_trained:
print('try to load model ...\\n')
self.load_model(model_path)
self.model_trained = True
if step == 0:
pred = self.model.predict(x)
else:
pred = self.model.predict(x, steps=step)
except Exception as ex:
print(ex)
return pred
```
While the exception raised, we executed the code ""self.model.predict(x)"" in debug window again, and it returned the correct prediction results.
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-15 13:33:28,bug,`tf.image.decode_jpeg` can not decode jpeg base64 encoded image,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.10.1
### Custom code
Yes
### OS platform and distribution
Win11 22H2
### Mobile device
_No response_
### Python version
3.10.11
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
It raise: `InvalidArgumentError: {{function_node __wrapped__DecodeJpeg_device_/job:localhost/replica:0/task:0/device:CPU:0}} Unknown image file format. One of JPEG, PNG, GIF, BMP required. [Op:DecodeJpeg]`
![image](https://github.com/tensorflow/tensorflow/assets/4510984/342f8602-a2ba-48b7-bdaa-4d0684f9299f)
![image](https://github.com/tensorflow/tensorflow/assets/4510984/6177e9d3-90ed-42c8-881f-cc18bd045f39)
### Standalone code to reproduce the issue
```shell
import base64
from PIL import Image
import tensorflow as tf
img = Image.open('xxx.jpg')
base64str = base64.b64encode(img.tobytes()).decode()
tf.image.decode_jpeg(base64str, channels=3)
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-14 22:13:11,bug,Tensor dimension mismatch when `tf.keras.Input` is used as input,"### 1. System information
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Linux Ubuntu 20.04
- TensorFlow installation (pip package or built from source): pip
- TensorFlow library (version, if pip package or github SHA, if built from source): 2.14.0-dev20230602
### 2. Code
This is the minimized code to reproduce the issue:
```python
import tensorflow as tf
import numpy as np
input_shape = [1, 2]
x1 = tf.keras.Input(shape=input_shape, dtype=""float32"")
class Model(tf.keras.Model):
def __init__(self):
super(Model, self).__init__()
self.w1 = tf.Variable([[3., 4.], [5., 6.]])
self.b1 = tf.Variable([7., 8.])
@tf.function(input_signature=[tf.TensorSpec(x1.shape, x1.dtype)])
def call(self, x1):
return tf.matmul(x1, self.w1) + self.b1
m = Model()
converter = tf.lite.TFLiteConverter.from_keras_model(m)
tflite_model = converter.convert()
def _evaluateTFLiteModel(tflite_model, input_data):
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(f'Keras input shape: {input_data[0].shape}') # print keras input shape
print(f'Lite input shape: {input_details[0][""shape""]}') # print lite input shape
for i in range(len(input_data)):
interpreter.set_tensor(input_details[i]['index'], input_data[i])
interpreter.invoke()
output_data = [interpreter.get_tensor(output_details[i]['index'])
for i in range(len(output_details))]
return output_data
x = tf.constant([1., 2.], shape=input_shape)
actual_value = _evaluateTFLiteModel(tflite_model,[x])
```
### 3. Failure after conversion
Output
```
Keras input shape: (1, 2)
Lite input shape: [1 1 2]
```
Error Message:
```
ValueError: Cannot set tensor: Dimension mismatch. Got 2 but expected 3 for input 0.
```
"
tensorflow/tensorflow,2023-07-13 13:03:01,bug,TFlite running interpreter->invoke() has failed - Segmentation fault,"In TFLite, I wrote a custom delegate in C++ and encountered an error: ""Segmentation fault"". This error occurs after the initialization is complete and specifically after the invocation of interpreter->invoke(). The custom delegate's prepare function is executed, but the eva function is not executed."
tensorflow/tensorflow,2023-07-13 03:41:33,bug,TypeError: _lookup_dependency() takes 2 positional arguments but 3 were given,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.14.0-dev20230712
### Custom code
Yes
### OS platform and distribution
Linux moe 5.10.0-12-amd64 #1 SMP Debian 5.10.103-1 (2022-03-07) x86_64 GNU/Linux
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
Can't load saved model
### Standalone code to reproduce the issue
```shell
Saved models can't be loaded:
model = tf.keras.models.Sequential([tf.keras.layers.Input((256,256,3)), tf.keras.layers.Dense(1)])
model.save(""../models/test"")
model = tf.keras.models.load_model(""../models/test/"")
```
same thing with more complex models
```
model = tf.keras.applications.efficientnet.EfficientNetB0()
model.save(""../models/test"")
model = tf.keras.models.load_model(""../models/test/"")
```
```
### Relevant log output
```shell
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[11], line 1
----> 1 model = tf.keras.models.load_model(""../models/test/"")
File ~/.local/lib/python3.9/site-packages/keras/src/saving/saving_api.py:262, in load_model(filepath, custom_objects, compile, safe_mode, **kwargs)
254 return saving_lib.load_model(
255 filepath,
256 custom_objects=custom_objects,
257 compile=compile,
258 safe_mode=safe_mode,
259 )
261 # Legacy case.
--> 262 return legacy_sm_saving_lib.load_model(
263 filepath, custom_objects=custom_objects, compile=compile, **kwargs
264 )
File ~/.local/lib/python3.9/site-packages/keras/src/utils/traceback_utils.py:70, in filter_traceback..error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File ~/.local/lib/python3.9/site-packages/tensorflow/python/checkpoint/restore.py:606, in _queue_children_for_restoration(checkpoint_position, visit_queue)
604 continue
605 child_position = checkpoint_position.create_child_position(child.node_id)
--> 606 local_object = trackable._lookup_dependency(child.local_name,
607 trackable_children)
608 child_proto = child_position.object_proto
609 if local_object is None:
610 # We don't yet have a dependency registered with this name. Save it
611 # in case we do.
TypeError: _lookup_dependency() takes 2 positional arguments but 3 were given
```
"
tensorflow/tensorflow,2023-07-12 05:48:51,bug,ImportError: libcudart.so.8.0: cannot open shared object file: No such file or directory,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
1.0.1
### Custom code
Yes
### OS platform and distribution
Ubuntu 20.04
### Mobile device
_No response_
### Python version
3.6
### Bazel version
_No response_
### GCC/compiler version
9.4.0
### CUDA/cuDNN version
nvcc --version gives 10.1 nvidia-smi gives CUDA Version: 12.2
### GPU model and memory
_No response_
### Current behavior?
ImportError: libcudart.so.8.0: cannot open shared object file: No such file or directory
### Standalone code to reproduce the issue
```shell
This error occured when I imported tensorflow
import tensorflow as tf
To resolve this issue I have set my $CUDA_HOME=/usr/lib/cuda/
and $LD_LIBRARY_PATH=usr/lib/cuda/lib64
But surprisingly usr/lib/cuda/lib64 is empty. I dont have cuda folder in usr/local directory.
```
### Relevant log output
```shell
Traceback (most recent call last):
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/python/__init__.py"", line 61, in
from tensorflow.python import pywrap_tensorflow
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/python/pywrap_tensorflow.py"", line 28, in
_pywrap_tensorflow = swig_import_helper()
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/python/pywrap_tensorflow.py"", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow', fp, pathname, description)
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/imp.py"", line 242, in load_module
return load_dynamic(name, filename, file)
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/imp.py"", line 342, in load_dynamic
return _load(spec)
ImportError: libcudart.so.8.0: cannot open shared object file: No such file or directory
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File """", line 1, in
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/__init__.py"", line 24, in
from tensorflow.python import *
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/python/__init__.py"", line 72, in
raise ImportError(msg)
ImportError: Traceback (most recent call last):
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/python/__init__.py"", line 61, in
from tensorflow.python import pywrap_tensorflow
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/python/pywrap_tensorflow.py"", line 28, in
_pywrap_tensorflow = swig_import_helper()
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/site-packages/tensorflow/python/pywrap_tensorflow.py"", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow', fp, pathname, description)
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/imp.py"", line 242, in load_module
return load_dynamic(name, filename, file)
File ""/home/nkaushal/anaconda3/envs/lipnet3.6/lib/python3.6/imp.py"", line 342, in load_dynamic
return _load(spec)
ImportError: libcudart.so.8.0: cannot open shared object file: No such file or directory
Failed to load the native TensorFlow runtime.
See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/g3doc/get_started/os_setup.md#import_error
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.
```
"
tensorflow/tensorflow,2023-07-11 08:09:33,bug,"Different reference order may cause other modules to be unavailable, e.g. xgboost, sklearn.","### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.11.0
### Custom code
Yes
### OS platform and distribution
centos 7.6
### Mobile device
_No response_
### Python version
python 3.10.11
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
cuda=12.1
### GPU model and memory
32510MiB
### Current behavior?
I was trying to import xgboost(or sklearn) and tensorflow modules at same time, but when I imported modules by different order, it just return me error message that I can not handle it, I don't whether it a bug or some issues that can be fixed by myself? and I also search something resource, which said that it was a bug caused by glibc : https://sourceware.org/bugzilla/show_bug.cgi?id=17090. and then I was trying to reintstall glibc on my server, unfortunately, the plan finally failed and now I am just trying to rebuild my whole environment by rollbacking to previous mirror backup, sad.
### Standalone code to reproduce the issue
```shell
import numpy as np
## bad import order
import tensorflow as tf
from xgboost import XGBClassifier ## order # or import sklearn, may report different error messages.
## good import order
# from xgboost import XGBClassifier
# import tensorflow as tf
hparams = {
'booster':'gbtree',
'objective': 'binary:logistic',
'eval_metric': 'aucpr',
'max_depth': 10,
'gamma': 4,
'lambda':0.001,
'subsample':0.7,
'colsample_bytree':0.8,
'colsample_bylevel':0.8,
'colsample_bynode': 0.8,
'min_child_weight':20,
'eta': 0.03,
'seed': 42,
'nthread':15,
'tree_method':'gpu_hist',
'n_estimators': 350
}
estimator = XGBClassifier(**hparams)
X_train = np.random.rand(10000, 10)
y_train = np.random.randint(0, 2, (10000, 1))
X_eval = np.random.rand(1000, 10)
y_eval = np.random.randint(0, 2, (1000, 1))
estimator.fit(X_train, y_train, eval_set=[(X_train, y_train),(X_eval, y_eval)])
```
### Relevant log output
```shell
2023-07-11 15:57:01.594620: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
Traceback (most recent call last):
File ""/home/haojiaxiang/projects/test/test.py"", line 6, in
from xgboost import XGBClassifier ## order # or import sklearn, may report different error messages.
File ""/home/haojiaxiang/miniconda3/envs/gms/lib/python3.10/site-packages/xgboost/__init__.py"", line 7, in
from . import collective, dask, rabit
File ""/home/haojiaxiang/miniconda3/envs/gms/lib/python3.10/site-packages/xgboost/collective.py"", line 12, in
from .core import _LIB, _check_call, c_str, py_str, from_pystr_to_cstr
File ""/home/haojiaxiang/miniconda3/envs/gms/lib/python3.10/site-packages/xgboost/core.py"", line 264, in
_LIB = _load_lib()
File ""/home/haojiaxiang/miniconda3/envs/gms/lib/python3.10/site-packages/xgboost/core.py"", line 216, in _load_lib
raise XGBoostError(
xgboost.core.XGBoostError:
XGBoost Library (libxgboost.so) could not be loaded.
Likely causes:
* OpenMP runtime is not installed
- vcomp140.dll or libgomp-1.dll for Windows
- libomp.dylib for Mac OSX
- libgomp.so for Linux and other UNIX-like OSes
Mac OSX users: Run `brew install libomp` to install OpenMP runtime.
* You are running 32-bit Python on a 64-bit OS
Error message(s): ['dlopen: cannot load any more object with static TLS']
```
"
tensorflow/tensorflow,2023-07-10 18:41:30,bug,Memory out of bounds in compiled tflite with emscripten.,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
Ubuntu 20.04
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
I have compiled tflite using cmake (without XNNPACK support) and emscripten (both latest 3.1.42 and 3.1.10).
When trying to perform inference at the browser with my model I get the following error:
vmt.wasm:0x31cff Uncaught RuntimeError: memory access out of bounds
at vmt.wasm:0x31cff
at vmt.wasm:0x1f7a94
at vmt.wasm:0x3c4910
at vmt.wasm:0x65ace
at vmt.wasm:0x231c3e
at vmt.wasm:0x458a49
at vmt.wasm:0x517c60
at img.onload (index.html:772:28)
This happens with all of my models at the very first operation (pad). When inspecting the .wasm file using chrome dev tools I see that the error happens at a ""memory.fill"" operation.
### Standalone code to reproduce the issue
```shell
I have compiled tflite with the following emcmake command:
cmake -DCMAKE_CXX_FLAGS=""-lpthread -pthread -lpthread -s USE_PTHREADS"" -DTFLITE_ENABLE_MMAP=OFF -DTFLITE_ENABLE_NNAPI=OFF -DTFLITE_ENABLE_RUY=ON -DTFLITE_ENABLE_XNNPACK=OFF ..
while when compiling my project with emscripten (including the above resulting libraries) I use the following flags:
SET(CMAKE_CXX_FLAGS ""${CMAKE_CXX_FLAGS} -s INITIAL_MEMORY=512MB"")
SET(CMAKE_CXX_FLAGS ""${CMAKE_CXX_FLAGS} -s ALLOW_MEMORY_GROWTH=1"")
SET(CMAKE_CXX_FLAGS ""${CMAKE_CXX_FLAGS} -s ALLOW_TABLE_GROWTH=1"")
```
### Relevant log output
```shell
This is the output of PrintInterpreterState right before the first inference.
[WASM] === Pre-invoke Interpreter State ===
pre-vmt.js:11 [WASM] Interpreter has 1 subgraphs.
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] -----------Subgraph-0 has 134 tensors and 49 nodes------------
pre-vmt.js:11 [WASM] 1 Inputs: [0] -> 602112B (0.57MB)
pre-vmt.js:11 [WASM] 1 Outputs: [122] -> 708B (0.00MB)
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] Tensor ID Name Type AllocType Size (Bytes/MB) Shape MemAddr-Offset
pre-vmt.js:11 [WASM] Tensor 0 ��ʻ䯻9:*.. kTfLiteFloat32 kTfLiteArenaRw 602112 / 0.57 [1,224,224,3] [0, 602112)
pre-vmt.js:11 [WASM] Tensor 1 騅:����m... kTfLiteFloat32 kTfLiteMmapRo 64 / 0.00 [16] [690960, 691024)
pre-vmt.js:11 [WASM] Tensor 2 r畼��匹��t;��... kTfLiteFloat32 kTfLiteMmapRo 64 / 0.00 [16] [690864, 690928)
pre-vmt.js:11 [WASM] Tensor 3 jԐ;��4i;⦄;Q;箮. kTfLiteFloat32 kTfLiteMmapRo 160 / 0.00 [40] [690688, 690848)
pre-vmt.js:11 [WASM] Tensor 4 究çc6ԯ#6ߗ<... kTfLiteFloat32 kTfLiteMmapRo 160 / 0.00 [40] [690512, 690672)
pre-vmt.js:11 [WASM] Tensor 5 :#��
��7tU��... kTfLiteFloat32 kTfLiteMmapRo 224 / 0.00 [56] [690272, 690496)
pre-vmt.js:11 [WASM] Tensor 6 ƙl��7/4��.. kTfLiteFloat32 kTfLiteMmapRo 224 / 0.00 [56] [690032, 690256)
pre-vmt.js:11 [WASM] Tensor 7 ����뻪��3z}��.. kTfLiteFloat32 kTfLiteMmapRo 256 / 0.00 [64] [689760, 690016)
pre-vmt.js:11 [WASM] Tensor 8 ᡁ������땐6ԝ... kTfLiteFloat32 kTfLiteMmapRo 256 / 0.00 [64] [689488, 689744)
pre-vmt.js:11 [WASM] Tensor 9 ��5.8^L... kTfLiteFloat32 kTfLiteMmapRo 576 / 0.00 [144] [688896, 689472)
pre-vmt.js:11 [WASM] Tensor 10 ��""ۀ7��Ce����... kTfLiteFloat32 kTfLiteMmapRo 576 / 0.00 [144] [688304, 688880)
pre-vmt.js:11 [WASM] Tensor 11 ģ
pre-vmt.js:11 [WASM] 7C^J𐬣6... kTfLiteFloat32 kTfLiteMmapRo 576 / 0.00 [144] [687712, 688288)
pre-vmt.js:11 [WASM] Tensor 12 ��ȏ6띓����8h... kTfLiteFloat32 kTfLiteMmapRo 576 / 0.00 [144] [687120, 687696)
pre-vmt.js:11 [WASM] Tensor 13 ��A윶��䞏��.. kTfLiteFloat32 kTfLiteMmapRo 288 / 0.00 [72] [686816, 687104)
pre-vmt.js:11 [WASM] Tensor 14 ԙõ`F6Oc... kTfLiteFloat32 kTfLiteMmapRo 288 / 0.00 [72] [686512, 686800)
pre-vmt.js:11 [WASM] Tensor 15 ū쵄罷{\\ѵfW... kTfLiteFloat32 kTfLiteMmapRo 288 / 0.00 [72] [686208, 686496)
pre-vmt.js:11 [WASM] Tensor 16 &ꋷᛸ6��ꮮ. kTfLiteFloat32 kTfLiteMmapRo 288 / 0.00 [72] [685904, 686192)
pre-vmt.js:11 [WASM] Tensor 17 |㗶��\\Y7... kTfLiteFloat32 kTfLiteMmapRo 576 / 0.00 [144] [685312, 685888)
pre-vmt.js:11 [WASM] Tensor 18 蕷ΒU7��`&÷勸f+... kTfLiteFloat32 kTfLiteMmapRo 576 / 0.00 [144] [684720, 685296)
pre-vmt.js:11 [WASM] Tensor 19 w춃֣74ٿ𣔝����.. kTfLiteFloat32 kTfLiteMmapRo 1152 / 0.00 [288] [683552, 684704)
pre-vmt.js:11 [WASM] Tensor 20 ;""6!膷ڽ��... kTfLiteFloat32 kTfLiteMmapRo 1152 / 0.00 [288] [682384, 683536)
pre-vmt.js:11 [WASM] Tensor 21 YPQ������75... kTfLiteFloat32 kTfLiteMmapRo 1152 / 0.00 [288] [681216, 682368)
pre-vmt.js:11 [WASM] Tensor 22 ��IB}������.. kTfLiteFloat32 kTfLiteMmapRo 1152 / 0.00 [288] [680048, 681200)
pre-vmt.js:11 [WASM] Tensor 23 wް5��^ 8. kTfLiteFloat32 kTfLiteMmapRo 1152 / 0.00 [288] [678880, 680032)
pre-vmt.js:11 [WASM] Tensor 24 _��m.. kTfLiteFloat32 kTfLiteMmapRo 32 / 0.00 [8] [678832, 678864)
pre-vmt.js:11 [WASM] Tensor 25 P��&;����7#.. kTfLiteFloat32 kTfLiteMmapRo 64 / 0.00 [16] [678752, 678816)
pre-vmt.js:11 [WASM] Tensor 26 ��5aᘑ��.. kTfLiteFloat32 kTfLiteMmapRo 64 / 0.00 [16] [678672, 678736)
pre-vmt.js:11 [WASM] Tensor 27 卞7����. kTfLiteFloat32 kTfLiteMmapRo 96 / 0.00 [24] [678560, 678656)
pre-vmt.js:11 [WASM] Tensor 28 FRd������.. kTfLiteFloat32 kTfLiteMmapRo 96 / 0.00 [24] [678448, 678544)
pre-vmt.js:11 [WASM] Tensor 29 W7̒#�� kTfLiteFloat32 kTfLiteMmapRo 96 / 0.00 [24] [678336, 678432)
pre-vmt.js:11 [WASM] Tensor 30 $뀷��Ru쭌7. kTfLiteFloat32 kTfLiteMmapRo 96 / 0.00 [24] [678224, 678320)
pre-vmt.js:11 [WASM] Tensor 31 \\~��ط$i
6҄*觮.. kTfLiteFloat32 kTfLiteMmapRo 96 / 0.00 [24] [678112, 678208)
pre-vmt.js:11 [WASM] Tensor 32 ��0��涂f5��. kTfLiteFloat32 kTfLiteMmapRo 192 / 0.00 [48] [677904, 678096)
pre-vmt.js:11 [WASM] Tensor 33 ؉h������7˜... kTfLiteFloat32 kTfLiteMmapRo 192 / 0.00 [48] [677696, 677888)
pre-vmt.js:11 [WASM] Tensor 34 \\7͛$♩��7[... kTfLiteFloat32 kTfLiteMmapRo 192 / 0.00 [48] [677488, 677680)
pre-vmt.js:11 [WASM] Tensor 35 9X����������.. kTfLiteFloat32 kTfLiteMmapRo 1728 / 0.00 [16,3,3,3] [675744, 677472)
pre-vmt.js:11 [WASM] Tensor 36 jť<箯;Љ&.. kTfLiteFloat32 kTfLiteMmapRo 576 / 0.00 [1,3,3,16] [675152, 675728)
pre-vmt.js:11 [WASM] Tensor 37 λ""<����?<ɓ... kTfLiteFloat32 kTfLiteMmapRo 512 / 0.00 [8,1,1,16] [674624, 675136)
pre-vmt.js:11 [WASM] Tensor 38 ޛȻx׃<ěֺ舮.. kTfLiteFloat32 kTfLiteArenaRw 18816 / 0.02 [1,14,14,24] [846720, 865536)
pre-vmt.js:11 [WASM] Tensor 95 i粼**&<ļ;B... kTfLiteFloat32 kTfLiteArenaRw 112896 / 0.11 [1,14,14,144] [602112, 715008)
pre-vmt.js:11 [WASM] Tensor 96 S{.ZR����<9��1... kTfLiteFloat32 kTfLiteArenaRw 112896 / 0.11 [1,14,14,144] [715008, 827904)
pre-vmt.js:11 [WASM] Tensor 97 Pw��㢥.. kTfLiteFloat32 kTfLiteArenaRw 18816 / 0.02 [1,14,14,24] [827904, 846720)
pre-vmt.js:11 [WASM] Tensor 98 ߴüs��9 kTfLiteFloat32 kTfLiteArenaRw 18816 / 0.02 [1,14,14,24] [715008, 733824)
pre-vmt.js:11 [WASM] Tensor 99 t#<.. kTfLiteFloat32 kTfLiteArenaRw 56448 / 0.05 [1,14,14,72] [602112, 658560)
pre-vmt.js:11 [WASM] Tensor 100 ��ݿ������9\\r... kTfLiteFloat32 kTfLiteArenaRw 56448 / 0.05 [1,14,14,72] [658560, 715008)
pre-vmt.js:11 [WASM] Tensor 101 ��ڛ����N.. kTfLiteFloat32 kTfLiteArenaRw 18816 / 0.02 [1,14,14,24] [602112, 620928)
pre-vmt.js:11 [WASM] Tensor 102 iܨ<<<;. kTfLiteFloat32 kTfLiteArenaRw 18816 / 0.02 [1,14,14,24] [733824, 752640)
pre-vmt.js:11 [WASM] Tensor 103 ��hԂ 133 -> 75 -> 74.
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] kTfLiteArenaRwPersistent Info: not holding any allocation.
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] kTfLiteMmapRo Info:
pre-vmt.js:11 [WASM] Tensor 73 has the max size 203904 bytes (0.194 MB).
pre-vmt.js:11 [WASM] This memory arena is estimated as[0x10dde70, 0x1035320), taking 691024 bytes (0.659 MB).
pre-vmt.js:11 [WASM] One possible set of tensors that have non-overlapping memory spaces with each other, and they take up the whole arena:
pre-vmt.js:11 [WASM] Tensor 73 -> 72 -> 71 -> 70 -> 69 -> 68 -> 67 -> 66 -> 65 -> 64 -> 63 -> 62 -> 61 -> 60 -> 59 -> 58 -> 57 -> 56 -> 55 -> 54 -> 53 -> 52 -> 51 -> 50 -> 49 -> 48 -> 47 -> 46 -> 45 -> 44 -> 43 -> 42 -> 41 -> 40 -> 39 -> 38 -> 37 -> 36 -> 35 -> 34 -> 33 -> 32 -> 31 -> 30 -> 29 -> 28 -> 27 -> 26 -> 25 -> 24 -> 23 -> 22 -> 21 -> 20 -> 19 -> 18 -> 17 -> 16 -> 15 -> 14 -> 13 -> 12 -> 11 -> 10 -> 9 -> 8 -> 7 -> 6 -> 5 -> 4 -> 3 -> 2 -> 1.
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] kTfLiteDynamic Info: not holding any allocation.
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] Node 0 Operator Builtin Code 34 PAD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[0,72] -> 602144B (0.57MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[74] -> 612912B (0.58MB)
pre-vmt.js:11 [WASM] Node 1 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[74,35,1] -> 614704B (0.59MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[75] -> 802816B (0.77MB)
pre-vmt.js:11 [WASM] 1 Temporary Tensors:[133] -> 1354752B (1.29MB)
pre-vmt.js:11 [WASM] Node 2 Operator Builtin Code 34 PAD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[75,72] -> 802848B (0.77MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[76] -> 831744B (0.79MB)
pre-vmt.js:11 [WASM] Node 3 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[76,36,2] -> 832384B (0.79MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[77] -> 200704B (0.19MB)
pre-vmt.js:11 [WASM] Node 4 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[77,37,24] -> 201248B (0.19MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[78] -> 100352B (0.10MB)
pre-vmt.js:11 [WASM] Node 5 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[78,38,3] -> 101792B (0.10MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[79] -> 501760B (0.48MB)
pre-vmt.js:11 [WASM] Node 6 Operator Builtin Code 34 PAD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[79,72] -> 501792B (0.48MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[80] -> 538240B (0.51MB)
pre-vmt.js:11 [WASM] Node 7 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[80,39,4] -> 539840B (0.51MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[81] -> 125440B (0.12MB)
pre-vmt.js:11 [WASM] Node 8 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[81,40,25] -> 128064B (0.12MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[82] -> 50176B (0.05MB)
pre-vmt.js:11 [WASM] Node 9 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[82,41,5] -> 53984B (0.05MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[83] -> 175616B (0.17MB)
pre-vmt.js:11 [WASM] Node 10 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[83,42,6] -> 177856B (0.17MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[84] -> 175616B (0.17MB)
pre-vmt.js:11 [WASM] Node 11 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[84,43,26] -> 179264B (0.17MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[85] -> 50176B (0.05MB)
pre-vmt.js:11 [WASM] Node 12 Operator Builtin Code 0 ADD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[85,82] -> 100352B (0.10MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[86] -> 50176B (0.05MB)
pre-vmt.js:11 [WASM] Node 13 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[86,44,7] -> 54528B (0.05MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[87] -> 200704B (0.19MB)
pre-vmt.js:11 [WASM] Node 14 Operator Builtin Code 34 PAD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[87,72] -> 200736B (0.19MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[88] -> 230400B (0.22MB)
pre-vmt.js:11 [WASM] Node 15 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[88,45,8] -> 232960B (0.22MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[89] -> 50176B (0.05MB)
pre-vmt.js:11 [WASM] Node 16 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[89,46,27] -> 56416B (0.05MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[90] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 17 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[90,47,9] -> 33216B (0.03MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[91] -> 112896B (0.11MB)
pre-vmt.js:11 [WASM] Node 18 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[91,48,10] -> 118656B (0.11MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[92] -> 112896B (0.11MB)
pre-vmt.js:11 [WASM] Node 19 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[92,49,28] -> 126816B (0.12MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[93] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 20 Operator Builtin Code 0 ADD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[93,90] -> 37632B (0.04MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[94] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 21 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[94,50,11] -> 33216B (0.03MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[95] -> 112896B (0.11MB)
pre-vmt.js:11 [WASM] Node 22 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[95,51,12] -> 118656B (0.11MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[96] -> 112896B (0.11MB)
pre-vmt.js:11 [WASM] Node 23 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[96,52,29] -> 126816B (0.12MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[97] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 24 Operator Builtin Code 0 ADD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[97,94] -> 37632B (0.04MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[98] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 25 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[98,53,13] -> 26016B (0.02MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[99] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 26 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[99,54,14] -> 59328B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[100] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 27 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[100,55,30] -> 63456B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[101] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 28 Operator Builtin Code 0 ADD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[101,98] -> 37632B (0.04MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[102] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 29 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[102,56,15] -> 26016B (0.02MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[103] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 30 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[103,57,16] -> 59328B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[104] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 31 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[104,58,31] -> 63456B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[105] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 32 Operator Builtin Code 0 ADD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[105,102] -> 37632B (0.04MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[106] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] Node 33 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[106,59,17] -> 33216B (0.03MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[107] -> 112896B (0.11MB)
pre-vmt.js:11 [WASM] Node 34 Operator Builtin Code 34 PAD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[107,72] -> 112928B (0.11MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[108] -> 147456B (0.14MB)
pre-vmt.js:11 [WASM] Node 35 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[108,60,18] -> 153216B (0.15MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[109] -> 28224B (0.03MB)
pre-vmt.js:11 [WASM] Node 36 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[109,61,32] -> 56064B (0.05MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[110] -> 9408B (0.01MB)
pre-vmt.js:11 [WASM] Node 37 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[110,62,19] -> 65856B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[111] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 38 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[111,63,20] -> 67968B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[112] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 39 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[112,64,33] -> 111936B (0.11MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[113] -> 9408B (0.01MB)
pre-vmt.js:11 [WASM] Node 40 Operator Builtin Code 0 ADD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[113,110] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[114] -> 9408B (0.01MB)
pre-vmt.js:11 [WASM] Node 41 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[114,65,21] -> 65856B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[115] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 42 Operator Builtin Code 4 DEPTHWISE_CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[115,66,22] -> 67968B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[116] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 43 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[116,67,34] -> 111936B (0.11MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[117] -> 9408B (0.01MB)
pre-vmt.js:11 [WASM] Node 44 Operator Builtin Code 0 ADD (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[117,114] -> 18816B (0.02MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[118] -> 9408B (0.01MB)
pre-vmt.js:11 [WASM] Node 45 Operator Builtin Code 3 CONV_2D (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[118,68,23] -> 65856B (0.06MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[119] -> 56448B (0.05MB)
pre-vmt.js:11 [WASM] Node 46 Operator Builtin Code 40 MEAN (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[119,69] -> 56456B (0.05MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[120] -> 1152B (0.00MB)
pre-vmt.js:11 [WASM] 4 Temporary Tensors:[123-126] -> 1176B (0.00MB)
pre-vmt.js:11 [WASM] Node 47 Operator Builtin Code 9 FULLY_CONNECTED (not delegated)
pre-vmt.js:11 [WASM] 3 Input Tensors:[120,73,71] -> 205764B (0.20MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[121] -> 708B (0.00MB)
pre-vmt.js:11 [WASM] Node 48 Operator Builtin Code 22 RESHAPE (not delegated)
pre-vmt.js:11 [WASM] 2 Input Tensors:[121,70] -> 720B (0.00MB)
pre-vmt.js:11 [WASM] 1 Output Tensors:[122] -> 708B (0.00MB)
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] Execution plan as the list of 49 nodes invoked in-order: [0-48]
pre-vmt.js:11 [WASM] --------------Subgraph-0 dump has completed--------------
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] --------------Memory Arena Status Start--------------
pre-vmt.js:11 [WASM] Total memory usage: 3372848 bytes (3.217 MB)
pre-vmt.js:11 [WASM] - Total arena memory usage: 3372848 bytes (3.217 MB)
pre-vmt.js:11 [WASM] - Total dynamic memory usage: 0 bytes (0.000 MB)
pre-vmt.js:11 [WASM]
pre-vmt.js:11 [WASM] Subgraph#0 Arena (Normal) 3372720 (100.00%)
pre-vmt.js:11 [WASM] Subgraph#0 Arena (Persistent) 128 (0.00%)
pre-vmt.js:11 [WASM] --------------Memory Arena Status End--------------
vmt.wasm:0x31cff Uncaught RuntimeError: memory access out of bounds
at vmt.wasm:0x31cff
at vmt.wasm:0x1f7a94
at vmt.wasm:0x3c4910
at vmt.wasm:0x65ace
at vmt.wasm:0x231c3e
at vmt.wasm:0x458a49
at vmt.wasm:0x517c60
at Module._landmarkDetection (vmt.js:6100:85)
at VmtHelper.cycleForSingleImage (vmt-helper.js:115:29)
at img.onload (index.html:772:28)
```
"
tensorflow/tensorflow,2023-07-10 09:15:00,bug,ctc_ops.py deprecation warning,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
source
### TensorFlow version
2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
WARNING:tensorflow:From /home/sronen/code/.venv/lib/python3.10/site-packages/tensorflow/python/ops/ctc_ops.py:1514: alias_inplace_add (from tensorflow.python.ops.inplace_ops) is deprecated and will be removed in a future version.
### Standalone code to reproduce the issue
```shell
I suspect any call to tf.nn.ctc_loss
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-08 04:32:10,bug,"savedmodel convert to tflite and merge labels. tx to new tflite model,but resulte is error by Xcode, use python api is correct","### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
2.13.0
### Custom code
Yes
### OS platform and distribution
mac os 12.6
### Mobile device
ios 16.1
### Python version
3.10.0
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
convert savedmodel to tflite in ios is error
model download url https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/classification/5
convert steps
1.
2.
script is use offical
metadata_writer_for_image_classifier.py
use convert savedmodel to convert tflite (merge labels.txt and tflite)
python api result is correct
ios is error (self-converted)
ios is correct (download tflite is correct )
### Standalone code to reproduce the issue
```shell
ios code:
python code:
def classify_image_tflite_no_sin(model_path, predicted_image_path, labels_path):
TF_MODEL_FILE_PATH = model_path
interpreter = tf.lite.Interpreter(model_path=TF_MODEL_FILE_PATH)
interpreter.allocate_tensors()
# 加载标签文件
with open(labels_path, 'r') as f:
labels = f.read().splitlines()
# 读取和预处理图像
image_path = predicted_image_path
image = Image.open(image_path).resize((224, 224)) # 调整图像大小
image = np.array(image) # 将图像转换为NumPy数组
image = image / 255.0 # 归一化图像
image = np.expand_dims(image, axis=0).astype(np.float32) # 添加批次维度并转换为float32
# 设置模型输入和输出张量
input_tensor_index = interpreter.get_input_details()[0]['index']
output_tensor_index = interpreter.get_output_details()[0]['index']
# 设置输入张量的值
interpreter.set_tensor(input_tensor_index, image)
# 执行推断
interpreter.invoke()
# 获取输出张量的结果
output_data = interpreter.get_tensor(output_tensor_index)
score_lite = tf.nn.softmax(output_data)
# 获取预测类别索引
class_index = np.argmax(score_lite)
predicted_label = labels[class_index]
confidence = score_lite[0][class_index]
# 输出预测结果
print('预测类别:', predicted_label)
print('预测准确度:', confidence)
```
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-07 07:27:24,bug,tf.keras.models.model_from_json() missing a safe_mode parameter,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
binary
### TensorFlow version
tf 2.13
### Custom code
No
### OS platform and distribution
Ubuntu 20.04
### Mobile device
_No response_
### Python version
3.11
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
When loading a model with a lambda layer from a json model config, TensorFlow provides the following error:
`
ValueError: Requested the deserialization of a Lambda layer with a Python 'lambda' inside it. This carries a potential risk of arbitrary code execution and thus it is disallowed by default. If you trust the source of the saved model, you can pass
'safe_mode=False' to the loading function in order to allow Lambda layer loading.
`
However `tf.keras.models.model_from_json()` does not have a `safe_mode` parameter. So there does not seem to be a way to load models with lambda layers using a json config. This issue does not appear in TensorFlow 2.12
### Standalone code to reproduce the issue
```shell
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Input, Lambda
inputs = Input(shape=(1,))
x = Lambda(lambda x: x*2)(inputs)
out = Dense(1)(x)
model = Model(inputs=inputs,outputs=out)
model_config = model.to_json()
tf.keras.models.model_from_json(model_config)
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-06 12:56:59,bug,Issue with Reproducible Results: Inconsistent Behavior of Random Seeds in TensorFlow,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
No
### Source
source
### TensorFlow version
2.12
### Custom code
Yes
### OS platform and distribution
Ubuntu 22.04
### Mobile device
_No response_
### Python version
3.8-3.9-3.10
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
I would like to report an issue regarding the reproducibility of results in TensorFlow. Currently, in order to achieve consistent and deterministic results, it seems necessary to set both random.seed(42) and tf.random.set_seed(1) together.
Expected Behavior:
Setting tf.random.set_seed(1) alone should be sufficient to ensure reproducible results across different runs.
Observed Behavior:
Without setting random.seed(42) alongside tf.random.set_seed(1), the results obtained from TensorFlow exhibit inconsistency and do not remain fixed between runs.
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import random
tf.random.set_seed(1)
random.seed(42) # This line seems to be redundant
x = tf.constant(tf.random.uniform([2, 3, 2]), dtype=tf.float32)
```
### Relevant log output
_No response_"
tensorflow/tensorflow,2023-07-05 15:38:42,bug,ValueError: Checkpoint was expecting to be a trackable object (an object derived from `Trackable`),"### Issue type
Bug
### Source
binary
### TensorFlow version
tf 2.10.0
### Custom code
Yes
### OS platform and distribution
Windows 10 Enterprise
### Python version
3.9.16
### Current behavior?
I'm receiving an error when I try to restore the model checkpoint. I've seen a posting on here that's similar, but I think my case is different. Help is very much appreciated!
I'm using a pre-trained object detection model called SSD MobileNet V2 FPNLite 320x320
### Standalone code to reproduce the issue
```python
import os
import tensorflow as tf
import pandas as pd
import openpyxl
import cv2
import numpy as np
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder
from object_detection.utils import config_util
from matplotlib import pyplot as plt
from pathlib import Path
os.chdir(r""C:\\Users\\mill286"")
CUSTOM_MODEL_NAME = 'my_ssd_resnet50_v1_fpn' # *** Enter here the name of the model you trained. ***
files = {
'PIPELINE_CONFIG':os.path.join('tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config')
}
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])
detection_model = model_builder.build(model_config=configs['model'], is_training=False)
# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'ckpt-54.index')).expect_partial() # *** Replace the number in 'ckpt-XX' with the checkpoint you want to use. ***
```
### Relevant log output
```shell
ValueError Traceback (most recent call last)
Cell In[18], line 2
1 # Restore checkpoint
----> 2 ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
3 ckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'ckpt-54.index')).expect_partial()
File ~\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\checkpoint\\checkpoint.py:2142, in Checkpoint.__init__(self, root, **kwargs)
2140 if isinstance(converted_v, weakref.ref):
2141 converted_v = converted_v()
-> 2142 _assert_trackable(converted_v, k)
2144 if root:
2145 # Make sure that root doesn't already have dependencies with these names
2146 child = trackable_root._lookup_dependency(k)
File ~\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\checkpoint\\checkpoint.py:1562, in _assert_trackable(obj, name)
1559 def _assert_trackable(obj, name):
1560 if not isinstance(
1561 obj, (base.Trackable, def_function.Function)):
-> 1562 raise ValueError(
1563 f""`Checkpoint` was expecting {name} to be a trackable object (an ""
1564 f""object derived from `Trackable`), got {obj}. If you believe this ""
1565 ""object should be trackable (i.e. it is part of the ""
1566 ""TensorFlow Python API and manages state), please open an issue."")
ValueError: `Checkpoint` was expecting model to be a trackable object (an object derived from `Trackable`), got . If you believe this object should be trackable (i.e. it is part of the TensorFlow Python API and manages state), please open an issue.
```
"
tensorflow/tensorflow,2023-07-02 10:13:52,bug,tf.image.extract_patches error for tf.RaggedTensor inputs,"### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.12.0
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
`tf.image.extract_patches` should be able to extract patches from `ragged` tensors.
### Standalone code to reproduce the issue
```shell
def build_model():
input = tf.keras.Input([None, None, 3], ragged=True, name=""image"")
patches = tf.image.extract_patches(
images=input,
sizes=[1, 4, 4, 1],
strides=[1, 4, 4, 1],
rates=[1, 1, 1, 1],
padding=""SAME"",
)
return tf.keras.Model(
inputs=input,
outputs=patches)
model = build_model()
```
### Relevant log output
```shell
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
in ()
14
15
---> 16 model = build_model()
3 frames
/usr/local/lib/python3.10/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
TypeError: Exception encountered when calling layer ""tf.image.extract_patches_3"" (type TFOpLambda).
Failed to convert elements of tf.RaggedTensor(values=tf.RaggedTensor(values=Tensor(""Placeholder:0"", shape=(None, 3), dtype=float32), row_splits=Tensor(""Placeholder_1:0"", shape=(None,), dtype=int64)), row_splits=Tensor(""Placeholder_2:0"", shape=(None,), dtype=int64)) to Tensor. Consider casting elements to a supported type. See https://www.tensorflow.org/api_docs/python/tf/dtypes for supported TF dtypes.
Call arguments received by layer ""tf.image.extract_patches_3"" (type TFOpLambda):
• images=tf.RaggedTensor(values=tf.RaggedTensor(values=Tensor(""Placeholder:0"", shape=(None, 3), dtype=float32), row_splits=Tensor(""Placeholder_1:0"", shape=(None,), dtype=int64)), row_splits=Tensor(""Placeholder_2:0"", shape=(None,), dtype=int64))
• sizes=['1', '4', '4', '1']
• strides=['1', '4', '4', '1']
• rates=['1', '1', '1', '1']
• padding='SAME'
• name=None
```
"
tensorflow/tensorflow,2023-06-30 16:50:24,bug,multiple issues with the new parameter server strategy,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
binary
### Tensorflow Version
2.10.1
### Custom Code
Yes
### OS Platform and Distribution
ubuntu 16.04
### Mobile device
_No response_
### Python version
3.8
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
We encountered multiple issues when using the TensorFlow 2.x parameter server strategy:
1. how to shard data by files, how to use the later binding mechanism to shard data by files, which is critical for high performance training using tf.data.Dataset apis, all workers reading the total data is not scalable.
2. how to control the placement of ops, in one attempt, we build dataset using tf.data.Dataset.from_tensor_slices and.
dataset = dataset.interleave(lambda x: tf.data.TextLineDataset(x).skip(1), cycle_length=15, num_parallel_calls=15)
with parameter server strategy, the dataset related ops runs on workers, however, if I add one more op dataset.repeat(), the dataset ops all runs on chief, which is surprising.
4. how to ensure even the workers are with uneven amount of data, the training process with model.fit could end elegantly instead of having to using try catch or data.repeat, as recommendation models generally assume one epoch training.
[yuefengz@google.com](mailto:yuefengz@google.com)
[rchao@google.com](mailto:rchao@google.com)
### Standalone code to reproduce the issue
```shell
# for chief:
file_list = tf.io.gfile.glob(input_pattern)
dataset = tf.data.Dataset.from_tensor_slices(file_list)
dataset = dataset.shard(worker_num, worker_id) # how to later bind worker_id?
dataset = dataset.interleave(lambda x: tf.data.TextLineDataset(x).skip(1), cycle_length=15, num_parallel_calls=15)
def _parse_csv(line):
record_defaults = []
for x in all_columns:
record_defaults.append(0)
with tf.control_dependencies([tf.print(tf.shape(line), line[0], output_stream=sys.stderr)]):
fields = tf.io.decode_csv(
line,
field_delim=',',
record_defaults=record_defaults,
name='decode_csv')
return fields
dataset = dataset.map(_parse_csv, num_parallel_calls=8)
dataset = dataset.repeat()
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver, variable_partitioner=variable_partitioner)
with strategy.scope():
model = build_model(...)
model.fit(dataset, epochs=num_epoch,
callbacks=[
tf.keras.callbacks.ProgbarLogger(count_mode='steps'),
tf.keras.callbacks.TensorBoard(log_dir='/train/tensorboard/', histogram_freq=0)
],
steps_per_epoch=steps_per_epoch)
# for workers and ps:
server = tf.distribute.Server(
cluster_resolver.cluster_spec(),
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=cluster_resolver.rpc_layer or 'grpc',
start=True)
server.join()
```
### Relevant log output
```shell
# tf.print outputs are all on chief stdout, and no improvements if we place the dataset built process under strategy.scope() or using ops.device() operations.
```
"
tensorflow/tensorflow,2023-06-28 15:59:05,bug,//tensorflow/python/data/kernel_tests:snapshot_test is flaky,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
source
### Tensorflow Version
git HEAD
### Custom Code
No
### OS Platform and Distribution
Ubuntu 20.04
### Mobile device
n/a
### Python version
3.9.16
### Bazel version
6.1.0
### GCC/Compiler version
10.2.1
### CUDA/cuDNN version
n/a
### GPU model and memory
n/a
### Current Behaviour?
//tensorflow/python/data/kernel_tests:snapshot_test sometimes fails
x86 log
https://source.cloud.google.com/results/invocations/8b60bfba-b6b6-4503-aa43-62e8bbe1a094/log
AARCH64 log
### Standalone code to reproduce the issue
```shell
bazel --bazelrc=/usertools/cpu.bazelrc test --config=pycpp --config=build_event_export --remote_cache=https://storage.googleapis.com/tensorflow-devinfra-bazel-cache/norbe --google_default_credentials
```
### Relevant log output
```shell
ERROR: testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart_test_mode_eager_tfapiversion_2 (__main__.SnapshotTest)
SnapshotTest.testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart_test_mode_eager_tfapiversion_2
testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart_test_mode_eager_tfapiversion_2(mode='eager', tf_api_version=2)
----------------------------------------------------------------------
Traceback (most recent call last):
File ""/root/.cache/bazel/_bazel_root/fbac33eb30dbfb6b11b15a7ff5ac830d/execroot/org_tensorflow/bazel-out/k8-opt/bin/tensorflow/python/data/kernel_tests/snapshot_test.runfiles/org_tensorflow/tensorflow/python/data/kernel_tests/snapshot_test.py"", line 63, in tearDown
shutil.rmtree(self._snapshot_dir)
File ""/usr/lib/python3.9/shutil.py"", line 734, in rmtree
_rmtree_safe_fd(fd, path, onerror)
File ""/usr/lib/python3.9/shutil.py"", line 673, in _rmtree_safe_fd
onerror(os.rmdir, fullname, sys.exc_info())
File ""/usr/lib/python3.9/shutil.py"", line 671, in _rmtree_safe_fd
os.rmdir(entry.name, dir_fd=topfd)
OSError: [Errno 39] Directory not empty: '5643068742232426178'
======================================================================
FAIL: testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart_test_mode_eager_tfapiversion_2 (__main__.SnapshotTest)
SnapshotTest.testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart_test_mode_eager_tfapiversion_2
testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart_test_mode_eager_tfapiversion_2(mode='eager', tf_api_version=2)
----------------------------------------------------------------------
Traceback (most recent call last):
File ""/root/.cache/bazel/_bazel_root/fbac33eb30dbfb6b11b15a7ff5ac830d/execroot/org_tensorflow/bazel-out/k8-opt/bin/tensorflow/python/data/kernel_tests/snapshot_test.runfiles/absl_py/absl/testing/parameterized.py"", line 314, in bound_param_test
return test_method(self, **testcase_params)
File ""/root/.cache/bazel/_bazel_root/fbac33eb30dbfb6b11b15a7ff5ac830d/execroot/org_tensorflow/bazel-out/k8-opt/bin/tensorflow/python/data/kernel_tests/snapshot_test.runfiles/org_tensorflow/tensorflow/python/framework/test_combinations.py"", line 360, in decorated
execute_test_method()
File ""/root/.cache/bazel/_bazel_root/fbac33eb30dbfb6b11b15a7ff5ac830d/execroot/org_tensorflow/bazel-out/k8-opt/bin/tensorflow/python/data/kernel_tests/snapshot_test.runfiles/org_tensorflow/tensorflow/python/framework/test_combinations.py"", line 343, in execute_test_method
test_method(**kwargs_to_pass)
File ""/root/.cache/bazel/_bazel_root/fbac33eb30dbfb6b11b15a7ff5ac830d/execroot/org_tensorflow/bazel-out/k8-opt/bin/tensorflow/python/data/kernel_tests/snapshot_test.runfiles/org_tensorflow/tensorflow/python/data/kernel_tests/snapshot_test.py"", line 318, in testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart
self.assertSnapshotDirectoryContains(
File ""/root/.cache/bazel/_bazel_root/fbac33eb30dbfb6b11b15a7ff5ac830d/execroot/org_tensorflow/bazel-out/k8-opt/bin/tensorflow/python/data/kernel_tests/snapshot_test.runfiles/org_tensorflow/tensorflow/python/data/kernel_tests/snapshot_test.py"", line 108, in assertSnapshotDirectoryContains
self.assertLen(run_dirlist, num_snapshot_shards_per_run)
AssertionError: ['00000000.shard', '00000001.shard', '00000002.shard', '00000003.shard', '00000004.shard', '00000005.shard', '00000006.shard', '00000007.shard', '00000008.shard', '00000009.shard', '00000010.shard', '00000011.shard', '00000012.shard', '00000013.shard', '00000014.shard', '00000015.shard', '00000016.shard', '00000017.shard', '00000018.shard', '00000019.shard', '00000020.shard', '00000021.shard', '00000022.shard', '00000023.shard', '00000024.shard', '00000025.shard', '00000026.shard', '00000027.shard', '00000028.shard', '00000029.shard', '00000030.shard', '00000031.shard', '00000032.shard', '00000033.shard', '00000034.shard', '00000035.shard', '00000036.shard', '00000037.shard', '00000038.shard', '00000039.shard', '00000040.shard', '00000041.shard', '00000042.shard', '00000043.shard', '00000044.shard', '00000045.shard', '00000046.shard', '00000047.shard', '00000048.shard', '00000049.shard', '00000050.shard', '00000051.shard', '00000052.shard', '00000053.shard', '00000054.shard', '00000055.shard', '00000056.shard', '00000057.shard', '00000058.shard', '00000059.shard', '00000060.shard', '00000061.shard', '00000062.shard', '00000063.shard', '00000064.shard', '00000065.shard', '00000066.shard', '00000067.shard', '00000068.shard', '00000069.shard', '00000070.shard', '00000071.shard', '00000072.shard', '00000073.shard', '00000074.shard', '00000075.shard', '00000076.shard', '00000077.shard', '00000078.shard', '00000079.shard', '00000080.shard', '00000081.shard', '00000082.shard', '00000083.shard', '00000084.shard', '00000085.shard', '00000086.shard', '00000087.shard', '00000088.shard', '00000089.shard', '00000090.shard', '00000091.shard', '00000092.shard', '00000093.shard', '00000094.shard', '00000095.shard', '00000096.shard', '00000099.shard', '00000101.shard', '00000103.shard', '00000104.shard', '00000105.shard', '00000108.shard', '00000110.shard', '00000115.shard', '00000116.shard', '00000121.shard'] has length of 107, expected 128.
----------------------------------------------------------------------
Ran 20 tests in 5.563s
```
"
tensorflow/tensorflow,2023-06-27 17:09:43,bug,UnsatisfiedLinkError: Failed to load native TensorFlow Lite methods ,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
binary
### Tensorflow Version
0.0.0-nightly-SNAPSHOT
### Custom Code
No
### OS Platform and Distribution
_No response_
### Mobile device
Android Samsung Galaxy J5
### Python version
_No response_
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
Fatal Exception: java.lang.UnsatisfiedLinkError: Failed to load native TensorFlow Lite methods. Check that the correct native libraries are present, and, if using a custom native library, have been properly loaded via System.loadLibrary():
java.lang.UnsatisfiedLinkError: dlopen failed: cannot locate symbol ""__register_atfork"" referenced by ""libtensorflowlite_jni.so""
This is reproducible on a lot of Android devices running Android versions 5, 6, and 7
This is happening in the nightly snapshots from probably last 2 weeks. Did not encounter this in previous nightly snapshots.
### Standalone code to reproduce the issue
```shell
val nnApiOption = NnApiDelegate.Options()
nnApiOption.setUseNnapiCpu(true)
val nnApiDelegate = NnApiDelegate(nnApiOption)
```
### Relevant log output
2023-06-28 10:52:35.629 29536-29620 InterpreterApi I Didn't load native library: tensorflowlite_jni
2023-06-28 10:52:35.637 29536-29620 InterpreterApi I Didn't load native library: tensorflowlite_jni_stable
2023-06-28 10:52:35.638 29536-29620 InterpreterApi I Didn't load native library: tensorflowlite_jni_gms_client
"
tensorflow/tensorflow,2023-06-27 09:22:41,bug,The relationship between the parameters of Conv2D is unclear,"Click to expand!
### Issue Type
Documentation Bug
### Have you reproduced the bug with TF nightly?
No
### Source
source
### Tensorflow Version
tf2.12.0
### Custom Code
Yes
### OS Platform and Distribution
MacOs
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
```
ValueError: `strides > 1` not supported in conjunction with `dilation_rate > 1`. Received: strides=[2, 2] and dilation_rate=[4, 5]
```
The relationship between these two parameters is not clearly defined in the documentation, so users may be unaware that `strides > 1` is not supported in conjunction with `dilation_rate > 1`.
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
from tensorflow.python.keras.layers import Conv2D
input_tensor = tf.random.normal(shape=(1, 32, 32, 3))
x = Conv2D(filters=2, kernel_size=(1,1), strides=(2,2), padding=""same"", use_bias=False, dilation_rate=(4, 5))(input_tensor)
print(x.shape)
```
### Relevant log output
_No response_ "
tensorflow/tensorflow,2023-06-26 17:39:13,bug,Not initialized delegate kernel after tflite conversion,"### 1. System information
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
Windows 11
- TensorFlow installation (pip package or built from source):
pip package
- TensorFlow library (version, if pip package or github SHA, if built from source):
TensorFlow 2.10.0
### 2. Code
Code to reproduce my issue is attached to this issue.
[tf_issue.zip](https://github.com/tensorflow/tensorflow/files/11871912/tf_issue.zip)
### 3. Failure after conversion
```
File ""tf_issue\\test_ocr.py"", line 62, in __call__
self._interpreter.invoke()
File ""...\\venv\\lib\\site-packages\\tensorflow\\lite\\python\\interpreter.py"", line 917, in invoke
self._interpreter.Invoke()
RuntimeError: Current implementation only supports equal length strides in the row and column dimensions.Delegate kernel was not initializedNode number 510 (TfLiteFlexDelegate) failed to prepare.
```
### 5. (optional) Any other info / logs
Hello,
I downloaded the OCR model called **en_PP-OCRv3_rec_infer** from the [Paddle repository](https://github.com/PaddlePaddle/PaddleOCR). To prepare it for my purposes, I converted it into the ONNX format and optimized it, following the guidelines provided [here](https://github.com/PaddlePaddle/Paddle2ONNX/blob/develop/README_en.md#command-line-conversion). To ensure compatibility, I defined a static input/output size for the model.
Subsequently, I proceeded to convert the ONNX format to TFLite using this [repository](https://github.com/sithu31296/PyTorch-ONNX-TFLite/tree/master#onnx-to-tf). Once the conversion was complete, I loaded the resulting .tflite model into [Netron](https://netron.app/) without any issues, as it successfully read and visualized the model.
However, the problem arises when I attempt to test this model using Python (3.10). The attached zip file contains the code, the model itself, and a sample testing image (It also contain a requirements file with all the packages of my environment).
In my case, I utilized an input_shape_dict of ""{'x': [1, 3, 48, 320]}"" and exported the model in both fp16 and fp32 formats. I also experimented with opset_versions 10 and 16. However, despite these attempts, I encountered the reported failure repeatedly.
"
tensorflow/tensorflow,2023-06-24 15:13:55,bug,Requested feature_data_ size 536907080 doesn't match 1960; Feature generation failed;,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
source
### Tensorflow Version
V2.8
### Custom Code
Yes
### OS Platform and Distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
Hello Together,
I'm having a Problem with the micro_speech example for arduino from this repo: https://github.com/tensorflow/tflite-micro-arduino-examples/tree/main/examples/micro_speech
When trying to use this example with a new trained model from this Jupyter notebook: https://github.com/tensorflow/tflite-micro/tree/main/tensorflow/lite/micro/examples/micro_speech/train
I always get the same error message:
Requested feature_data_ size 536907080 doesn't match 1960
Feature generation failed
The only thing i changed in the notebook was the tensorflow version. This is because this notebook was using 1.x Version which is no longer supported by colab and i changed it to work with the latest 2.x version
Can anyone help here?
Greetings,
Patrick
### Standalone code to reproduce the issue
```shell
https://github.com/tensorflow/tflite-micro-arduino-examples/tree/main/examples/micro_speech
https://github.com/tensorflow/tflite-micro/tree/main/tensorflow/lite/micro/examples/micro_speech/train
```
### Relevant log output
_No response_ "
tensorflow/tensorflow,2023-06-23 08:51:18,bug,"Model runs without error in Tensorflow, but crashes with a segmentation fault in TFLite","### 1. System information
- OS Platform and Distribution: Ubuntu 20.04.4 LTS
- TensorFlow installation: pip
- TensorFlow library: 2.12.0
- TFLite runtime: 2.12.0
### 2. Code
The model is exported from PyTorch using ONNX. I have not included the PyTorch code below for brevity's sake (and because it is used for an active Kaggle competition); you can download the saved Keras model [here](https://cloud.ilabt.imec.be/index.php/s/Dgpi9SQTcyc23wm). The TFLite conversion code is given below, but you can also download the TFLite model [here](https://cloud.ilabt.imec.be/index.php/s/Dgpi9SQTcyc23wm) (same link).
Below is the code to create and save the Keras model from two PyTorch models `feat_gen` and `model`, converted using ONNX:
```python
class TFInferModel(tf.Module):
def __init__(self):
super(TFInferModel, self).__init__()
self.feat_gen = tf.saved_model.load(""feat_gen.pb"")
self.model = tf.saved_model.load(""model.pb"")
self.feat_gen.trainable = False
self.model.trainable = False
@tf.function(input_signature=[tf.TensorSpec(shape=[None, 126], dtype=tf.float32, name=""inputs"")])
def call(self, inputs):
output_tensors = {}
# Add batch dimension.
inputs = inputs[None]
# Process using ported PyTorch model.
features = self.feat_gen(inputs=inputs)[""outputs""]
outputs = self.model(inputs=features)[""outputs""]
# Remove batch dimension.
outputs = outputs[0]
output_tensors[""outputs""] = outputs
return output_tensors
tf_model = TFInferModel()
tf.saved_model.save(tf_model, ""tf_model"", signatures={""serving_default"": tf_model.call})
```
The model can be loaded in Keras and run:
```python
model = tf.saved_model.load(""tf_model"")
inputs = tf.zeros((100, 126), dtype=tf.float32)
output = model.call(inputs=inputs)
```
It can also be converted to TFLite:
```python
converter = tf.lite.TFLiteConverter.from_saved_model(""tf_model"")
tf_lite_model = converter.convert()
output_path = ""model.tflite""
with open(output_path, ""wb"") as f:
f.write(tf_lite_model)
```
And finally the code for TFLite inference:
```python
interpreter = tflite.Interpreter(model_path=""model.tflite"")
prediction_fn = interpreter.get_signature_runner(""serving_default"")
inputs = np.zeros((100, 126), dtype=np.float32)
output = prediction_fn(inputs=inputs)
```
### 3. Failure after conversion
The Keras inference code runs without issue. The TFLite inference code crashes immediately with a segmentation fault (no further info is given)."
tensorflow/tensorflow,2023-06-22 22:29:39,bug,"Crashes in model.save, wrapt error","Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
No
### Source
binary
### Tensorflow Version
2.12
### Custom Code
No
### OS Platform and Distribution
Fedora Linux
### Mobile device
_No response_
### Python version
3.11
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
Crashed when calling model.save()
See log below.
Worked after deinstalling tensorflow and wrapt. wrapt was 1.15.x
and installing tensorflow and wrapt==1.14.1
The problem is that when installing tensorflow, the wrapt 1.15.x is installed automatically and this is not playing with tensorflow.
### Standalone code to reproduce the issue
```shell
model.save('Modelname')
Causes the problem for any trained network.
```
### Relevant log output
```shell
Traceback (most recent call last):
File ""ModelPredictorTraining.py"", line 1415, in
run_hparam_on_grid(branched_model_1,
File ""ModelPredictorTraining.py"", line 1403, in run_hparam_on_grid
fitted_model.save('Model-name')
File ""/anaconda3/envs/tf2/lib/python3.11/site-packages/keras/utils/traceback_utils.py"", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File ""/anaconda3/envs/tf2/lib/python3.11/site-packages/tensorflow/python/trackable/data_structures.py"", line 823, in __getattribute__
return super().__getattribute__(name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
```
"
tensorflow/tensorflow,2023-06-21 16:46:50,bug,tf.mul after tf.split + tf.sigmoid produces wrong numerical results with MKL enabled,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
No
### Source
binary
### Tensorflow Version
intel-tensorflow 2.8 - 2.12
### Custom Code
No
### OS Platform and Distribution
Ubuntu 22.04
### Mobile device
_No response_
### Python version
3.10
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
When running with an MKL enabled tensorflow (e.g. intel-tensorflow from pypi) (or self-compiled with `--config=mkl`). Starting with tensorflow 2.8.0 up until 2.12.0 The attached code produces the wrong numerical result. (1.7615 vs expected 2.6439).
If line 25 is changed to `m = sig * (b + 0.0)` one can get the correct result.
This issue does not occur if installing ""vanilla"" tensorflow from pip with `pip install tensorflow`.
This issue also does not occur if one uses `tf.exp` or `tf.log` instead of `tf.sigmoid`.
### Standalone code to reproduce the issue
```shell
#!/usr/bin/env python3
import math
import tensorflow as tf
import numpy as np
def sigmoid(x):
return 1 / (1 + math.exp(-x))
data = [[[2.0, 3.0]]]
tf.compat.v1.disable_eager_execution()
s = tf.compat.v1.Session()
p = tf.compat.v1.placeholder(dtype=tf.float32)
a, b = tf.split(p, 2, axis=2)
sig = tf.sigmoid(a)
m = sig * b
out = s.run([p, m], feed_dict={p: data})
print(out)
print('computed: ', out[-1][0,0,0])
print('expected: ', sigmoid(data[0][0][0]) * data[0][0][1])
```
### Relevant log output
```shell
2023-06-21 18:37:35.713027: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-06-21 18:37:35.715264: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting:
[array([[[2., 3.]]], dtype=float32), array([[[1.7615942]]], dtype=float32)]
computed: 1.7615942
expected: 2.642391233933647
```
"
tensorflow/tensorflow,2023-06-20 19:30:00,bug,fit() fails with CUDNN_STATUS_BAD_PARAM when using Conv3D and multi-GPU MirroredStrategy,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
binary
### Tensorflow Version
v1.12.1-95675-g47602c0bad8 2.14.0-dev20230620
### Custom Code
Yes
### OS Platform and Distribution
Rocky Linux release 8.6 (Green Obsidian)
### Mobile device
_No response_
### Python version
3.8.10
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
cuda_11.8.r11.8/compiler.31833905_0 / cuDNN version 8600
### GPU model and memory
4 NVIDIA A100s w/ 80GB each
### Current Behaviour?
When executing a model fit that includes a `Conv3D` layer on multiple GPUs, I'm encountering a `CUDNN_STATUS_BAD_PARAM` error in the gradient computation step. No errors occur when running on a single GPU, nor when I swap out `Conv3D` with `AveragePooling3D` or `Conv2D`. However, `Conv3DTranspose` also fails.
```none
(0) UNKNOWN: CUDNN_STATUS_BAD_PARAM
in tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc(3549): 'tensor' CUDNN_BACKEND_TENSOR_DESCRIPTOR: Check and Set the CUDNN_ATTR_TENSOR_DIMENSIONS Correctly
[[{{node gradient_tape/replica_2/model/conv3d/Conv3D/Conv3DBackpropFilterV2}}]]
[[div_no_nan/ReadVariableOp_1/_52]]
[[group_deps/_95]]
[[Adam/update_2_2/AssignAddVariableOp/_119]]
[[group_deps/_103]]
```
With the `Graph execution error` traceback:
```none
Traceback (most recent call last):
File ""conv3_multi_gpu_fail_repro.py"", line 25, in
model.fit(x_data, y_data, batch_size=1, epochs=1, verbose=1)
File ""/***/env/lib/python3.8/site-packages/keras/src/utils/traceback_utils.py"", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File ""/***/env/lib/python3.8/site-packages/tensorflow/python/eager/execute.py"", line 53, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.UnknownError: Graph execution error:
Detected at node gradient_tape/replica_3/model/conv3d/Conv3D/Conv3DBackpropFilterV2 defined at (most recent call last):
File ""/usr/lib/python3.8/threading.py"", line 890, in _bootstrap
self._bootstrap_inner()
File ""/usr/lib/python3.8/threading.py"", line 890, in _bootstrap
self._bootstrap_inner()
File ""/usr/lib/python3.8/threading.py"", line 932, in _bootstrap_inner
self.run()
File ""/usr/lib/python3.8/threading.py"", line 890, in _bootstrap
self._bootstrap_inner()
File ""/usr/lib/python3.8/threading.py"", line 932, in _bootstrap_inner
self.run()
File ""/***/env/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1348, in run_step
outputs = model.train_step(data)
File ""/usr/lib/python3.8/threading.py"", line 890, in _bootstrap
self._bootstrap_inner()
File ""/usr/lib/python3.8/threading.py"", line 932, in _bootstrap_inner
self.run()
File ""/***/env/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1348, in run_step
outputs = model.train_step(data)
File ""/***/env/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1129, in train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
File ""/usr/lib/python3.8/threading.py"", line 890, in _bootstrap
self._bootstrap_inner()
File ""/usr/lib/python3.8/threading.py"", line 932, in _bootstrap_inner
self.run()
File ""/***/env/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1348, in run_step
outputs = model.train_step(data)
File ""/***/env/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1129, in train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
File ""/***/env/lib/python3.8/site-packages/keras/src/optimizers/optimizer.py"", line 543, in minimize
grads_and_vars = self.compute_gradients(loss, var_list, tape)
File ""/usr/lib/python3.8/threading.py"", line 890, in _bootstrap
self._bootstrap_inner()
File ""/usr/lib/python3.8/threading.py"", line 932, in _bootstrap_inner
self.run()
File ""/***/env/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1348, in run_step
outputs = model.train_step(data)
File ""/***/env/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1129, in train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
File ""/***/env/lib/python3.8/site-packages/keras/src/optimizers/optimizer.py"", line 543, in minimize
grads_and_vars = self.compute_gradients(loss, var_list, tape)
File ""/***/env/lib/python3.8/site-packages/keras/src/optimizers/optimizer.py"", line 276, in compute_gradients
grads = tape.gradient(loss, var_list)
```
I'm running from the `tensorflow/tensorflow:nightly-gpu` docker image.
### Standalone code to reproduce the issue
```python
import tensorflow as tf
from tensorflow.keras import layers, models
input_shape = (28, 28, 28, 1)
num_samples = 10
x_data = tf.random.uniform((num_samples, *input_shape), 0, 1)
y_data = tf.random.uniform((num_samples, *input_shape), 0, 1)
multi_gpu=True # <== fails
#multi_gpu=False # <== works
devices = [] if multi_gpu else ['/gpu:0']
mirrored_strategy = tf.distribute.MirroredStrategy(devices=devices)
print(f""{mirrored_strategy.num_replicas_in_sync} replica(s)"")
with mirrored_strategy.scope():
inputs = layers.Input(shape=input_shape)
outputs = layers.Conv3D(1, 1)(inputs) # <== fails
#outputs = layers.AveragePooling3D(1)(inputs) # <== works
#outputs = layers.Conv2D(1, 1)(inputs) # <== works
model = models.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(x_data, y_data, batch_size=1, epochs=1, verbose=1)
```
### Relevant log output
_No response_ "
tensorflow/tensorflow,2023-06-19 19:28:28,bug,FFT produces wrong results when using multiple GPUs with MirroredStrategy,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
binary
### Tensorflow Version
2.12.0 2.14.0-dev20230619
### Custom Code
No
### OS Platform and Distribution
Linux Ubuntu 18.04
### Mobile device
_No response_
### Python version
3.9.16
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
11.8.0 / 8.6.0.163
### GPU model and memory
_No response_
### Current Behaviour?
Using TensorFlow FFT in a Keras model will produce incorrect results when using MirroredStrategy and multiple GPUs.
This is not an accuracy issue. The results of consecutive calls seem to be either correct or garbage.
I created a test Keras model that has one layer that does FFT. There is also a reference model using a DFT layer that is used to verify that incorrect behavior only happens when using tf.signal.fft.
Attached is a test application that runs both models in different combinations of MirroredStrategy/default strategy and eager/graph execution.
MirroredStrategy and graph execution is the combination that produces the error. At least two GPUs are required to reproduce the problem.
The output MAE loss is around 6.5, which translates to 650% error. (The absolute value of each entry in the correct output is 1.0.)
I think it's not a user error, but if it is, there should be an error or warning instead of incorrect results.
I was able to reproduce the issue with all TF fft variants (tf.signal.fft, tf.signal.rfft, tf.signal.stft, tf.signal.fft2d)
### Standalone code to reproduce the issue
```shell
import tensorflow as tf
import numpy as np
from scipy.linalg import dft
from math import sqrt
# Layer that does tf.signal.fft operation
class FFTLayer(tf.keras.layers.Layer):
def call(self, x):
fx = tf.signal.fft(x)
return fx
# Layer that returns same results as tf.signal.fft op, but
# uses slower direct computation of DFT, implemented as matrix multiply.
class MatrixDFTLayer(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
self.dft = tf.cast(dft(1024), tf.complex64)
def call(self, x):
fx = self.dft @ tf.transpose(x)
return tf.transpose(fx)
def create_model(use_mirrored_strategy: bool = True,
run_eagerly: bool = True,
layer_to_use: tf.keras.layers.Layer = FFTLayer) -> None:
print(f""\\ncreate model with: use_mirrored_strategy: {use_mirrored_strategy}, "",
f""run_eagerly: {run_eagerly}, "",
f""layer_to_use: {layer_to_use}"")
if use_mirrored_strategy:
distribution_strategy = tf.distribute.MirroredStrategy()
else:
distribution_strategy = tf.distribute.get_strategy()
with distribution_strategy.scope():
ins = tf.keras.layers.Input([1024], dtype=tf.complex64)
x = layer_to_use()(ins)
model = tf.keras.Model(inputs=ins, outputs=x)
model.compile(
loss=tf.keras.losses.MeanAbsoluteError(),
run_eagerly=run_eagerly
)
return model
def create_data(fft_size, batch_size, num_steps):
num_examples = num_steps * batch_size
# y data is a complex vector of all (1/sqrt(2), (1/sqrt(2)j)
train_y = np.ones([fft_size], np.float32)
train_y = (1/sqrt(2))*train_y + (1/sqrt(2))*1j*train_y
# abs mean is 1 -> MAE magnitude should be compared to 1
print(""train_y mean: "", tf.reduce_mean(tf.abs(train_y)))
# use inverse transform to create input data
# fft(train_x) will produce train_y
train_x = tf.signal.ifft(train_y)
# clone data to get larger training set
train_y = train_y[tf.newaxis, ...]
train_x = train_x[tf.newaxis, ...]
train_x = tf.tile(train_x, [num_examples, 1])
train_y = tf.tile(train_y, [num_examples, 1])
return train_x, train_y
fft_size = 1024
batch_size = 9
num_steps = 100
train_x, train_y = create_data(fft_size, batch_size, num_steps)
# Test cases with MatrixDFTLayer
# These are all ok, MAE close to 0.0
# ok
model = create_model(use_mirrored_strategy=False, run_eagerly=False, layer_to_use=MatrixDFTLayer)
loss = model.evaluate(train_x, train_y, batch_size=batch_size, verbose=0)
print(f""loss: {loss}"")
# ok
model = create_model(use_mirrored_strategy=False, run_eagerly=True, layer_to_use=MatrixDFTLayer)
loss = model.evaluate(train_x, train_y, batch_size=batch_size, verbose=0)
print(f""loss: {loss}"")
# ok
model = create_model(use_mirrored_strategy=True, run_eagerly=False, layer_to_use=MatrixDFTLayer)
loss = model.evaluate(train_x, train_y, batch_size=batch_size, verbose=0)
print(f""loss: {loss}"")
# Test Cases using TF FFT. These fail when using MirroredStrategy.
# ok
model = create_model(use_mirrored_strategy=False, run_eagerly=False, layer_to_use=FFTLayer)
loss = model.evaluate(train_x, train_y, batch_size=batch_size, verbose=0)
print(f""loss: {loss}"")
# ok
model = create_model(use_mirrored_strategy=False, run_eagerly=True, layer_to_use=FFTLayer)
loss = model.evaluate(train_x, train_y, batch_size=batch_size, verbose=0)
print(f""loss: {loss}"")
# fail,
model = create_model(use_mirrored_strategy=True, run_eagerly=False,layer_to_use= FFTLayer)
loss = model.evaluate(train_x, train_y, batch_size=batch_size, verbose=0)
print(f""loss: {loss}"")
```
### Relevant log output
```shell
train_y mean: tf.Tensor(1.0, shape=(), dtype=float32)
create model with: use_mirrored_strategy: False, run_eagerly: False, layer_to_use:
loss: 0.0
create model with: use_mirrored_strategy: False, run_eagerly: True, layer_to_use:
loss: 0.0
create model with: use_mirrored_strategy: True, run_eagerly: False, layer_to_use:
loss: 0.0
create model with: use_mirrored_strategy: False, run_eagerly: False, layer_to_use:
loss: 0.0
create model with: use_mirrored_strategy: False, run_eagerly: True, layer_to_use:
loss: 0.0
create model with: use_mirrored_strategy: True, run_eagerly: False, layer_to_use:
loss: 6.451958656311035
```
"
tensorflow/tensorflow,2023-06-19 11:45:23,bug,F1 score error on multi class data,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
binary
### Tensorflow Version
v1.12.1-95639-g08bd7e1a8e5 2.14.0-dev20230618
### Custom Code
Yes
### OS Platform and Distribution
OS Ventura 13.0.1
### Mobile device
_No response_
### Python version
3.8
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
Implementing the F1 score available in the nightly builds on multi-class data such as below:
```
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics= tf.keras.metrics.F1Score())
history = model.fit(train_images, train_labels, epochs=10,
validation_data=(test_images, test_labels))
```
triggers the following error:
```
Epoch 1/10
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[8], line 5
1 model.compile(optimizer='adam',
2 loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
3 metrics= tf.keras.metrics.F1Score())
----> 5 history = model.fit(train_images, train_labels, epochs=10,
6 validation_data=(test_images, test_labels))
File /opt/homebrew/lib/python3.8/site-packages/keras/src/utils/traceback_utils.py:70, in filter_traceback..error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File /var/folders/f5/mkqkf_0d42qcsqc37hd_y0hm0000gn/T/__autograph_generated_fileb8tcgui2.py:15, in outer_factory..inner_factory..tf__train_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1338, in train_function *
return step_function(self, iterator)
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1322, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1303, in run_step **
outputs = model.train_step(data)
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1085, in train_step
return self.compute_metrics(x, y, y_pred, sample_weight)
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/engine/training.py"", line 1179, in compute_metrics
self.compiled_metrics.update_state(y, y_pred, sample_weight)
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/engine/compile_utils.py"", line 605, in update_state
metric_obj.update_state(y_t, y_p, sample_weight=mask)
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/utils/metrics_utils.py"", line 77, in decorated
update_op = update_state_fn(*args, **kwargs)
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/metrics/base_metric.py"", line 140, in update_state_fn
return ag_update_state(*args, **kwargs)
File ""/opt/homebrew/lib/python3.8/site-packages/keras/src/metrics/f_score_metrics.py"", line 176, in update_state **
y_true = tf.convert_to_tensor(y_true, dtype=self.dtype)
ValueError: Tensor conversion requested dtype float32 for Tensor with dtype uint8:
```
I've tried with multiple multi-class datasets and the same error is returned. The F1 score page says it should work with multi-class data https://www.tensorflow.org/api_docs/python/tf/keras/metrics/F1Score. Is there something I've missed regarding its implementation for multi-class data (such as somewhere to specify the number of classes?) or is this a bug?
### Standalone code to reproduce the issue
```shell
Here is a Jupyter notebook with some example data from https://www.tensorflow.org/tutorials/images/cnn
https://drive.google.com/file/d/1tExJ80AktA87EmsExOPEMWsevoiQz4VX/view?usp=share_link
```
### Relevant log output
_No response_ "
tensorflow/tensorflow,2023-06-18 22:44:17,bug,Uncaught exception in ZMQStream callback when running your example notebooks using latest or nightly docker image,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
binary
### Tensorflow Version
v2.12.0-rc1-12-g0db597d0d75 2.12.0
### Custom Code
No
### OS Platform and Distribution
Linux gpu02 6.2.11-2-pve #1 SMP PREEMPT_DYNAMIC PVE 6.2.11-2 (2023-05-10T09:13Z) x86_64 x86_64 x86_64 GNU/Linux
### Mobile device
_No response_
### Python version
python3.8
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
Occurs when running any of your example notebooks:
```
[E 22:36:50.295 NotebookApp] Uncaught exception in ZMQStream callback
Traceback (most recent call last):
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 584, in _run_callback
f = callback(*args, **kwargs)
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 308, in stream_callback
return callback(self, msg)
File ""/usr/local/lib/python3.8/dist-packages/notebook/services/kernels/handlers.py"", line 572, in _on_zmq_reply
super()._on_zmq_reply(stream, msg)
File ""/usr/local/lib/python3.8/dist-packages/notebook/base/zmqhandlers.py"", line 256, in _on_zmq_reply
self.write_message(msg, binary=isinstance(msg, bytes))
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 339, in write_message
return self.ws_connection.write_message(message, binary=binary)
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 1086, in write_message
fut = self._write_frame(True, opcode, message, flags=flags)
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 1061, in _write_frame
return self.stream.write(frame)
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 546, in write
self._handle_write()
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 976, in _handle_write
self._write_buffer.advance(num_bytes)
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 182, in advance
assert 0 < size <= self._size
AssertionError
[E 22:36:50.297 NotebookApp] Uncaught exception in zmqstream callback
Traceback (most recent call last):
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 634, in _handle_events
self._handle_recv()
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 663, in _handle_recv
self._run_callback(callback, msg)
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 584, in _run_callback
f = callback(*args, **kwargs)
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 308, in stream_callback
return callback(self, msg)
File ""/usr/local/lib/python3.8/dist-packages/notebook/services/kernels/handlers.py"", line 572, in _on_zmq_reply
super()._on_zmq_reply(stream, msg)
File ""/usr/local/lib/python3.8/dist-packages/notebook/base/zmqhandlers.py"", line 256, in _on_zmq_reply
self.write_message(msg, binary=isinstance(msg, bytes))
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 339, in write_message
return self.ws_connection.write_message(message, binary=binary)
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 1086, in write_message
fut = self._write_frame(True, opcode, message, flags=flags)
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 1061, in _write_frame
return self.stream.write(frame)
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 546, in write
self._handle_write()
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 976, in _handle_write
self._write_buffer.advance(num_bytes)
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 182, in advance
assert 0 < size <= self._size
AssertionError
Exception in callback BaseAsyncIOLoop._handle_events(33, 1)
handle:
Traceback (most recent call last):
File ""/usr/lib/python3.8/asyncio/events.py"", line 81, in _run
self._context.run(self._callback, *self._args)
File ""/usr/local/lib/python3.8/dist-packages/tornado/platform/asyncio.py"", line 206, in _handle_events
handler_func(fileobj, events)
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 634, in _handle_events
self._handle_recv()
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 663, in _handle_recv
self._run_callback(callback, msg)
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 584, in _run_callback
f = callback(*args, **kwargs)
File ""/usr/local/lib/python3.8/dist-packages/zmq/eventloop/zmqstream.py"", line 308, in stream_callback
return callback(self, msg)
File ""/usr/local/lib/python3.8/dist-packages/notebook/services/kernels/handlers.py"", line 572, in _on_zmq_reply
super()._on_zmq_reply(stream, msg)
File ""/usr/local/lib/python3.8/dist-packages/notebook/base/zmqhandlers.py"", line 256, in _on_zmq_reply
self.write_message(msg, binary=isinstance(msg, bytes))
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 339, in write_message
return self.ws_connection.write_message(message, binary=binary)
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 1086, in write_message
fut = self._write_frame(True, opcode, message, flags=flags)
File ""/usr/local/lib/python3.8/dist-packages/tornado/websocket.py"", line 1061, in _write_frame
return self.stream.write(frame)
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 546, in write
self._handle_write()
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 976, in _handle_write
self._write_buffer.advance(num_bytes)
File ""/usr/local/lib/python3.8/dist-packages/tornado/iostream.py"", line 182, in advance
assert 0 < size <= self._size
AssertionError
```
### Standalone code to reproduce the issue
```shell
Run any of your Jupyter example in your docker image.
```
### Relevant log output
_No response_ "
tensorflow/tensorflow,2023-06-13 19:41:41,bug,W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudnn.so.8'; dlerror: libcudnn.so.8: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
source
### Tensorflow Version
2.8.0
### Custom Code
No
### OS Platform and Distribution
Ubuntu 22.04
### Mobile device
_No response_
### Python version
Python 3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03)
### Bazel version
bazel 5.3.2
### GCC/Compiler version
gcc (Ubuntu 11.3.0-1ubuntu1~22.04.1) 11.3.0
### CUDA/cuDNN version
11.2/8 in conda env
### GPU model and memory
laptop 3080 RTX
### Current Behaviour?
A bug happened!
### Standalone code to reproduce the issue
```shell
I have tensorflow 2 installed and also from the code below I see cudnn 8 is found.
(samurai) mona@ard-gpu-01:~/samurai$ cat cudnn_test.py
import tensorflow as tf
sys_details = tf.sysconfig.get_build_info()
cuda_version = sys_details[""cuda_version""]
print(cuda_version)
cudnn_version = sys_details[""cudnn_version""]
print(cudnn_version)
cuda_compute_capabilities = sys_details[""cuda_compute_capabilities""]
print(cuda_compute_capabilities)
(samurai) mona@ard-gpu-01:~/samurai$ python cudnn_test.py
11.2
8
['sm_35', 'sm_50', 'sm_60', 'sm_70', 'sm_75', 'compute_80']
```
However, when I run the following command, I get an error that cudnn 8 is not found.
```
(samurai) mona@ard-gpu-01:~/samurai$ python train_samurai.py --config configs/samurai/samurai.txt --datadir data/duck/ --basedir . --expname duck_test --gpu 0
Namespace(config=None, basedir='.', expname='duck_test', batch_size=1024, learning_rate=0.0001, epochs=150, steps_per_epoch=2000, gpu='0', tpu=None, debug=False, profile=False, perturb=1.0, raw_noise_std=0.0, coarse_samples=64, linear_disparity_sampling=False, fine_samples=128, fourier_frequency=10, direction_fourier_frequency=4, random_encoding_offsets=True, fine_net_width=128, fine_net_depth=8, coarse_net_width=128, coarse_net_depth=6, appearance_latent_dim=32, diffuse_latent_dim=24, fix_diffuse=True, camera_distribution='sphere', use_fully_random_cameras=False, random_cameras_per_view=4, min_softmax_scaler=1.0, max_softmax_scaler=10.0, camera_weight_update_lr=0.3, camera_weight_update_momentum=0.75, bounding_size=0.5, resolution_factor=4, advanced_loss_done=80000, network_gradient_norm_clipping=0.1, camera_gradient_norm_clipping=-1, not_learn_r=False, not_learn_t=False, not_learn_f=False, edge_align_step=200, num_edge_align_steps=50, pretrained_camera_poses_folder=None, start_f_optimization=90000, start_fourier_anneal=0, finish_fourier_anneal=50000, slow_scheduler_decay=100000, brdf_schedule_decay=40000, lambda_smoothness=0.01, smoothness_bound_dividier=200, coarse_distortion_lambda=0.001, fine_distortion_lambda=0, normal_direction_lambda=0.005, mlp_normal_direction_lambda=0.0003, disable_posterior_scaling=False, disable_mask_uncertainty=True, lambda_brdf_decoder_smoothness=0.1, lambda_brdf_decoder_sparsity=0.01, camera_lr=0.003, camera_lr_decay=70, camera_regularization=0.1, aim_center_regularization=10.0, camera_rotation='lookat', learn_camera_offsets=True, basecolor_metallic=True, skip_decomposition=False, compose_on_white=True, rotating_object=False, single_env=False, brdf_preintegration_path='data/neural_pil/BRDFLut.hdr', illumination_network_path='data/neural_pil/illumination-network', datadir='data/duck/', max_resolution_dimension=400, test_holdout=16, dataset='samurai', load_gt_poses=False, canonical_pose=0, log_step=100, weights_epoch=5, 
validation_epoch=5, testset_epoch=150, video_epoch=50, lrate_decay=300, render_only=False)
2023-06-13 15:35:10.002485: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2023-06-13 15:35:10.022702: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudnn.so.8'; dlerror: libcudnn.so.8: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/lib:/home/mona/MVTec/HALCON-23.05-Progress//lib/x64-linux:/usr/local/cuda-11.7/lib64:/home/mona/onnx-tensorrt/build:
2023-06-13 15:35:10.022715: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1850] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.
Skipping registering GPU devices...
Utilizing 0 GPUs for training.
2023-06-13 15:35:11.092766: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
(70, 3)
Model: ""sequential_12""
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
MappingNetwork/Layer_0 (Den (None, 128) 16512
se)
MappingNetwork/Layer_1 (Den (None, 128) 16512
se)
MappingNetwork/Final (Dense (None, 768) 99072
)
reshape_1 (Reshape) (None, 2, 3, 128) 0
=================================================================
Total params: 132,096
Trainable params: 132,096
Non-trainable params: 0
_________________________________________________________________
Model: ""sequential_13""
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
ConditionalNetwork/Dense1 ( (None, 32) 192
Dense)
ConditionalNetwork/DenseFin (None, 256) 8448
al (Dense)
reshape_2 (Reshape) (None, 2, 128) 0
=================================================================
Total params: 8,640
Trainable params: 8,640
Non-trainable params: 0
_________________________________________________________________
Found ckpts []
Starting training in epoch 0 at step 0
Start Training...
/home/mona/anaconda3/envs/samurai/lib/python3.9/site-packages/tensorflow/python/framework/indexed_slices.py:444: UserWarning: Converting sparse IndexedSlices(IndexedSlices(indices=Tensor(""gradients/interpolate_bilinear/gather-bottom_right/GatherV2_grad/Reshape_1:0"", shape=(1024,), dtype=int32), values=Tensor(""gradients/interpolate_bilinear/gather-bottom_right/GatherV2_grad/Reshape:0"", shape=(1024, 1), dtype=float32), dense_shape=Tensor(""gradients/interpolate_bilinear/gather-bottom_right/GatherV2_grad/Cast:0"", shape=(2,), dtype=int32))) to a dense Tensor of unknown shape. This may consume a large amount of memory.
warnings.warn(
/home/mona/anaconda3/envs/samurai/lib/python3.9/site-packages/tensorflow/python/framework/indexed_slices.py:444: UserWarning: Converting sparse IndexedSlices(IndexedSlices(indices=Tensor(""gradients/interpolate_bilinear/gather-bottom_left/GatherV2_grad/Reshape_1:0"", shape=(1024,), dtype=int32), values=Tensor(""gradients/interpolate_bilinear/gather-bottom_left/GatherV2_grad/Reshape:0"", shape=(1024, 1), dtype=float32), dense_shape=Tensor(""gradients/interpolate_bilinear/gather-bottom_left/GatherV2_grad/Cast:0"", shape=(2,), dtype=int32))) to a dense Tensor of unknown shape. This may consume a large amount of memory.
warnings.warn(
/home/mona/anaconda3/envs/samurai/lib/python3.9/site-packages/tensorflow/python/framework/indexed_slices.py:444: UserWarning: Converting sparse IndexedSlices(IndexedSlices(indices=Tensor(""gradients/interpolate_bilinear/gather-top_right/GatherV2_grad/Reshape_1:0"", shape=(1024,), dtype=int32), values=Tensor(""gradients/interpolate_bilinear/gather-top_right/GatherV2_grad/Reshape:0"", shape=(1024, 1), dtype=float32), dense_shape=Tensor(""gradients/interpolate_bilinear/gather-top_right/GatherV2_grad/Cast:0"", shape=(2,), dtype=int32))) to a dense Tensor of unknown shape. This may consume a large amount of memory.
warnings.warn(
/home/mona/anaconda3/envs/samurai/lib/python3.9/site-packages/tensorflow/python/framework/indexed_slices.py:444: UserWarning: Converting sparse IndexedSlices(IndexedSlices(indices=Tensor(""gradients/interpolate_bilinear/gather-top_left/GatherV2_grad/Reshape_1:0"", shape=(1024,), dtype=int32), values=Tensor(""gradients/interpolate_bilinear/gather-top_left/GatherV2_grad/Reshape:0"", shape=(1024, 1), dtype=float32), dense_shape=Tensor(""gradients/interpolate_bilinear/gather-top_left/GatherV2_grad/Cast:0"", shape=(2,), dtype=int32))) to a dense Tensor of unknown shape. This may consume a large amount of memory.
warnings.warn(
25/2000 [..............................] - ETA: 42:41 - loss: 1.8824 - loss_camera: 7.2076 - fine_loss: 1.8019
```
```
### Relevant log output
```shell
(samurai) mona@ard-gpu-01:~/samurai$ lsb_release -a
LSB Version: core-11.1.0ubuntu4-noarch:security-11.1.0ubuntu4-noarch
Distributor ID: Ubuntu
Description: Ubuntu 22.04.2 LTS
Release: 22.04
Codename: jammy
(samurai) mona@ard-gpu-01:~/samurai$ uname -a
Linux ard-gpu-01 5.19.0-43-generic #44~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Mon May 22 13:39:36 UTC 2 x86_64 x86_64 x86_64 GNU/Linux
```
```
(samurai) mona@ard-gpu-01:~/samurai$ nvidia-smi
Tue Jun 13 15:38:44 2023
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 530.30.02 Driver Version: 530.30.02 CUDA Version: 12.1 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 3080 L... On | 00000000:01:00.0 Off | N/A |
| N/A 49C P8 17W / 90W| 102MiB / 16384MiB | 21% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| 0 N/A N/A 2549 G /usr/lib/xorg/Xorg 95MiB |
| 0 N/A N/A 2983 G ...libexec/gnome-remote-desktop-daemon 3MiB |
+---------------------------------------------------------------------------------------+
```
```
(samurai) mona@ard-gpu-01:~/samurai$ nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2022 NVIDIA Corporation
Built on Wed_Jun__8_16:49:14_PDT_2022
Cuda compilation tools, release 11.7, V11.7.99
Build cuda_11.7.r11.7/compiler.31442593_0
```
The code is from this repo: https://github.com/google/samurai
```
"
tensorflow/tensorflow,2023-06-12 18:07:28,bug,"Unexpected failure when preparing tensor allocations: tensorflow/lite/kernels/pad.cc:79 SizeOfDimension(op_context->paddings, 0) != op_context->dims (4 != 1) Node number 0 (PAD) failed to prepare.","I have converted my DenseNet-121 model to model.tflite and when i am loading it to android app and trying to make predictions, it's giving following errors : java.lang.IllegalStateException: Internal error: Unexpected failure when preparing tensor allocations: tensorflow/lite/kernels/pad.cc:79 SizeOfDimension(op_context->paddings, 0) != op_context->dims (4 != 1)
Node number 0 (PAD) failed to prepare.
at org.tensorflow.lite.NativeInterpreterWrapper.allocateTensors(Native Method)
at org.tensorflow.lite.NativeInterpreterWrapper.allocateTensorsIfNeeded(NativeInterpreterWrapper.java:308)
at org.tensorflow.lite.NativeInterpreterWrapper.run(NativeInterpreterWrapper.java:248)
at org.tensorflow.lite.InterpreterImpl.runForMultipleInputsOutputs(InterpreterImpl.java:101)
at org.tensorflow.lite.Interpreter.runForMultipleInputsOutputs(Interpreter.java:77)
at org.tensorflow.lite.InterpreterImpl.run(InterpreterImpl.java:94)
at org.tensorflow.lite.Interpreter.run(Interpreter.java:77)
at com.example.appleleafdiseasedetection.DiseaseDetector$2.onClick(DiseaseDetector.java:72)
at android.view.View.performClick(View.java:7743)
at android.view.View.performClickInternal(View.java:7720)
at android.view.View.access$3700(View.java:854)
at android.view.View$PerformClick.run(View.java:29111)
at android.os.Handler.handleCallback(Handler.java:938)
at android.os.Handler.dispatchMessage(Handler.java:99)
at android.os.Looper.loopOnce(Looper.java:210)
at android.os.Looper.loop(Looper.java:299)
at android.app.ActivityThread.main(ActivityThread.java:8309)
at java.lang.reflect.Method.invoke(Native Method)
at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java:556)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:1038) how can i solve it?
"
tensorflow/tensorflow,2023-06-12 10:32:27,bug,Documentation Bug:the description of padding,"Click to expand!
### Issue Type
Documentation Bug
### Have you reproduced the bug with TF nightly?
No
### Source
source
### Tensorflow Version
tf2.12.0
### Custom Code
Yes
### OS Platform and Distribution
MacOs
### Mobile device
_No response_
### Python version
3.9
### Bazel version
_No response_
### GCC/Compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current Behaviour?
#### Output
```
ValueError: The `padding` argument must be a tuple of 2 integers. Received: {'padding': 2}
```
#### Document
| `padding` | Int, or tuple of int (length 2), or dictionary. |
| --------- | ----------------------------------------------- |
### Standalone code to reproduce the issue
```shell
input_shape = (2, 2, 3)
x = np.arange(np.prod(input_shape)).reshape(input_shape)
x = ZeroPadding1D({'padding':2})(x)
print(x)
```
### Relevant log output
_No response_
"
tensorflow/tensorflow,2023-06-11 00:21:12,bug,Tensorflow Lite on Raspberry Pi,"Click to expand!
### Issue Type
Bug
### Have you reproduced the bug with TF nightly?
Yes
### Source
source
### Tensorflow Version
tflite_runtime-2.12.0-cp39-cp39-manylinux2014_armv7l.whl
### Custom Code
Yes
### OS Platform and Distribution
Linux raspbari14 6.1.32-v7+ #1656 SMP Wed Jun 7 11:31:19 BST 2023 armv7l GNU/Linux
### Mobile device
_No response_
### Python version
Python 3.9.2 (default, Mar 12 2021, 04:06:34)
### Bazel version
_No response_
### GCC/Compiler version
[GCC 10.2.1 20210110] on linux
### CUDA/cuDNN version
n/a
### GPU model and memory
n/a
### Current Behaviour?
Not working as documented:
`import tflite_runtime.interpreter as tflite`
How to import Tensorflow Lite in python scripts?
### Standalone code to reproduce the issue
```shell
$ python3 -m pip install tflite-runtime
Looking in indexes: https://pypi.org/simple, https://www.piwheels.org/simple
Collecting tflite-runtime
Downloading tflite_runtime-2.12.0-cp39-cp39-manylinux2014_armv7l.whl (1.8 MB)
|████████████████████████████████| 1.8 MB 2.6 MB/s
Requirement already satisfied: numpy>=1.19.2 in /home/chowkidar/.local/lib/python3.9/site-packages (from tflite-runtime) (1.23.1)
Installing collected packages: tflite-runtime
Successfully installed tflite-runtime-2.12.0
$ python3
Python 3.9.2 (default, Mar 12 2021, 04:06:34)
[GCC 10.2.1 20210110] on linux
Type ""help"", ""copyright"", ""credits"" or ""license"" for more information.
>>> import tensorflow
Traceback (most recent call last):
File """", line 1, in
ModuleNotFoundError: No module named 'tensorflow'
>>> import tflite_runtime.interpreter as tflite
Traceback (most recent call last):
File """", line 1, in
File ""/home/chowkidar/.local/lib/python3.9/site-packages/tflite_runtime/interpreter.py"", line 33, in
from tflite_runtime import _pywrap_tensorflow_interpreter_wrapper as _interpreter_wrapper
ImportError: /usr/lib/arm-linux-gnueabihf/libstdc++.so.6: version `GLIBCXX_3.4.29' not found (required by /home/chowkidar/.local/lib/python3.9/site-packages/tflite_runtime/_pywrap_tensorflow_interpreter_wrapper.so)
>>>
```
### Relevant log output
_No response_ "
microsoft/vscode,2023-09-29 07:30:00,bug,Give ms-python.debugpy access to portsAttributes proposal,The Python debugger extension has adopted the portsAttributes API to help us gain confidence in the API before finalization. Not granting that extension access to the API was an oversight.
microsoft/vscode,2023-09-28 21:18:51,bug,Verify fix/revision to Code Actions on Save,"tests https://github.com/microsoft/vscode/issues/194031 and new changes from https://github.com/microsoft/vscode/pull/194409. related to https://github.com/microsoft/vscode/issues/194397
added back old boolean values (since we reverted the changes for migration) in addition to supporting new enum values.
This is for both Code Actions in notebooks and in the editor:
1. Find `notebook.codeActionsOnSave` or `editor.codeActionsOnSave`
2. test each enum's behavior. note that `always` currently does not support code actions on auto save after delay. (easiest code actions to test would be `source.fixAll` and `source.organizeImports`. other code actions like `source.fixAll.eslint` and `source.removeUnusedImports` are supported as well.
3. test boolean behavior. (true and `explicit` should be the same, `false` and `never` should be the same. there are descriptions about future deprecation and behavior as well!)"
microsoft/vscode,2023-09-28 18:11:54,bug,Comments editor height is incorrect when window is resized to be small,"1. Resize window
2. Start a comment
3. :bug: as you can see from the scrollbar, the comment text doesn't fill the full editor
![Image](https://github.com/microsoft/vscode/assets/30305945/a2899e98-d88d-4f1b-9cb1-b6b6dac33a2d)
"
microsoft/vscode,2023-09-28 12:05:26,bug,[regression] CodeAction on save is broken,"Version: 1.83.0-insider
Commit: aad333b878b4cfce2f4152d48552fb6f980d7daf
* have the user setting spec'd below
* open `src/vs/editor/contrib/stickyScroll/test/browser/stickyScroll.test.ts`
* remove the semicolon on line 29
* save via `Cmd+S`, semi column isn't inserted
```
""editor.codeActionsOnSave"": {
""source.fixAll.eslint"": ""explicit"",
""source.removeUnusedImports"": ""explicit""
},
```"
microsoft/vscode,2023-09-28 08:26:59,bug,"Chat view `ERR Error: Invalid range: [1, 0)`","In one of my workspaces I see no more chat. The console shows the stacktraces below, the debugger stops like this
```
ERR Error: Invalid range: [1, 0)
at new k (offsetRange.ts:48:10)
at c.G (chatModel.ts:538:42)
at chatModel.ts:525:65
at Array.map ()
at c.F (chatModel.ts:524:19)
at new c (chatModel.ts:504:39)
at g.j (instantiationService.ts:119:18)
at g.createInstance (instantiationService.ts:85:18)
at O (chatServiceImpl.ts:325:43)
at O.getOrRestoreSession (chatServiceImpl.ts:404:15)
at s.U (chatViewPane.ts:114:54)
at s.render (paneview.ts:267:9)
at s.render (viewPane.ts:286:9)
at R.rb (viewPaneContainer.ts:778:9)
at u.value (viewPaneContainer.ts:532:87)
at c.z (event.ts:1138:13)
at c.A (event.ts:1149:9)
at c.fire (event.ts:1173:9)
at b.I (viewContainerModel.ts:661:41)
at b.H (viewContainerModel.ts:656:8)
at u.value (viewContainerModel.ts:361:118)
at c.z (event.ts:1138:13)
at c.fire (event.ts:1169:9)
at u.value (event.ts:152:97)
at l.z (event.ts:1138:13)
at l.A (event.ts:1149:9)
at l.fire (event.ts:1173:9)
at l.fire (event.ts:1335:11)
at s.setContext (contextKeyService.ts:345:29)
at u.reset (contextKeyService.ts:214:18)
at new u (contextKeyService.ts:203:8)
at s.createKey (contextKeyService.ts:290:10)
at w (contextKeyService.ts:599:20)
at g.invokeFunction (instantiationService.ts:68:11)
at y.n (commandService.ts:95:46)
at y.executeCommand (commandService.ts:60:17)
at p.$executeCommand (mainThreadCommands.ts:91:31)
at m.S (rpcProtocol.ts:456:17)
at m.Q (rpcProtocol.ts:441:32)
at m.M (rpcProtocol.ts:371:19)
at m.L (rpcProtocol.ts:297:10)
at u.value (rpcProtocol.ts:161:42)
at c.z (event.ts:1138:13)
at c.fire (event.ts:1169:9)
at r.fire (ipc.net.ts:650:19)
at Y.onmessage (localProcessExtensionHost.ts:581:40)
log.ts:441 ERR Invalid range: [1, 0): Error: Invalid range: [1, 0)
at new k (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:95:32662)
at c.G (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1575:1173)
at vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1575:827
at Array.map ()
at c.F (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1575:779)
at new c (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1575:326)
at g.j (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:640:1241)
at g.createInstance (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:640:733)
at O (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1576:8061)
at O.getOrRestoreSession (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1576:9336)
at s.U (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2551:20103)
at s.render (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:227:65895)
at s.render (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1669:11445)
at R.rb (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2537:37565)
at u.value (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2537:34403)
at c.z (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:1902)
at c.A (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:1972)
at c.fire (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:2188)
at b.I (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2321:122034)
at b.H (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2321:122006)
at u.value (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2321:116441)
at c.z (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:1902)
at c.fire (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:2119)
at u.value (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:85:49477)
at l.z (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:1902)
at l.A (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:1972)
at l.fire (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:2188)
at l.fire (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:3317)
at s.setContext (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:638:35504)
at u.reset (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:638:33917)
at new u (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:638:33812)
at s.createKey (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:638:34811)
at w (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:638:38446)
at g.invokeFunction (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:640:326)
at y.n (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1528:12391)
at y.executeCommand (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1528:11979)
at p.$executeCommand (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1537:552)
at m.S (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1551:18856)
at m.Q (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1551:18622)
at m.M (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1551:17715)
at m.L (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1551:16794)
at u.value (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1551:15597)
at c.z (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:1902)
at c.fire (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:2119)
at r.fire (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:616:14335)
at Y.onmessage (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:1652:1077)
```"
microsoft/vscode,2023-09-27 11:01:28,bug,[web][ff] web socket breaks when changing network,"At Gitpod we received bug requests that in FireFox (117-118) VS Code Web (1.82) sometimes became unresponsive. See [Loom](https://www.loom.com/share/c028fe3b10814f5f90b00255625eaac6?sid=5aa5fdaa-1d65-4581-bc45-5a1153dcafda) for what unresponsive means.
It is hard to reproduce, but I was able with following:
1. Leave FF tab in the background.
2. Trigger reconnects by toggling your wifi for instance.
3. After a while come back to FF tab.
Debugging web part showed that frontend is keep asking to replay missing messages, but server keeps replaying with the same message. Unfortunately I was not able to debug the server part.
"
microsoft/vscode,2023-09-27 08:29:52,bug,Extension magically got reenabled,"* I have rust-analyzer installed but disabled
* It's now enabled without me doing something
* I blame it on settings sync because yesterday I reenabled that and I have onboarded a bunch of clients (vscode.dev, separate mac, etc)
## Steps to Repro
- Open VS Code
- Install an extension A and disable it
- Install older version of the extension using `Install Another Version...` action
- Open VS Code using different user data directory but same extensions directory - `code --user-data-dir `
- Update the extension from this instance
🐛 Extension is enabled in other instance
Note that this is nothing to do with Settings Sync"
microsoft/vscode,2023-09-26 22:04:46,bug,Toggling file node in Comments view focuses comment,"Testing #194012
1. Checkout https://github.com/microsoft/vscode/pull/193812
2. Open comments view
3. Have `debug.contribution.ts` open and scrolled to the top of the file
4. Hit enter in the `debug.contribution.ts` node
5. :bug: I get scrolled to the first comment"
microsoft/vscode,2023-09-26 21:44:38,bug,Debug console with DWARF debugging isn't printing the expected value,"Testing #194071
See top left `a` and debug console `a`:
![Image](https://github.com/microsoft/vscode/assets/2193314/5c6ca24a-73e7-4424-8373-1d73ac57ce91)
"
microsoft/vscode,2023-09-26 21:18:55,bug,Specify what is in the event data for `env.onDidChangeShell`,"Testing #194028
'env.onDidChangeShell' tsdoc does not specify what the event data string will contain.
It looks like it's the path to the newly selected default shell."
microsoft/vscode,2023-09-26 18:10:14,bug,Seeing two action bars,"Testing #193991
![Image](https://github.com/microsoft/vscode/assets/900690/e1f21316-35ee-4c88-b1ef-320c66078411)
I am still trying to recap how I got there...
**Update:**
* have some pinned and non-pinned tabs
* open an empty group to the right
* drag the entire group to the right by dragging from empty space after the last tab
* you end up with 2 action bars visible"
microsoft/vscode,2023-09-26 18:04:35,bug,Debug toolbar with commandCenter limits command center search ,"Testing #194001
Followed the steps to turn on the debug toolbar with commandCenter options.
However, when the debugging started, clicking the area around the directory name did not trigger 'search files'. (the blank space at the right side of 'vscode-python' in terms of the attached screenshot.
I was able to trigger 'search files' only when pressing specifically on the directory name, in this case, 'vscode-python.
++ Nice feature by the way!
"
microsoft/vscode,2023-09-26 17:51:18,bug,Enable Source Mapped Stepping command is registered twice,"Testing #194071
1. Run Disable Source Mapped Stepping
2. Open command palette
3. :bug: there are two Enable Source Mapped Stepping commands (there's only one Disable Source Mapped Stepping command)
![Image](https://github.com/microsoft/vscode/assets/30305945/d4b152ad-684c-4af2-8733-80e820d1855c)
"
microsoft/vscode,2023-09-26 16:58:49,bug,insertFinalNewline breaks typings in first line in a new cell,"Re #194078, I was editing the endgame notebook in VS Code repo but it keeps moving the cursor to the next line, which breaks typing.
https://github.com/microsoft/vscode/assets/876920/be93070f-c41a-42ac-9afb-b0bdb050b552
It's very likely an issue with these two settings combined
* `""files.insertFinalNewline"": true`
* ""files.autoSave"": ""afterDelay""
We seems to have the same issue for files in text editor but untitled file doesn't suffer from this as it can't be auto saved. I wonder if we should have some special treatment for notebook cells, as it's very usual to create new cells, which are always empty at the beginning, typing in the first line is making it almost impossible. Maybe we could keep the cursor position for this scenario.
"
microsoft/vscode,2023-09-26 15:35:14,bug,Views entry is only present when English is the display language,"https://github.com/microsoft/vscode/issues/192271
English:
![Image](https://github.com/microsoft/vscode/assets/2193314/2622f6bd-11fc-413b-8aec-816b4e034fc1)
Korean:
![Image](https://github.com/microsoft/vscode/assets/2193314/46b0c4b1-d910-419f-a558-9e4f7194d182)
Daniel:
😕 "
microsoft/vscode,2023-09-26 14:07:28,bug,Settings Sync is uninstalling the extension that is installed from sources,"Settings Sync is uninstalling the extension that is installed from sources.
It needs complex steps to setup and reproduce. @hediet has this setup and is seeing this issue."
microsoft/vscode,2023-09-26 13:42:32,bug,Problematic Theme Styles,"White on gray is not readable. I think the explorer tree has a different behavior here.
![Image](https://github.com/microsoft/vscode/assets/2931520/d2a424a2-93c8-4a90-942c-b95ecda099b4)
"
microsoft/vscode,2023-09-26 13:06:15,bug,SCM Sync: Multiple select is enabled,"Testing #194016
It seems like multiple selection is enabled in this list/tree, though without any value. Let's disable it for now."
microsoft/vscode,2023-09-26 12:49:20,bug,Arrow up while editing comment goes to previous comment,"Testing #194011
Editing the second of two comments, the arrow up key navigates to the first comment instead of the previous line in the comment being edited.
https://github.com/microsoft/vscode/assets/9205389/39d6dc8d-e06b-4cd4-8468-cbd56a6ed2aa
"
microsoft/vscode,2023-09-26 09:38:25,bug,Settings description is wrong/swapped,"Testing #194077
* configure `""notebook.codeActionsOnSave"": { ""notebook.source.normalizeVariableNames"": ""explicit""},`
* 🐛 the description of the value is wrong
"
microsoft/vscode,2023-09-26 08:31:03,bug,Remove xtermTerminal.clearActiveSearchDecoration,"Version: 1.83.0-insider (user setup)
Date: 2023-09-26T06:31:13.416Z
![1](https://github.com/microsoft/vscode/assets/48614781/b0eceb8d-cbf9-43b8-b66a-847570b5f99a)
"
microsoft/vscode,2023-09-25 18:52:55,bug,icon aria label isn't updated/correct,"Testing #194040
It says settings gear regardless, making it seem to a screen reader user like the icon hasn't been successfully changed.
https://github.com/microsoft/vscode/assets/29464607/9bd2fbff-518c-432a-a1d5-b8f8f27a818e
"
microsoft/vscode,2023-09-25 14:52:55,bug,File > Open Recent > recent workspace no longer opens in new window when Ctrl or Shift used in VS Code Insiders,"
Type: Bug
Opening in a new window straight from the File > Open Recent menu is a great tool to quickly get to another workspace without losing the currently open one, and much faster than New Window and searching for the same workspace in the new window.
So I hope this change is not intentional.
Steps to reproduce:
1. Have history of several opened workspaces
1. Open File > Open Recent submenu.
1. Press Ctrl or Shift and click on some of recent workspaces
1. Workspace in active window is replaced with selection from recent
Expected behavior:
New window should be opened with new workspace instead of replacing workspace in active window.
This behavior is reproducible in a standalone (zip) Insiders installation but not reproducible in a standalone (zip) Stable installation. However, in Stable, Ctrl (Shift) sometimes does nothing, but never replaces the current workspace.
VS Code version: Code - Insiders 1.83.0-insider (109e1f8d8afb754ed31317f79937a44e98d5063b, 2023-09-25T10:41:26.295Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz (8 x 2996)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|31.50GB (15.50GB free)|
|Process Argv||
|Screen Reader|no|
|VM|0%|
Extensions: none
"
microsoft/vscode,2023-09-25 13:55:47,bug,[Accessibility] Focus doesn't move to the symbol in terminal accessible view,"
Type: Bug
1. Open terminal in editor area.
1. Type `echo hello` and hit enter.
1. Type `echo world` and hit enter
1. Press alt+F2 to open accessible view
1. Press ctrl+shift+O to open symbol list and select `echo hello` and hit enter
* Note: the focus doesn't move to `echo hello`
VS Code version: Code - Insiders 1.83.0-insider (109e1f8d8afb754ed31317f79937a44e98d5063b, 2023-09-25T10:41:26.295Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1145G7 @ 2.60GHz (8 x 2611)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.71GB (6.53GB free)|
|Process Argv|--crash-reporter-id b05b88e5-8894-4031-ae34-fa034ebddea9|
|Screen Reader|yes|
|VM|0%|
Extensions (91)
Extension|Author (truncated)|Version
---|---|---
android-dev-ext|ade|1.3.2
aiprm-lang|AIP|0.0.2
Bookmarks|ale|13.4.1
openscad|Ant|1.2.1
spellright|ban|3.0.118
zoterolatex|bna|0.4.1
mermaid-markdown-syntax-highlighting|bpr|1.5.2
doxdocgen|csc|1.4.0
vscode-markdownlint|Dav|0.52.0
vscode-eslint|dba|2.4.2
vscode-quick-select|dba|0.2.9
vscode-deno|den|3.23.1
gitlens|eam|14.3.0
EditorConfig|Edi|0.16.4
prettier-vscode|esb|10.1.0
vscode-google-translate|fun|1.4.13
codespaces|Git|1.15.3
copilot|Git|1.115.437
copilot-chat|Git|0.8.2023092501
remotehub|Git|0.60.0
vscode-github-actions|git|0.26.2
vscode-pull-request-github|Git|0.72.0
overleaf-workshop|iam|0.2.4
cslpreview|igo|0.2.2
easy-snippet|inu|0.6.3
path-autocomplete|ion|1.25.0
latex-workshop|Jam|9.14.0
lilypond-syntax|jea|0.1.1
scheme|jea|0.2.0
better-cpp-syntax|jef|1.17.2
google-search|kam|0.0.1
vscode-lua-format|Koi|1.3.8
lilypond-formatter|lhl|0.2.3
lilypond-pdf-preview|lhl|0.2.8
lilypond-snippets|lhl|0.1.1
vslilypond|lhl|1.7.3
zotero|mbl|0.1.10
git-graph|mhu|1.30.0
vscode-docker|ms-|1.26.1
black-formatter|ms-|2023.4.1
flake8|ms-|2023.6.0
isort|ms-|2023.11.12681021
python|ms-|2023.16.0
vscode-pylance|ms-|2023.9.20
jupyter|ms-|2023.8.1002501831
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
remote-containers|ms-|0.312.0
remote-ssh|ms-|0.106.4
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.81.4
vscode-remote-extensionpack|ms-|0.24.0
azure-repos|ms-|0.36.0
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
js-debug-nightly|ms-|2023.9.2117
powershell|ms-|2023.6.0
remote-repositories|ms-|0.38.1
vscode-github-issue-notebooks|ms-|0.0.129
vscode-selfhost-test-provider|ms-|0.3.18
vscode-serial-monitor|ms-|0.10.0
vsliveshare|ms-|1.0.5883
autodocstring|njp|0.6.1
pandocciter|not|0.10.3
shiny-python|Pos|0.1.4
shinyuieditor|pos|0.4.3
quarto|qua|1.100.0
r-debugger|RDe|0.5.4
java|red|1.22.1
vscode-xml|red|0.26.1
r|REd|2.8.1
multi-command|ryu|1.6.0
vscode-deepl|soe|1.0.6
abc-music|sof|0.4.0
lua|sum|3.7.0
latex-utilities|tec|0.4.10
cmake|twx|0.0.17
errorlens|use|3.13.0
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-arduino|vsc|0.6.0
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.14
vscode-java-test|vsc|0.40.0
vscode-maven|vsc|0.42.0
markdown-all-in-one|yzh|3.5.1
grammarly|znc|0.22.1
(1 theme extensions excluded)
A/B Experiments
```
vsliv695:30137379
vsins829:30139715
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
pythontb:30258533
pythonptprofiler:30281269
vshan820:30294714
vscod805cf:30301675
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30404738
py29gd2263:30784851
vsclangdf:30492506
c4g48928:30535728
dsvsc012:30540252
pynewext54:30618038
a9j8j154:30646983
showlangstatbar:30737417
ecj1e332:30687743
pythonfmttext:30716741
fixshowwlkth:30771523
showindicator:30805243
pythongtdpath:30726887
i26e3531:30792625
welcomedialog:30812478
pythonnosmt12:30779711
pythonidxpt:30768918
pythonnoceb:30776497
copilotsettingt:30808721
asynctok:30821568
dsvsc013:30777762
dsvsc014:30777825
diffeditorv2:30786206
pythonlinttype:30823781
pythonmpsinfo:30842935
dsvsc015:30821418
pythontestfixt:30826906
pythonfb280951:30830809
pythonregdiag:30842812
```
"
microsoft/vscode,2023-09-25 09:35:53,bug,Cannot read properties of undefined (reading 'id'),"* open a file from a pull request
* get an error saying id cannot be read from undefined
```
ERR Cannot read properties of undefined (reading 'id'): TypeError: Cannot read properties of undefined (reading 'id')
at vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:765:140066
at Object.h [as map] (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:12:10432)
at h.next ()
at P (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:7:2275)
at r.setActions (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:765:140001)
at d.Z (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2412:20205)
at h.value (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:2412:20897)
at b.z (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:1902)
at b.fire (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:2119)
at b.resume (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:3188)
at vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/workbench/workbench.desktop.main.js:87:3469
```"
microsoft/vscode,2023-09-22 08:28:04,bug,Cannot read properties of null (reading 'b'),"```
ERR [uncaught exception in main]: Cannot read properties of null (reading 'b'): TypeError: Cannot read properties of null (reading 'b')
at h (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:103:24182)
at d.value (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:33:35781)
at $.z (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:1913)
at $.A (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:1983)
at $.fire (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:2199)
at d.value (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:106:42564)
at $.z (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:1913)
at $.fire (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:2130)
at J.setReady (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:105:100714)
at p.notifyReady (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:105:81450)
at Object.call (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:42:5018)
at I.s (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:40:5116)
at I.q (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:40:4639)
at d.value (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:40:4050)
at $.z (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:1913)
at $.A (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:1983)
at $.fire (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:2199)
at d.value (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:33:35891)
at $.z (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:1913)
at $.fire (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:2130)
at d.value (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:33:36099)
at $.z (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:1913)
at $.fire (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:35:2130)
at me (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:33:38543)
at IpcMainImpl.h (/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/out/vs/code/electron-main/main.js:36:96476)
at IpcMainImpl.emit (node:events:513:28)
```"
microsoft/vscode,2023-09-21 17:55:36,bug,active editor has commenting range is true even when it should not be,"I don't have a PR checked out and yet, it's `true`
![Image](https://github.com/microsoft/vscode/assets/29464607/54f4bb9b-d128-40ee-90cd-a83ca34e4e60)
![Image](https://github.com/microsoft/vscode/assets/29464607/e241bb2f-ffaa-43ed-8b3d-c70e440c01dd)
"
microsoft/vscode,2023-09-21 14:03:26,bug,Toggling tab pin row setting hides actions for me,Going from multi-row tabs to single-row tabs shows no editor actions.
microsoft/vscode,2023-09-20 18:36:34,bug,SCM Sync view: focus lost from the tree on mouse click,"Nice view 👍
Noticed that when you click on a file with the mouse, focus moves into the editor immediately. Maybe connect with @alexr00 and custom trees to learn how you can get opening behaviour that is consistent for free."
microsoft/vscode,2023-09-20 13:27:41,bug,Open editors view does not close editor that is selected anymore,"Type: Bug
On the right panel, where the opened files are listed, when you have a file selected and hover on another file, a close (x) icon appears; if you click that (x), the editor window that is closed is the file that is selected and not the file where you clicked (x). I keep reopening closed editors because of this recent issue.
VS Code version: Code 1.82.2 (Universal) (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:59:47.790Z)
OS version: Darwin arm64 22.1.0
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Apple M2 Pro (12 x 24)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|3, 4, 5|
|Memory (System)|16.00GB (0.04GB free)|
|Process Argv|--crash-reporter-id a4b55f5e-5134-402c-85da-b605e22d73f5|
|Screen Reader|no|
|VM|0%|
Extensions (61)
Extension|Author (truncated)|Version
---|---|---
better-comments|aar|3.0.2
alpine-js-intellisense|adr|1.2.0
laravel-extra-intellisense|ami|0.6.3
sidebar-markdown-notes|ass|1.2.0
laravel-blade-spacer|aus|2.1.3
laravel-docs|aus|1.10.0
tailwind-docs|aus|2.1.0
vscode-intelephense-client|bme|1.9.5
vscode-tailwindcss|bra|0.10.0
vscode-coloured-status-bar-problems|bra|0.2.0
vscode-better-align|cho|1.4.2
laravel-goto-view|cod|1.3.9
save-commands|dee|0.5.1
vscode-notes|dio|1.1.0
githistory|don|0.6.20
gitlens|eam|14.3.0
vscode-html-css|ecm|1.13.1
prettier-vscode|esb|10.1.0
restore-terminals|Eth|1.1.8
php-intellisense|fel|2.3.14
auto-rename-tag|for|0.1.10
code-runner|for|0.12.0
vscode-google-translate|fun|1.4.13
html-preview-vscode|geo|0.2.5
copilot|Git|1.113.423
copilot-chat|Git|0.7.1
copilot-labs|Git|0.15.1019
gitlab-workflow|Git|3.77.1
githd|hui|2.3.3
vscode-peacock|joh|4.2.2
vscode-inline-svg|kon|0.9.4
rainbow-csv|mec|3.7.1
php-namespace-resolver|Meh|1.1.9
git-graph|mhu|1.30.0
dotenv|mik|1.0.1
theme-monokai-pro-vscode|mon|1.2.1
empty-directory-extension|MRK|0.0.6
test-adapter-converter|ms-|0.1.8
resourcemonitor|mut|1.0.7
laravel-goto-components|nao|1.2.0
sftp|Nat|1.16.3
vscode-configurable-shortcuts|nor|1.0.3
laravel-blade|one|1.34.0
laravel5-snippets|one|1.17.0
material-icon-theme|PKi|4.30.1
vscode-thunder-client|ran|2.12.0
vscode-yaml|red|1.14.0
laravel-artisan|rya|0.0.31
vue-vscode-snippets|sdr|3.1.1
vscode-blade-formatter|shu|0.23.1
svg-preview|Sim|2.8.3
vscode-fileutils|sle|3.10.3
git-prefix|srm|1.3.1
workspace-explorer|tom|2.3.0
luna-paint|Tyr|0.16.0
remove-empty-lines|use|1.0.1
vscode-icons|vsc|12.5.0
volar|Vue|1.8.11
vscode-typescript-vue-plugin|Vue|1.8.11
vuetify-vscode|vue|0.2.0
php-debug|xde|1.33.0
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vswsl492:30256859
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vsdfh931cf:30280410
vshan820:30294714
vstes263cf:30335440
vscorecescf:30445987
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263:30792226
vsclangdc:30486549
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
3biah626:30602489
89544117:30613380
showlangstatbar:30737416
0bi6i642:30835152
03d35959:30757346
ecj1e332:30736112
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
asynctok:30821568
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015cf:30829746
```
"
microsoft/vscode,2023-09-19 16:27:29,bug,Terminal PS1 double,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.82.2
- OS Version: Fedora 38 Workstation edition
### My bashrc :
```bash
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Branch name
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \\(.*\\)/ (\\1)/'
}
# User specific environment
if ! [[ ""$PATH"" =~ ""$HOME/.local/bin:$HOME/bin:"" ]]
then
PATH=""$HOME/.local/bin:$HOME/bin:$PATH""
fi
export PATH
PS1=""[\\u@\\h \\W]\\[\\e[91m\\]\\$(parse_git_branch)\\[\\e[00m\\]$ ""
# Uncomment the following line if you don't like systemctl's auto-paging feature:
# export SYSTEMD_PAGER=
# User specific aliases and functions
if [ -d ~/.bashrc.d ]; then
for rc in ~/.bashrc.d/*; do
if [ -f ""$rc"" ]; then
. ""$rc""
fi
done
fi
unset rc
```
Separate terminal/vscode.
Lower(vscode terminal) : folder and branch is updating in the both cases (left and right)
![image](https://github.com/microsoft/vscode/assets/115103276/7e19c79a-f996-401d-9a5d-be4996ee63f0)
![Screenshot from 2023-09-19 21-40-59](https://github.com/microsoft/vscode/assets/115103276/1d593092-96e7-499f-bc95-5bd450b49578)
![Screenshot from 2023-09-19 21-41-04](https://github.com/microsoft/vscode/assets/115103276/e7a0c3ca-a0c5-4d36-b46c-36739f96b4b6)
"
microsoft/vscode,2023-09-19 14:54:31,bug,"Tasks using `""runOn"": ""folderOpen""` are broken","* have a task with `""runOn"": ""folderOpen""`, like https://github.com/microsoft/vscode-github-issue-notebooks/blob/e017704a5eb4bb6080322a3d489c76d421089111/.vscode/tasks.json#L18
* make sure it ran once
* reload/reopen folder
* 🐛 task doesn't run"
microsoft/vscode,2023-09-19 11:10:15,bug,[Accessibility] Display terminal-specific help in Accessible View for terminal buffer,"
Type: Bug
Currently, when alt+f1 is pressed in the accessible terminal buffer, it displays the following info:
> In the accessible view, you can:
> - Show the next (Alt+]) or previous (Alt+[) item
> - Navigate to the toolbar (Shift+Tab))
The above instruction is not applicable for the terminal buffer, and the original terminal buffer help content needs to be displayed here.
VS Code version: Code - Insiders 1.83.0-insider (7c7f7eee860e299499a3bd2915ad716f09f2d6a6, 2023-09-19T08:56:35.775Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1145G7 @ 2.60GHz (8 x 2611)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.71GB (6.06GB free)|
|Process Argv|C:\\\\Users\\\\jseo1005\\\\OneDrive - University of Illinois - Urbana\\\\Desktop\\\\source.R --crash-reporter-id b05b88e5-8894-4031-ae34-fa034ebddea9|
|Screen Reader|yes|
|VM|0%|
Extensions (91)
Extension|Author (truncated)|Version
---|---|---
android-dev-ext|ade|1.3.2
aiprm-lang|AIP|0.0.2
Bookmarks|ale|13.4.1
openscad|Ant|1.2.1
spellright|ban|3.0.118
zoterolatex|bna|0.4.1
mermaid-markdown-syntax-highlighting|bpr|1.5.2
doxdocgen|csc|1.4.0
vscode-markdownlint|Dav|0.51.0
vscode-eslint|dba|2.4.2
vscode-quick-select|dba|0.2.9
vscode-deno|den|3.22.0
gitlens|eam|14.3.0
EditorConfig|Edi|0.16.4
prettier-vscode|esb|10.1.0
vscode-google-translate|fun|1.4.13
codespaces|Git|1.15.3
copilot|Git|1.112.422
copilot-chat|Git|0.8.2023091901
remotehub|Git|0.60.0
vscode-github-actions|git|0.26.2
vscode-pull-request-github|Git|0.72.0
overleaf-workshop|iam|0.1.5
cslpreview|igo|0.2.2
easy-snippet|inu|0.6.3
path-autocomplete|ion|1.25.0
latex-workshop|Jam|9.13.4
lilypond-syntax|jea|0.1.1
scheme|jea|0.2.0
better-cpp-syntax|jef|1.17.2
google-search|kam|0.0.1
vscode-lua-format|Koi|1.3.8
lilypond-formatter|lhl|0.2.3
lilypond-pdf-preview|lhl|0.2.8
lilypond-snippets|lhl|0.1.1
vslilypond|lhl|1.7.3
zotero|mbl|0.1.10
git-graph|mhu|1.30.0
vscode-docker|ms-|1.26.0
black-formatter|ms-|2023.4.1
flake8|ms-|2023.6.0
isort|ms-|2023.11.12061012
python|ms-|2023.16.0
vscode-pylance|ms-|2023.9.10
jupyter|ms-|2023.8.1002501831
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
remote-containers|ms-|0.311.0
remote-ssh|ms-|0.106.4
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.81.3
vscode-remote-extensionpack|ms-|0.24.0
azure-repos|ms-|0.36.0
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
js-debug-nightly|ms-|2023.9.1317
powershell|ms-|2023.6.0
remote-repositories|ms-|0.38.1
vscode-github-issue-notebooks|ms-|0.0.129
vscode-selfhost-test-provider|ms-|0.3.18
vscode-serial-monitor|ms-|0.10.0
vsliveshare|ms-|1.0.5883
autodocstring|njp|0.6.1
pandocciter|not|0.10.3
shiny-python|Pos|0.1.4
shinyuieditor|pos|0.4.3
quarto|qua|1.98.0
r-debugger|RDe|0.5.4
java|red|1.22.1
vscode-xml|red|0.26.1
r|REd|2.8.1
multi-command|ryu|1.6.0
vscode-deepl|soe|1.0.6
abc-music|sof|0.4.0
lua|sum|3.7.0
latex-utilities|tec|0.4.10
cmake|twx|0.0.17
errorlens|use|3.13.0
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-arduino|vsc|0.6.0
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.14
vscode-java-test|vsc|0.39.1
vscode-maven|vsc|0.42.0
markdown-all-in-one|yzh|3.5.1
grammarly|znc|0.22.1
(1 theme extensions excluded)
A/B Experiments
```
vsliv695:30137379
vsins829:30139715
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
pythontb:30258533
pythonptprofiler:30281269
vshan820:30294714
vscod805cf:30301675
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30404738
py29gd2263:30784851
vsclangdf:30492506
c4g48928:30535728
dsvsc012:30540252
pynewext54:30618038
a9j8j154:30646983
showlangstatbar:30737417
ecj1e332:30687743
pythonfmttext:30716741
fixshowwlkth:30771523
showindicator:30805243
pythongtdpath:30726887
i26e3531:30792625
welcomedialog:30812478
pythonnosmt12:30779711
pythonidxpt:30768918
pythonnoceb:30776497
copilotsettingt:30808721
asynctok:30821568
dsvsc013:30777762
dsvsc014:30777825
diffeditorv2:30786206
pythonlinttype:30823781
pythonmpsinfo:30815194
dsvsc015:30821418
pythontestfixt:30826906
pythonfb280951:30830809
```
"
microsoft/vscode,2023-09-19 01:19:55,bug,"Extension development:Frequent calls to _onDidChangeTreeData.fire(), resulting in memory leakage","
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version: V1.81.1
- OS Version: Linux kkuser-virtual-machine 5.15.0-83-generic #92~20.04.1-Ubuntu SMP Mon Aug 21 14:00:49 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux
Steps to Reproduce:
1. There are constant changes in attribute states in the treeview
2. Frequent calls to this._onDidChangeTreeData.fire() functions result in memory leakage
"
microsoft/vscode,2023-09-18 13:32:35,bug,Bad Diff,"Notice the confusing import:
![Image](https://github.com/microsoft/vscode/assets/2931520/714c15c5-801d-4a48-90b0-8801263fac66)
[Monaco Editor Repro](https://microsoft.github.io/monaco-editor/playground.html?source=v0.44.0-dev-20230918#XQAAAAKuAwAAAAAAAABBqQkHQ5NjdMjwa-jY7SIQ9S7DNlzs5W-mwj0fe1ZCDRFc9ws9XQE0SJE1jc2VKxhaLFIw9vEWSxW3yscw4m0lEfGetoVyYgiB3WIAyGCL4buFPFOLc1lAy95RUKo06z2t3AsctLSHAcjORfYRN1yg400RgiDpSRk10aTwvQv6zU0xdTpaj6zSTK7O5w1Odpy7ndjoSj3ma3YVF3_FmBOgOwiopFHNNBPqMbq-v0RvCX2wcDNfJrORPwG1VK7vtKAvoDnL6nqm0qcLeCox2Vcd-zngBrgzSqiTgEXQEGrD3g7gonOqEAOByAGd_oCugkOt3ao7jXzQd3w4Fy-4lLimVF3_GvmNjyR7YPne-K0XCQvOZEr9A5yeBu954aGEYPtU0P7tS7ZI7XdPVWp_7Vx2LCz8OoMuuTAYZTfq_ZGyKuBTW_m2-yc_3ERGu8lnSsV9_kZXkpX9zk4UDmZemygf6VkJ5wo45Sstklpfb8Z8bTlqETeOfyLOVckLmu1MlZ26HwaiabpFkfqVf5VDfHsrfXn5RGiDRliyDdZrtECrEiRwdjhhRx-4_z-Exv_uPW5o)"
microsoft/vscode,2023-09-17 20:40:50,bug,Terminal context menu not hiding after running action,"
- VS Code Version: insiders and from sources
Steps to Reproduce:
1. Create terminal
2. Right click to show context menu
3. Run `Copy` or `Clear` action
4. :bug: Context menu still visible
Regression from https://github.com/microsoft/vscode/pull/192809
cc @Tyriar
"
microsoft/vscode,2023-09-15 18:04:59,bug,[Accessibility] accessible-buffer is not auto-focused,"
Type: Bug
I think there was an inadvertent regression in recent accessibility patches. {""terminal.integrated.focusAfterRun"": ""accessible-buffer""} setting does not take any effect. Tested on Windows with NVDA and JAWS. The focus does not move to the accessible buffer.
VS Code version: Code - Insiders 1.83.0-insider (bccfade64adb249f57c8fcf03cba41609f76ce5c, 2023-09-15T05:35:16.508Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1145G7 @ 2.60GHz (8 x 2611)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.71GB (6.19GB free)|
|Process Argv|C:\\\\Users\\\\jseo1005\\\\OneDrive - University of Illinois - Urbana\\\\Desktop\\\\source.py --crash-reporter-id b05b88e5-8894-4031-ae34-fa034ebddea9|
|Screen Reader|yes|
|VM|0%|
Extensions (90)
Extension|Author (truncated)|Version
---|---|---
android-dev-ext|ade|1.3.2
aiprm-lang|AIP|0.0.2
Bookmarks|ale|13.4.1
openscad|Ant|1.2.1
spellright|ban|3.0.118
zoterolatex|bna|0.4.1
mermaid-markdown-syntax-highlighting|bpr|1.5.2
doxdocgen|csc|1.4.0
vscode-markdownlint|Dav|0.51.0
vscode-eslint|dba|2.4.2
vscode-quick-select|dba|0.2.9
vscode-deno|den|3.22.0
gitlens|eam|14.3.0
EditorConfig|Edi|0.16.4
prettier-vscode|esb|10.1.0
vscode-google-translate|fun|1.4.13
codespaces|Git|1.15.2
copilot|Git|1.111.414
copilot-chat|Git|0.8.2023091501
remotehub|Git|0.60.0
vscode-github-actions|git|0.26.2
vscode-pull-request-github|Git|0.72.0
cslpreview|igo|0.2.2
easy-snippet|inu|0.6.3
path-autocomplete|ion|1.25.0
latex-workshop|Jam|9.13.4
lilypond-syntax|jea|0.1.1
scheme|jea|0.2.0
better-cpp-syntax|jef|1.17.2
google-search|kam|0.0.1
vscode-lua-format|Koi|1.3.8
lilypond-formatter|lhl|0.2.3
lilypond-pdf-preview|lhl|0.2.8
lilypond-snippets|lhl|0.1.1
vslilypond|lhl|1.7.3
zotero|mbl|0.1.10
git-graph|mhu|1.30.0
vscode-docker|ms-|1.26.0
black-formatter|ms-|2023.4.1
flake8|ms-|2023.6.0
isort|ms-|2023.11.12061012
python|ms-|2023.16.0
vscode-pylance|ms-|2023.9.10
jupyter|ms-|2023.8.1002501831
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
remote-containers|ms-|0.311.0
remote-ssh|ms-|0.106.4
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.81.3
vscode-remote-extensionpack|ms-|0.24.0
azure-repos|ms-|0.36.0
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
js-debug-nightly|ms-|2023.9.1317
powershell|ms-|2023.6.0
remote-repositories|ms-|0.38.1
vscode-github-issue-notebooks|ms-|0.0.129
vscode-selfhost-test-provider|ms-|0.3.18
vscode-serial-monitor|ms-|0.10.0
vsliveshare|ms-|1.0.5883
autodocstring|njp|0.6.1
pandocciter|not|0.10.3
shiny-python|Pos|0.1.4
shinyuieditor|pos|0.4.3
quarto|qua|1.98.0
r-debugger|RDe|0.5.4
java|red|1.22.1
vscode-xml|red|0.26.1
r|REd|2.8.1
multi-command|ryu|1.6.0
vscode-deepl|soe|1.0.6
abc-music|sof|0.4.0
lua|sum|3.7.0
latex-utilities|tec|0.4.10
cmake|twx|0.0.17
errorlens|use|3.13.0
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-arduino|vsc|0.6.0
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.14
vscode-java-test|vsc|0.39.1
vscode-maven|vsc|0.42.0
markdown-all-in-one|yzh|3.5.1
grammarly|znc|0.22.1
(1 theme extensions excluded)
A/B Experiments
```
vsliv695:30137379
vsins829:30139715
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
pythontb:30258533
pythonptprofiler:30281269
vshan820:30294714
vscod805cf:30301675
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30404738
py29gd2263:30784851
vsclangdf:30492506
c4g48928:30535728
dsvsc012:30540252
pynewext54:30618038
a9j8j154:30646983
showlangstatbar:30737417
ecj1e332:30687743
pythonfmttext:30716741
fixshowwlkth:30771523
showindicator:30805243
pythongtdpath:30726887
i26e3531:30792625
welcomedialog:30812478
pythonnosmt12:30779711
pythonidxpt:30768918
pythonnoceb:30776497
copilotsettingt:30808721
asynctok:30821568
dsvsc013:30777762
dsvsc014:30777825
diffeditorv2:30786206
pythonlinttype:30823781
pythonmpsinfo:30815194
dsvsc015:30821418
pythontestfixt:30826906
pythonfb280951:30830809
```
"
microsoft/vscode,2023-09-15 10:43:39,bug,TypeError: Cannot read properties of undefined (reading 'trim'),"The diff editor was actually unable to compute the diff and just hangs.
* original file:
[original.txt](https://github.com/microsoft/vscode/files/12618480/original.txt)
* modified file:
[modified.txt](https://github.com/microsoft/vscode/files/12618481/modified.txt)
```
ERR Cannot read properties of undefined (reading 'trim'): TypeError: Cannot read properties of undefined (reading 'trim')
at $ (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:37:3518)
at e (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:37:3086)
at b (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:36:3677)
at E.h (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:37:6960)
at E.computeDiff (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:37:6268)
at v.l (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:41:59363)
at v.computeDiff (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:41:59226)
at i.d (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:41:45244)
at Object.handleMessage (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:41:44965)
at A.k (vscode-file://vscode-app/Applications/Visual%20Studio%20Code%20-%20Insiders.app/Contents/Resources/app/out/vs/base/worker/workerMain.js#editorWorkerService:41:42344)
```"
microsoft/vscode,2023-09-14 18:48:12,bug,Top padding is wrong on comment view zone,"Notice the hover feedback is cut off vertically and should have additional spacing.
![Image](https://github.com/microsoft/vscode/assets/2193314/0602b10f-9761-48f6-a320-02ca7b57a4e0)
Setup:
1. Korean language pack (may impact?)
2. `""window.zoomLevel"": 2
It also happens with other zoom levels:
![Image](https://github.com/microsoft/vscode/assets/2193314/f9ebf622-f4fd-49db-8ec6-f790c25f7ea2)
"
microsoft/vscode,2023-09-14 09:34:02,bug,Sash variables end up on the HTML element,"I would have expected maybe the workbench element if this needs to be global?
![Image](https://github.com/microsoft/vscode/assets/900690/345163aa-9713-4525-bcd5-6ffbeb7bb5bb)
"
microsoft/vscode,2023-09-13 16:33:54,bug,F9 removes existing breakpoint instead of adding one,"- Add a breakpoint on line 1
- Move cursor to line 2
- Press F9- removes the breakpoint on line 1 instead of adding one on line 2
@connor4312 this is from https://github.com/microsoft/vscode/pull/192483/files#diff-46dcc835c545807c00c0fc7a1797c010f84d51fac120e4205fb68cf114c3d9fdR84-R86, why did that change?"
microsoft/vscode,2023-09-13 00:47:57,bug,code CLI update mechanism previous version renaming anomaly ,"- VS Code Version: 1.82
- OS Version: Linux
When the vscode CLI update mechanism updates the code binary, the process preserves the outdated version by appending the executable with .old. At present, the process appends a second period, resulting in code..old. This occurs through `code update` or when it is updated through the Notification Pop-Up when Connected to Tunnel on vscode.dev
```
localhost /usr/local/bin # ls -la code*
-rwxr-xr-x. 1 root root 17981560 Sep 12 16:59 code
-rwxr-xr-x. 1 root root 17981560 Sep 8 04:59 code..old
localhost /usr/local/bin # ./code --version
code 1.82.1 (commit 6509174151d557a81c9d0b5f8a5a1e9274db5585)
localhost /usr/local/bin # ./code..old --version
code 1.82.0 (commit 8b617bd08fd9e3fc94d14adb8d358b56e3f72314)
```
Steps to Reproduce:
1. Invoke code CLI update
2. Examine path where code CLI lives"
microsoft/vscode,2023-09-12 20:44:29,bug,Unbounded keybindings are presented in empty workbench hints,"Find in Files and Show Settings provide no value, just noise:
![Image](https://github.com/microsoft/vscode/assets/2193314/d5b0a683-7fb9-4569-a794-7eafd2aea8a7)
Not sure who owns this, I remember Christof but maybe not?"
microsoft/vscode,2023-09-12 16:44:00,bug,quick search: preserveInput priority over selected text,"We added support for auto-populating selected text to quick search (https://github.com/microsoft/vscode/issues/191513), but setting `preserveInput` overrides this.
This is because `preserveInput` takes precedence over any default string (even if `defaultFilterValue` is defined).
https://github.com/microsoft/vscode/blob/26f41a49948c6da4eec5c93836c1c331364b54f2/src/vs/platform/quickinput/browser/quickAccess.ts#L83-L87
(from https://github.com/microsoft/vscode/pull/191956)"
microsoft/vscode,2023-09-12 13:58:53,bug,"When re-requesting a copilot inline chat answer, pressing escape accepts the current solution","* trigger inline chat and send a command
* press the reload button in the chat
* press escape
* notice that it keeps the last result (and does not restore the initial document)
When I don't click the reload button, pressing escape restores the initial document.
![Image](https://github.com/microsoft/vscode/assets/2931520/7b712edf-55f9-4edd-adb3-9c21107c8713)
Context:
```ts
// Remove short suffixes/prefixes
for (let i = 0; i < diffs.length; i++) {
const cur = diffs[i];
let newDiff = cur;
const fullRange1 = sequence1.extendToFullLines(cur.seq1Range);
const prefix = sequence1.getText(new OffsetRange(fullRange1.start, cur.seq1Range.start));
if (prefix.length > 0 && prefix.trim().length <= 3 && cur.seq1Range.length + cur.seq2Range.length > 100) {
newDiff = newDiff.deltaStart(-prefix.length);
}
const suffix = sequence1.getText(new OffsetRange(cur.seq1Range.endExclusive, fullRange1.endExclusive));
if (suffix.length > 0 && (suffix.trim().length <= 3 && cur.seq1Range.length + cur.seq2Range.length > 100)) {
newDiff = newDiff.deltaEnd(suffix.length);
}
while (true) {
const prevDiff = lastOrDefault(newDiffs);
if (prevDiff) {
if (newDiff.intersectsOrTouches(prevDiff)) {
newDiff = newDiff.join(prevDiff);
newDiffs.pop();
continue;
}
}
break;
}
newDiffs.push(newDiff);
}
```"
microsoft/vscode,2023-09-11 19:07:30,bug,go to symbol in the terminal's accessible view doesn't contain all commands after the first invocation,"1. with screen reader mode enabled, run some commands in the terminal
2. `ctrl/cmd+up arrow` to open the accessible view
3. `ctrl/cmd+shift+o` to go to symbol
4. ✅ the commands are there
5. `Escape` then `ctrl/cmd+shift+o` again
6. 🐛 some commands are missing (only the most recent one is there)"
microsoft/vscode,2023-09-11 14:55:26,bug,Test runner hangs with global `teardown` throwing,"Steps to Reproduce:
1. `git co ben/eventual-earthworm` or make sure https://github.com/microsoft/vscode/pull/192774 has landed
2. in `test/unit/electron/renderer.js` make sure to make `_allowedTestsWithUnhandledRejections` an empty `Set`
3. open `src/vs/workbench/services/lifecycle/test/electron-sandbox/lifecycleService.test.ts`
4. click on `suite('Lifecycleservice...` for running the suite
=> 🐛 the suite never finishes
![Image](https://github.com/microsoft/vscode/assets/900690/db3f786c-9edb-4cbb-9600-61483d956a48)
![Recording 2023-09-11 at 16 56 17](https://github.com/microsoft/vscode/assets/900690/ade08c9e-7e69-4190-ac3a-0090bd01ebc7)
"
microsoft/vscode,2023-09-09 16:05:16,bug,Problems with minimumContrastRatio inverse/selection edge cases,"Upstream: https://github.com/xtermjs/xterm.js/issues/4759
To verify:
1. On linux/macOS/wsl
2. Run `echo 'normal \\x1b[7minverse\\x1b[0m'`
3. Test various values of `terminal.integrated.minimumContrastRatio` (eg. 1, 4.5, 10) first with no selection and second with a selection. You may need to change your theme to see the differences"
microsoft/vscode,2023-09-09 12:01:13,bug,Terminal: Invisible text is visible in the DOM renderer,"Upstream: https://github.com/xtermjs/xterm.js/issues/4758
Repro:
1. On Linux/macOS/WSL
2. Run `echo -e '\\x1b[8minvisible'`, it should not show invisible in the output"
microsoft/vscode,2023-09-09 06:38:13,bug,Debug console is not working,"Type: Bug
After updating yesterday, I encountered an issue with my VS code where I set a breakpoint and attempted to enter a variable in the debug console, but the console remained empty.
VS Code version: Code 1.82.0 (8b617bd08fd9e3fc94d14adb8d358b56e3f72314, 2023-09-06T22:07:07.438Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz (8 x 2419)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.79GB (3.02GB free)|
|Process Argv|--file-uri file:///d%3A/Work/InstachatAI/InstachatAIApi.code-workspace --crash-reporter-id 6da24a21-c0cd-41c8-b3e7-ed9a77b716df|
|Screen Reader|no|
|VM|0%|
Extensions (52)
Extension|Author (truncated)|Version
---|---|---
codesnap|adp|1.3.4
TabOut|alb|0.2.2
Bookmarks|ale|13.4.1
ng-template|Ang|16.1.8
vscode-django|bat|1.10.0
vscode-opennewinstance|chr|0.0.12
fastapi-snippets|dam|0.0.2
dart-code|Dar|3.72.2
flutter|Dar|3.72.0
gitlens|eam|14.3.0
prettier-vscode|esb|10.1.0
remotehub|Git|0.60.0
vscode-pull-request-github|Git|0.72.0
gc-excelviewer|Gra|4.2.58
todo-tree|Gru|0.0.226
vscode-drawio|hed|1.6.6
git-graph|mhu|1.30.0
dotenv|mik|1.0.1
vscode-docker|ms-|1.26.0
csharp|ms-|2.1.2
vscode-dotnet-runtime|ms-|1.7.2
vscode-edge-devtools|ms-|2.1.3
autopep8|ms-|2023.6.0
isort|ms-|2023.10.1
python|ms-|2023.16.0
vscode-pylance|ms-|2023.9.10
jupyter|ms-|2023.8.1002501831
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
remote-containers|ms-|0.309.0
remote-ssh|ms-|0.106.2
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.81.2
vscode-remote-extensionpack|ms-|0.24.0
remote-explorer|ms-|0.4.1
remote-repositories|ms-|0.38.1
remote-server|ms-|1.4.3
vsliveshare|ms-|1.0.5883
autodocstring|njp|0.6.1
material-icon-theme|PKi|4.30.1
material-product-icons|PKi|1.6.0
sqlite-viewer|qwt|0.3.13
vscode-thunder-client|ran|2.11.4
vscode-yaml|red|1.14.0
errorlens|use|3.13.0
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscodeintellicode-completions|Vis|1.0.22
change-case|wma|1.0.0
material-theme|zhu|3.16.0
(3 theme extensions excluded)
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627cf:30244335
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vsdfh931:30280409
vshan820:30294714
vstes263:30335439
vscorecescf:30445987
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263:30792226
vsclangdc:30486549
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
3biah626:30602489
f6dab269:30613381
2i9eh265:30646982
showlangstatbar:30737416
a2ce3375:30757347
7ij38806:30736111
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015cf:30829746
```
"
microsoft/vscode,2023-09-08 20:54:02,bug,Port forwarding for remote-ssh doesn't notice closed socket on server side,"Type: Bug
Since the 1.82.0 update, using port forwards for remote SSH development has been extremely difficult for me, as the port forwarding system doesn't seem to notice when a socket is closed on the server side.
This leads to my client application running on the local side (generally Chrome) sending packets (e.g. GET requests) into the port forward (which shows up in `netstat` as `ESTABLISHED` still from chrome to Code, but those packets seem to be dropped into the bit bucket as the socket from the vscode-server to my application is closed (in `TIME_WAIT`) on the server (remote development machine). This causes HTTP requests to time out (very ... verry ......... sloooooooooowly) which severely hampers being able to get anything done.
VS Code version: Code 1.82.0 (8b617bd08fd9e3fc94d14adb8d358b56e3f72314, 2023-09-06T22:07:18.759Z)
OS version: Linux x64 6.2.0-32-generic
Modes:
Remote OS version: Linux x64 5.10.0-25-cloud-amd64
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-9700 CPU @ 3.00GHz (8 x 900)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: disabled_software vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: disabled_off|
|Load (avg)|1, 1, 1|
|Memory (System)|31.18GB (22.54GB free)|
|Process Argv|--unity-launch --crash-reporter-id f628f52a-51ea-40c8-aedd-0af9d4a6bd9f|
|Screen Reader|no|
|VM|0%|
|DESKTOP_SESSION|ubuntu-xorg|
|XDG_CURRENT_DESKTOP|Unity|
|XDG_SESSION_DESKTOP|ubuntu-xorg|
|XDG_SESSION_TYPE|x11|
|Item|Value|
|---|---|
|Remote|SSH: gdm.mgabeler-lee|
|OS|Linux x64 5.10.0-25-cloud-amd64|
|CPUs|AMD EPYC 7B13 (16 x 2449)|
|Memory (System)|31.36GB (17.60GB free)|
|VM|0%|
Extensions (26)
Extension|Author (truncated)|Version
---|---|---
Bookmarks|ale|13.4.1
vscode-peacock|joh|4.2.2
remote-ssh|ms-|0.106.1
remote-ssh-edit|ms-|0.86.0
remote-explorer|ms-|0.4.1
rewrap|stk|1.16.3
errorlens|use|3.13.0
github-markdown-preview|bie|0.3.0
markdown-checkbox|bie|0.4.0
markdown-emoji|bie|0.3.0
markdown-footnotes|bie|0.1.1
markdown-mermaid|bie|1.19.0
markdown-preview-github-styles|bie|2.0.2
markdown-yaml-preamble|bie|0.1.0
vscode-markdownlint|Dav|0.51.0
vscode-eslint|dba|2.4.2
EditorConfig|Edi|0.16.4
vscode-typescript-exportallmodules|eli|2.6.0
prettier-vscode|esb|10.1.0
terraform|has|2.27.2
bash-ide-vscode|mad|1.39.0
uuid-generator|net|0.0.5
vscode-commons|red|0.0.6
vscode-yaml|red|1.14.0
vscode-workspace-switcher|sad|1.15.3
code-spell-checker|str|3.0.0
(1 theme extensions excluded)
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscorecescf:30445987
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
3biah626:30602489
89544117:30613380
2i9eh265:30646982
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
asynctok:30821568
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015cf:30829746
```
"
microsoft/vscode,2023-09-08 18:43:50,bug,"""Press ctrl+I to ask Copilot ...` message showing in empty output channel","Steps:
1. Open an empty output channel. To do this, you can open an HTML file from a fresh reload and then check the HTML language server output channel.
2. The output channel says ""Press `CTRL`+`I` to ask Copilot to do something.""
![Image](https://github.com/microsoft/vscode/assets/31675041/129778b6-a686-4336-9ce2-9347c09bd0da)
"
microsoft/vscode,2023-09-08 14:56:51,bug,Disposable leaks cause unit test failures,"Build: https://dev.azure.com/monacotools/a6d41577-0fa3-498e-af22-257312ff0545/_build/results?buildId=231184
Changes: https://github.com/Microsoft/vscode/compare/8971983...35c2ce3
```
(shared with 1/2 leaks) at $Fg (out-build/vs/base/common/async.js:389:16)
(shared with 1/2 leaks) at /mnt/vss/_work/1/s/out-build/vs/base/common/async.js:387:33
(shared with 1/2 leaks) at $sg (out-build/vs/base/common/async.js:15:26)
- stacktraces of 1 other leaks continue with $sg (out-build/vs/base/common/async.js:16:25)
(shared with 2/2 leaks) at Object.$Fg (out-build/vs/base/common/async.js:387:20)
(shared with 2/2 leaks) at Context. (out-build/vs/base/test/common/async.test.js:613:45)
(shared with 2/2 leaks) at process.processImmediate (node:internal/timers:476:21)
============================================================
==================== Leaking disposable 2/2: Object ====================
(shared with 1/2 leaks) at $8S.trackDisposable (out-build/vs/base/test/common/utils.js:56:21)
(shared with 1/2 leaks) at $db (out-build/vs/base/common/lifecycle.js:58:28)
(shared with 1/2 leaks) at $kb (out-build/vs/base/common/lifecycle.js:143:22)
(shared with 1/2 leaks) at MutableToken.q (out-build/vs/base/common/event.js:822:52)
(shared with 1/2 leaks) at /mnt/vss/_work/1/s/out-build/vs/base/common/async.js:17:47
(shared with 1/2 leaks) at new Promise ()
(shared with 1/2 leaks) at $sg (out-build/vs/base/common/async.js:16:25)
- stacktraces of 1 other leaks continue with $sg (out-build/vs/base/common/async.js:15:26)
(shared with 2/2 leaks) at Object.$Fg (out-build/vs/base/common/async.js:387:20)
(shared with 2/2 leaks) at Context. (out-build/vs/base/test/common/async.test.js:613:45)
(shared with 2/2 leaks) at process.processImmediate (node:internal/timers:476:21)
============================================================
at $8S.ensureNoLeakingDisposables (out-build/vs/base/test/common/utils.js:145:19)
at Context. (out-build/vs/base/test/common/utils.js:170:25)
at process.processImmediate (node:internal/timers:476:21)
```"
microsoft/vscode,2023-09-08 08:22:20,bug,Sticky scroll: folding icons missing from some sticky headers,"Type: Bug
> Issue troubleshooting has identified that the issue is with Visual Studio Code - Insiders.
If the sticky scroll header shows only one line, that line has no folding indicator.
If it shows 2 or more lines, only the last line has the indicator.
![junk](https://github.com/microsoft/vscode/assets/6726799/e76de0d7-e8f0-4b86-bea5-4b96c3bdb4d2)
This is working correctly in 1.82.0 Stable.
VS Code version: Code - Insiders 1.83.0-insider (5a400e53e985dc5e24f6ee574b07ab23943841c5, 2023-09-07T17:35:42.740Z)
OS version: Windows_NT x64 10.0.22621
Modes:
"
microsoft/vscode,2023-09-08 06:03:56,bug,1.82 cannot select text in integrated terminal,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.82.0
- OS Version: Ubuntu 22.04
Steps to Reproduce:
1. Try to select text in the integrated terminal by shift + select text. Doesn't work.
2. Last working in 1.81.*
"
microsoft/vscode,2023-09-07 22:33:02,bug,Tab separator setting refers to the same setting twice,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.82.0
- OS Version: Windows 10
Steps to Reproduce:
1. Go to settings
2. Find `terminal.integrated.tabs.separator`
3. It refers to one setting twice, it should refer to `title` and `description`:
![image](https://github.com/microsoft/vscode/assets/3606072/4ffa9275-d8ab-4d1f-bd73-2b175d108b58)
"
microsoft/vscode,2023-09-07 19:58:51,bug,VSCode warning 64 bit Windows users about deprecated 32 bit Windows support,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version: 1.82.0
- OS Version: Windows 10 Pro - 64 bit
My VSCode just updated this morning to 1.82.0 and now it's warning me that I won't be able to use it on Windows 32 bit anymore. Except I'm _not_ using 32 bit Windows, I'm using 64 bit Windows.
![image](https://github.com/microsoft/vscode/assets/298883/c6898685-2868-4345-bf26-7045c383acc9)
![image](https://github.com/microsoft/vscode/assets/298883/b1ef7306-b901-4f22-bf7f-603d598a80c6)
![image](https://github.com/microsoft/vscode/assets/298883/938e717f-ddc3-4692-8ba6-86ba072801c5)
![image](https://github.com/microsoft/vscode/assets/298883/49e4bbdc-4f72-4515-9279-172ee4f4e590)
"
microsoft/vscode,2023-09-07 15:40:03,bug,Why doesn't reflow work in test view output?,"When I resize this down and back up again:
![Image](https://github.com/microsoft/vscode/assets/2193314/1d75b734-3f0a-4a83-b9a9-f036e4693fa7)
I see this:
![Image](https://github.com/microsoft/vscode/assets/2193314/40421dee-e3a6-4344-9619-a87e70956bed)
Why isn't reflow working here?
Does the terminal not have scrollback by chance as it will be disabled if so:
https://github.com/xtermjs/xterm.js/blob/19c760a20470c26666efad770fe82496c4a9bf1e/src/common/buffer/Buffer.ts#L292-L294"
microsoft/vscode,2023-09-07 13:12:58,bug,Editor scrollbar markers update only after cursor move,"1. In a TS file, use the keyboard and type out a syntax error.
🐛 The scrollbar error marker doesn't show up.
2. Move the text cursor to another location using the mouse. The scrollbar error marker shows up then.
https://github.com/microsoft/vscode/assets/22350/ce7eea5f-7c2d-4bc9-ab40-f6760c05a09a
@hediet says:
> I think you need eslint [to repro]"
microsoft/vscode,2023-09-07 12:55:24,bug,Web: cannot right click into custom title,"Steps to Reproduce:
1. open https://insiders.vscode.dev/
2. notice the custom title shows by default (probably because of command center enabled by default?)
3. right click into empty space of custom title
=> 🐛 the menu does not open (or quickly closes)
![Image](https://github.com/microsoft/vscode/assets/900690/d7eadf67-588e-43c9-8601-7e967cc227c5)
"
microsoft/vscode,2023-09-07 04:43:00,bug,Folders not opening,"
Type: Bug
When I open a new VS Code window, it shows me the ""Recent"" section. This section has a More... menu. When I press the More... menu, nothing happens. This problem has existed for 2-3 months
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz (8 x 2419)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.65GB (8.85GB free)|
|Process Argv|--crash-reporter-id ff9a8db9-c9cf-42a6-a0e2-5dc8ac461d11|
|Screen Reader|no|
|VM|0%|
Extensions (13)
Extension|Author (truncated)|Version
---|---|---
vite|ant|0.2.5
vscode-apollo|apo|1.19.11
vscode-eslint|dba|2.4.2
gitlens|eam|14.2.1
prettier-vscode|esb|10.1.0
vscode-highlight|fab|1.9.0
vue-snippets|hol|1.0.4
graphql|mqu|0.1.2
vetur|oct|0.37.3
material-icon-theme|PKi|4.30.1
tabnine-vscode|Tab|3.8.13
volar|Vue|1.8.10
vscode-typescript-vue-plugin|Vue|1.8.10
(2 theme extensions excluded)
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vswsl492:30256859
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263cf:30792227
vscaac:30438847
vsclangdf:30486550
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
282f8724:30602487
f6dab269:30613381
2i9eh265:30646982
showlangstatbar:30737416
962ge761:30823813
a2ce3375:30757347
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015:30829745
```
"
microsoft/vscode,2023-09-06 18:54:49,bug,Cannot read properties of null (reading 'uri'),"```javascript
TypeError: Cannot read properties of null (reading 'uri')
at b.D in src/vs/workbench/contrib/comments/browser/commentThreadZoneWidget.ts:222:24
at b.create in src/vs/editor/contrib/zoneWidget/browser/zoneWidget.ts:246:8
at new b in src/vs/workbench/contrib/comments/browser/commentThreadZoneWidget.ts:148:8
at f.j in src/vs/platform/instantiation/common/instantiationService.ts:119:18
at f.createInstance in src/vs/platform/instantiation/common/instantiationService.ts:85:18
at M.O in src/vs/workbench/contrib/comments/browser/commentsController.ts:749:48
at in src/vs/workbench/contrib/comments/browser/commentsController.ts:1002:10
at Array.forEach ()
at in src/vs/workbench/contrib/comments/browser/commentsController.ts:987:17
at Array.forEach ()
at M.Z in src/vs/workbench/contrib/comments/browser/commentsController.ts:983:22
at in src/vs/workbench/contrib/comments/browser/commentsController.ts:478:9
```
[Go to Errors Site](https://errors.code.visualstudio.com/card?ch=6c3e3dba23e8fadc360aed75ce363ba185c49794&bH=6d61ff0f-3543-017f-ba38-130325f5832c)"
microsoft/vscode,2023-09-06 17:04:47,bug,Notebook: run button is rendered when revealing cell into view when hitting breakpoint,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version:
- OS Version:
Steps to Reproduce:
1. Add a breakpoint in a cell
2. Debug the cell
3. Scroll the cell out of view
4. Reveal the cell
5. Now the run button is rendered, other than the stop button
Digging into this a bit and I found that the context key service/update is correct, the problem is we receive an execution `complete` message from the extension host right after initializing the debug session so the cell execution state is `idle` instead of `pending` or `executing`. That's why when we re-render the cell, the run button is rendered.
"
microsoft/vscode,2023-09-05 19:12:57,bug,code cli shouldn't panic if it can't bind a port,"```
❯ ./code-insiders serve-web
*
* Visual Studio Code Server
*
* By using the software, you agree to
* the Visual Studio Code Server License Terms (https://aka.ms/vscode-server-license) and
* the Microsoft Privacy Statement (https://privacy.microsoft.com/en-US/privacystatement).
*
Web UI available at http://127.0.0.1:8000?tkn=a1c7a8b1-bfc2-4b91-a60e-e18fac626dad
thread 'main' panicked at 'error binding to 127.0.0.1:8000: error creating server listener: Address in use (os error 98)', /home/cloudtest/.cargo/registry/src/index.crates.io-6f17d22bba15001f/hyper-0.14.26/src/server/server.rs:79:13
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
```
Probably should print an error message instead of panicking, and maybe do that before printing the URL to connect to."
microsoft/vscode,2023-09-05 09:36:05,bug,Accept button is not visible in inline chat,"* generate an answer
* tweak it
* press Send again (by accident)
* press Stop generating
* the Accept button is no longer visible
![Image](https://github.com/microsoft/vscode/assets/5047891/34ac892d-ed31-4f11-b10d-ee92fa3518d8)
```
Version: 1.82.0-insider
Commit: f1302be1e67e3af5fbeb8bbb2ea784de7bc96150
Date: 2023-09-01T11:13:22.523Z
Electron: 25.8.0
ElectronBuildId: 23503258
Chromium: 114.0.5735.289
Node.js: 18.15.0
V8: 11.4.183.29-electron.0
OS: Darwin arm64 22.6.0
```"
microsoft/vscode,2023-09-04 13:09:38,bug,Terminal: White bar on top of terminal,"Version: 1.82.0-insider (user setup)
Commit: f1302be1e67e3af5fbeb8bbb2ea784de7bc96150
Date: 2023-09-01T11:08:40.414Z
Electron: 25.8.0
ElectronBuildId: 23503258
Chromium: 114.0.5735.289
Node.js: 18.15.0
V8: 11.4.183.29-electron.0
OS: Windows_NT x64 10.0.22631
A white bar is hiding the first characters of the last line:
![Recording 2023-09-04 at 15 18 55](https://github.com/microsoft/vscode/assets/6461412/cce87b8a-a1d6-4dc7-8d50-ded57b604a24)
![Image](https://github.com/microsoft/vscode/assets/6461412/ce15937d-78b0-40ef-a465-24c78d40bf38)
Last week I tested some terminal accessibility issues, using the terminal buffer, maybe that's related."
microsoft/vscode,2023-09-04 08:48:52,bug,Don't detect aligned code as moved,"Don't show non-moves:
![Image](https://github.com/microsoft/vscode/assets/2931520/6a80bb42-7bd8-433b-a41f-af99d44560f5)
[Monaco Editor Repro](https://microsoft.github.io/monaco-editor/playground.html?source=v0.42.0-dev-20230904#XQAAAAK3JAAAAAAAAABBqQkHQ5NjdMjwa-jY7SIQ9S7DNlzs5W-mwj0fe1ZCDRFc9ws9XQE0SJE1jc2VKxhaLFIw9vEWSxW3yscyIQRnzlomSBcA3EzPkMOWcvqy7fj-WpAGlaTunV0ezgt0-lrqbxDLBHp40VgoG1h0bYn33Z5W8JwKP8faa2U12FLGnWghxFOkfPhEOEevGj81IbTtL6Ld4LFL-hBvbp0k-XSoxa7IRE-EmrVD6gb_EMvQHtDePYSE1RfALToyLD1JPlY6IbqOY2EAdk26JxkYD43vdAVrCgTuddhyHQfardBF5ljU86TWzXewJ2H5jS-Hl4PNJF5i_cMbqKfevhmFDIbD7Q3aD6hChhhyJTcImz3ASkmLluTbYFygXKpXoeJp570vHkx0ZchqGGEetOXSI8h4cG__BgUbusCGIHKdewGYW3CHjfbY64q7cktl7OQyDLtb5hLRxlm-ULDNGqt06SYuMgqnavqrW42s273OUZIQ4bY7L25mPIKex9yRqTRidVwLB3BvFAk_3xnTVMzDlYLClhuy5EFaytECcEbyP8SOf6mdRcJ0Lf2eyUHaBXb7Xv6jrmlHVP3TDOaD5e1icb1_n_k03_G5C57d5FPlO6cIrShF4EwABjIBb_rECvlWibgSdyUk2zJ-nwakgt_GtTI313hO6WopDQIPsF03LR3ZbAgRCGLUOdNNQCUhbf99n8Saum4nYlYVf8F0e47zs3VbZdW_hyv0pUv28AMe7x-w1QtVBFAjISmFUnFTC7anISph8irYSdRKqM23pSXBBHk0__81vgwSUCPxAy72rTwTzv3QnCk0t4RtNhM1IBUHqfkoYuFycQw9Qcie-jdF4YuCQlPVcqWnrF8TGNz-WfeaucrXPo6cxXieTc6ynWHcNwkGPrIGM25Zdo04jzBDBpQNsh1cqXICLx0FDOqMe2PtRZO3QWYxsnWq71ZTdlx5jy4K_mm8DfQWL0-8QCWjpMJBCkgAbEhsc5HXwLFn54zblz162W1nEaQ1YUE-Uw1VI1XM-XPIUGQmXv85eTcF8eYg1QphNLpV98bek-0EBYtVKeCADi9F_y3EKr-JhbngkGsTIB7OEuW70Ma0M8MMRDWj65KgZEnVjXBnJEgn5RhUbMgWMfQsj1ADtB4mV85yGzAwqQr4HMFhBG6_-loXEbVRCfs2fA0l3OAL2nicZI3OnQ7SOWAM2IXvgN87UJ1xHrkHbre3v5q_iFvVgD3m0BGnWs9ZBR2ZLIhxg670IOHR3SSebcJhreoAcMRmE_zPrMa3UCZT4c5_tH-jFPK4Oa1GqiMWlAjKPJUlEZFr9j917M9wZ82jZ9y_pwZnW8sJGF_s6FGxtQ4D1KEbW1mZo6vYsZnH5U8X14N_6y2OzwLCIX-6ws1h8aj-3AiKoTc9mOsVpFshV6ZKIF0SsM5rjwgNhBf7CNkpvOhMreugXs7i1f5SSe2_iOWFWgx7Ix-VnBbHT-0co27hkT7pG2k_agfwJ0gRsNYrl5JfPT6eMSxhKEHGQxMF0GF6l3by8kTw7n485SiS1ErzSzklkoknQwi19izjQidVa1xFpMdfrL9dr0FRh6kGR6pHqOyob_iWUfYUuJJBJA6bsr27LiVI1P0GcsITTsMpyAsEW8RAIajvwLfQm0GVOBzbPcnRLGMaOvX5YYW59SrMadg6N-8sPLjf0mdmF7rwnEDLX01UERzzz0oZLdRcikQK8tI6EvStPe5S8SXtMuhMPmoeHj16fe0Z5IFW99GJr8_RGinXCk1RPhowmnSxua3IuJwJcXBR8pJrbzwcubfzqdCCeUd_p4myCNUMRJze8_TgPLlJWjEYyP0_d9JREaCx585qwtgNQkb1mvfj38sDw3HOh3Zx
dfUiSxcBC_rc_nT5tsyYPDGtmFuO8h-GqDLAJ_aY0iC84cD3stwJ7VPZvs8UNPt5caJkaQ4ZPcVfE3LVZ_tCI0OHv1qydAaUm4Q1iRCtpxVwp1C9EjFnktxDny6vlAn4hRuzze4KjCpJ8RgYwaXOBW1RgdE2FasQj1B2L-dKVH7mVqbcp_ABkmRMz53hnmOltl0M4b84-DjSdIk3wQKAetegw8MPL-lMTEdQYwu5Zy0FOzFdpma-0u-v4lDCMiqU_gICJfT24GT6U1XSUZcX1Y46xK7II-tzPod52YdEYLBuWeKBs7u3vu5aDBJEflKqKtVvSlZd0UmyM0B94U3wwLvKj06qB4bILkylyuhL4gHzvyT6iab64Iagfqh7dpyQx43HNrgsMI-4vGPUuxKBH4r9WHLTV8ZN2gRwabvcfFzzC6n1e3LoX1SImXU9nFj01pDwhoqUwin-acYfFRQLs8c4UfU1-dXFRkEteaQafCsZv6gBLfDu758R8l7w7T8GsgjXYed-wb6mJ31ZCVIPYH28dPAfe2kzHH7f7QVUPaYBIfiA1agvKDyKJlWH_7soN6LGvCWejfWDT_N4_-Uod0Kvxz2A05CUgWMMdCZuv2teMzZSoApmvu2nR6UlLNEFeyCPhAUbq13oYR5fRc84dOXBqmM41MBMq3txeTGXOX0KPzc0qzuRQUE5qyfQnjImtoULFIdIxFEY9S92mqgsb9gn1Qu77LsDv1HuobqQcFqedNbGqxGq0MUrzThF_fRXy7vipPbXQsB6a0MzRyVdtk8C7CvnyBa5H9vzIX-2Z4IU5JS7FklNUJI8OJ2BbbXOv7MwogNh9s07aI_i6i2kxiN63G__-iEpEw)"
microsoft/vscode,2023-09-01 16:34:50,bug,Compress single test messages in the Test Results tree view," @connor4312 testing this out and it looks better to me - although is it the case that now there will always be only a single child in the tree below the test?
![image](https://github.com/microsoft/vscode/assets/1078012/996afa2d-231b-4e0e-9d42-aa1fa69deff0)
If so, could the output not be shown against the test node and the child removed? Currently it seems like clicking on the test name doesn't do anything (except maybe expand/collapse) which I find confusing. I feel like clicking on the test name and seeing the output (and it having no children) would be better - but I'm not sure if there are cases where there are more children.
Thanks!
_Originally posted by @DanTup in https://github.com/microsoft/vscode/issues/187104#issuecomment-1700873018_
"
microsoft/vscode,2023-08-31 23:04:51,bug,SSH/DevContainer Port Forwarding broken,"
Does this issue occur when all extensions are disabled?: Yes/No
```
Version: 1.82.0-insider (system setup)
Commit: 3cd6f481266dcbd2ca2fcff43b4465d747c78e2f
Date: 2023-08-31T17:34:55.916Z
Electron: 25.7.0
ElectronBuildId: 23434598
Chromium: 114.0.5735.289
Node.js: 18.15.0
V8: 11.4.183.29-electron.0
OS: Windows_NT x64 10.0.22621
```
Issue similar to #190859 - this time it affects **Devcontainers** running on **remote ssh hosts** ⚠️
Client: Windows 11
Remote-Host: Ubuntu 22.04
Container: Debian 11
Repo for reproduction: https://github.com/gecio/gecio.github.io
Steps to Reproduce:
1. Connect to ssh host
2. Clone [Repo](https://github.com/gecio/gecio.github.io)
3. Open in devcontainer when asked
4. Launch Task ""Serve"" to start webserver
5. Open http://localhost:4000 and see indefinite loading.
I can't test it without the remote ssh host :/
Shared-Log
```
2023-09-01 00:51:07.658 [info] [SharedProcessTunnelService] Created tunnel 1: 127.0.0.1:64076 (local) to 127.0.0.1:7863 (remote).
2023-09-01 00:51:07.673 [info] [SharedProcessTunnelService] Created tunnel 2: 127.0.0.1:33761 (local) to 127.0.0.1:33761 (remote).
2023-09-01 00:51:07.833 [info] [SharedProcessTunnelService] Created tunnel 3: localhost:4000 (local) to localhost:4000 (remote).
2023-09-01 00:51:07.834 [info] [SharedProcessTunnelService] Created tunnel 4: localhost:35729 (local) to localhost:35729 (remote).
2023-09-01 00:51:09.216 [info] Getting Manifest... ms-vscode-remote.remote-containers
2023-09-01 00:51:09.313 [info] Installing extension: ms-vscode-remote.remote-containers
2023-09-01 00:51:10.103 [info] Extension signature is verified: ms-vscode-remote.remote-containers
2023-09-01 00:51:10.426 [info] Extracted extension to file:///c%3A/Users/max06/.vscode-insiders/extensions/ms-vscode-remote.remote-containers-0.308.0: ms-vscode-remote.remote-containers
2023-09-01 00:51:10.432 [info] Renamed to c:\\Users\\max06\\.vscode-insiders\\extensions\\ms-vscode-remote.remote-containers-0.308.0
2023-09-01 00:51:10.439 [info] Extracting extension completed. ms-vscode-remote.remote-containers
2023-09-01 00:51:10.463 [info] Extension installed successfully: ms-vscode-remote.remote-containers
2023-09-01 00:51:10.469 [info] Marked extension as uninstalled ms-vscode-remote.remote-containers-0.307.0
2023-09-01 00:51:12.628 [info] [SharedProcessTunnelService] Created tunnel 5: 127.0.0.1:42025 (local) to 127.0.0.1:42025 (remote).
2023-09-01 00:51:30.294 [info] Creating a socket (renderer-Tunnel-f2cc70c7-d306-4708-b542-3bfdd2cb513f)...
2023-09-01 00:51:30.416 [info] Creating a socket (renderer-Tunnel-f2cc70c7-d306-4708-b542-3bfdd2cb513f) was successful after 123 ms.
2023-09-01 00:51:30.957 [info] Creating a socket (renderer-Tunnel-57cbefc6-cb29-42ef-89ac-2ff5873ad23a)...
2023-09-01 00:51:31.049 [info] Creating a socket (renderer-Tunnel-57cbefc6-cb29-42ef-89ac-2ff5873ad23a) was successful after 93 ms.
2023-09-01 00:51:31.223 [info] Creating a socket (renderer-Tunnel-d3fcfdea-adbf-44f9-a6da-2d1089f1c7fd)...
2023-09-01 00:51:31.311 [info] Creating a socket (renderer-Tunnel-d3fcfdea-adbf-44f9-a6da-2d1089f1c7fd) was successful after 90 ms.
2023-09-01 00:56:31.298 [info] Creating a socket (renderer-Tunnel-ec10ac1a-bbec-46d3-bb84-2667f176bf2f)...
2023-09-01 00:56:31.400 [info] Creating a socket (renderer-Tunnel-ec10ac1a-bbec-46d3-bb84-2667f176bf2f) was successful after 101 ms.
2023-09-01 00:58:16.519 [info] Creating a socket (renderer-Tunnel-bebfb153-c346-414c-bdcf-e567fb8e8bf6)...
2023-09-01 00:58:16.623 [info] Creating a socket (renderer-Tunnel-bebfb153-c346-414c-bdcf-e567fb8e8bf6) was successful after 104 ms.
2023-09-01 00:58:16.783 [info] Creating a socket (renderer-Tunnel-54b3ca0c-f89e-4827-bd48-d42869c6094f)...
2023-09-01 00:58:16.891 [info] Creating a socket (renderer-Tunnel-54b3ca0c-f89e-4827-bd48-d42869c6094f) was successful after 109 ms.
```
Dev console doesn't contain anything related.
The extension host shows:
```
2023-09-01 00:53:21.861 [error] Error: read ECONNRESET
at TCP.onStreamRead (node:internal/stream_base_commons:217:20)
2023-09-01 00:53:21.861 [error] Error: read ECONNRESET
at TCP.onStreamRead (node:internal/stream_base_commons:217:20)
2023-09-01 00:53:21.985 [error] Error: read ECONNRESET
at TCP.onStreamRead (node:internal/stream_base_commons:217:20)
2023-09-01 00:53:22.031 [error] Error: read ECONNRESET
at TCP.onStreamRead (node:internal/stream_base_commons:217:20)
2023-09-01 00:53:22.451 [error] Error: read ECONNRESET
at TCP.onStreamRead (node:internal/stream_base_commons:217:20)
```
Although I'm not sure if that's related.
The server output is interesting:
```
2023-08-31 23:03:58.609 [error] Error: connect ECONNREFUSED ::1:4000
at TCPConnectWrap.afterConnect [as oncomplete] (node:net:1494:16)
2023-08-31 23:03:58.870 [error] Error: connect ECONNREFUSED ::1:4000
at TCPConnectWrap.afterConnect [as oncomplete] (node:net:1494:16)
```
This is directly related - it happens right/shortly after opening the url in the browser."
microsoft/vscode,2023-08-31 19:49:05,bug,Can't change local address port in port forwarding,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version: Todays insider
- OS Version: Win11
Steps to Reproduce:
![image](https://github.com/microsoft/vscode/assets/7556827/d8f0cd2d-8664-4060-b93a-889dbe978855)
Be in a **devcontainer** (here on a remote ssh host, if that matters).
Try to set a new local port for an existing port forwarding.
Enter `localhost:5000` -> does not work
Enter `5000` -> does work.
"
microsoft/vscode,2023-08-31 14:14:09,bug,xterm live region isn't present unless output spans full viewport,noticed by @rperez030
microsoft/vscode,2023-08-31 12:46:34,bug,"`app#resolveInitialProtocolUrls()` does not remove `?windowId=_blank""` part of the URL with `vscode:` schema","Type: Bug
I'm trying to open workspaces with links like this: `vscode://vscode-remote/ssh-remote+viknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId=_blank`
When VS Code is launched before opening the link, everything working as intended: the new window opens, connects to the server and loads working workspace.
But if VS Code was not running, loaded workspace has some problems: almost every internal URI contains `?windowId=_blank` in `external` and this breaks many extensions. I think this is effect of passing ""query"":""windowId=_blank"" to the Remote-SSH extension.
I enabled trace logging and found different behaviour in handling URL when running vs. not running VS Code process:
Launching VS Code with link
```
2023-08-31 06:53:59.892 [trace] app#resolveInitialProtocolUrls() protocol urls from macOS 'open-url' event: [""vscode://vscode-remote/ssh-remote+viknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId=_blank""]
2023-08-31 06:53:59.893 [trace] app#resolveInitialProtocolUrls() protocol url will be handled as window to open: vscode://vscode-remote/ssh-remote+viknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId=_blank {""workspaceUri"":{""$mid"":1,""path"":""/home/viknet/arc-project/arc-project.code-workspace"",""scheme"":""vscode-remote"",""authority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net"",""query"":""windowId=_blank""}}
2023-08-31 06:53:59.893 [trace] ElectronURLListener initialUrisToHandle: []
2023-08-31 06:53:59.893 [trace] ElectronURLListener: waiting for window to be ready to handle URLs...
2023-08-31 06:53:59.893 [trace] lifecycle (main): phase changed (value: 2)
2023-08-31 06:53:59.893 [trace] windowsManager#open
2023-08-31 06:53:59.893 [trace] windowsManager#open pathsToOpen [{""workspace"":{""id"":""805a4cf8f6335103243bb008f275ef99"",""configPath"":{""$mid"":1,""external"":""vscode-remote://ssh-remote%2Bviknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId%3D_blank"",""path"":""/home/viknet/arc-project/arc-project.code-workspace"",""scheme"":""vscode-remote"",""authority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net"",""query"":""windowId=_blank""}},""remoteAuthority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""}]
2023-08-31 06:53:59.894 [trace] windowsManager#doOpenFolderOrWorkspace {""folderOrWorkspace"":{""workspace"":{""id"":""805a4cf8f6335103243bb008f275ef99"",""configPath"":{""$mid"":1,""external"":""vscode-remote://ssh-remote%2Bviknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId%3D_blank"",""path"":""/home/viknet/arc-project/arc-project.code-workspace"",""scheme"":""vscode-remote"",""authority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net"",""query"":""windowId=_blank""}},""remoteAuthority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""}}
```
Opening link when VS Code is running
```
2023-08-31 06:58:06.231 [trace] app#handleProtocolUrl(): vscode://vscode-remote/ssh-remote+viknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId=_blank {""originalUrl"":""vscode://vscode-remote/ssh-remote+viknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId=_blank""}
2023-08-31 06:58:06.232 [trace] app#handleProtocolUrl() found 'windowId=_blank' as parameter, setting shouldOpenInNewWindow=true: vscode://vscode-remote/ssh-remote+viknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace?windowId=_blank
2023-08-31 06:58:06.232 [trace] app#handleProtocolUrl() opening protocol url as window: {""workspaceUri"":{""$mid"":1,""path"":""/home/viknet/arc-project/arc-project.code-workspace"",""scheme"":""vscode-remote"",""authority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""}} vscode://vscode-remote/ssh-remote+viknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace
2023-08-31 06:58:06.232 [trace] windowsManager#open
2023-08-31 06:58:06.232 [trace] windowsManager#open pathsToOpen [{""workspace"":{""id"":""f7c3787ac45e98797d40d7528f7be026"",""configPath"":{""$mid"":1,""external"":""vscode-remote://ssh-remote%2Bviknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace"",""path"":""/home/viknet/arc-project/arc-project.code-workspace"",""scheme"":""vscode-remote"",""authority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""}},""remoteAuthority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""}]
2023-08-31 06:58:06.232 [trace] windowsManager#doOpenFolderOrWorkspace {""folderOrWorkspace"":{""workspace"":{""id"":""f7c3787ac45e98797d40d7528f7be026"",""configPath"":{""$mid"":1,""external"":""vscode-remote://ssh-remote%2Bviknet-bionic.sas.yp-c.yandex.net/home/viknet/arc-project/arc-project.code-workspace"",""path"":""/home/viknet/arc-project/arc-project.code-workspace"",""scheme"":""vscode-remote"",""authority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""}},""remoteAuthority"":""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""}}
```
And `TRACE workbench#open(): with configuration` looks like this:
```
{
...
""workspace"":
{
""id"": ""805a4cf8f6335103243bb008f275ef99"",
""configPath"":
{
""$mid"": 1,
""path"": ""/home/viknet/arc-project/arc-project.code-workspace"",
""scheme"": ""vscode-remote"",
""authority"": ""ssh-remote+viknet-bionic.sas.yp-c.yandex.net"",
""query"": ""windowId=_blank""
}
},
...
}
```
vs.
```
{
...
""workspace"":
{
""id"": ""f7c3787ac45e98797d40d7528f7be026"",
""configPath"":
{
""$mid"": 1,
""path"": ""/home/viknet/arc-project/arc-project.code-workspace"",
""scheme"": ""vscode-remote"",
""authority"": ""ssh-remote+viknet-bionic.sas.yp-c.yandex.net""
}
},
...
}
```
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:40:25.698Z)
OS version: Darwin arm64 22.5.0
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Apple M1 Pro (10 x 24)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled metal: disabled_off multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|1, 2, 2|
|Memory (System)|32.00GB (7.32GB free)|
|Process Argv|--enable-proposed-api jeanp413.open-remote-ssh|
|Screen Reader|no|
|VM|0%|
Extensions: none
"
microsoft/vscode,2023-08-31 08:23:21,bug,Open dialog filter does not work for multiple extension files (e.g. .tar.gz) when working with Remote-SSH,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version: 1.77.3
- OS Version: Ubuntu 22.04.2 LTS
Steps to Reproduce:
1. set the open dialog options filter to accept .tar.gz files:
const openDialogOptions: vscode.OpenDialogOptions = {
canSelectFiles: true,
canSelectFolders: false,
canSelectMany: false,
openLabel: 'Select',
filters: { 'targz': ['tar.gz'] }
};
await vscode.window.showOpenDialog(openDialogOptions);
2. Run the extension in Remote-SSH mode and try to select a .tar.gz file
"
microsoft/vscode,2023-08-31 06:14:32,bug,Cannot select a path outside the workspace to save the file,"ADD ISSUE DESCRIPTION HERE
Version: 1.81.1
Commit: 6c3e3dba23e8fadc360aed75ce363ba185c49794
User Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36
Embedder: vscode.dev
![bug1](https://github.com/microsoft/vscode/assets/11473889/29f86a05-8305-4511-a1f1-3df582d21324)
Steps:
1. Open a repo on vscode.dev
2. Open a file in any directory
3. Double-click the blank space to create a new file and use the shortcut key to save it
4. You can find the path to save the file can directly click on the previous level
Although the path beyond the current workspace is not saved successfully, in theory, it should not be possible to choose such a path."
microsoft/vscode,2023-08-30 21:21:21,bug,Bracket colorization for markdown links with mismatched parentheses,"![image](https://github.com/microsoft/vscode/assets/26030610/2fe78b57-ffcb-4c35-9a80-72e3cef54c97)
issue surfaced when testing: https://github.com/microsoft/vscode/issues/188867
It correctly displays with <> but the rightmost parenthesis is still highlighted red. Likely the bracket pair colorization isn't working properly in this case."
microsoft/vscode,2023-08-30 15:52:20,bug,change default value of `focusAfterRun` to be `none` for screen reader users,"Changing this behavior could be jarring, so instead, we will set it to `none` by default and include a hint about this in the terminal accessibility help dialog. "
microsoft/vscode,2023-08-30 07:44:13,bug,Unable to forward port due to tunnel limit ,"Testing #191540
* Using latest insiders
```
Version: 1.82.0-insider
Commit: 35be9bf683eace09796e59d54f1f225bbc3a7866
Date: 2023-08-30T06:26:06.760Z
Electron: 25.7.0
ElectronBuildId: 23434598
Chromium: 114.0.5735.289
Node.js: 18.15.0
V8: 11.4.183.29-electron.0
OS: Linux x64 6.2.0-31-generic snap
```
* Initiate forward port and after successful sign in flow, following error is seen
```
2023-08-30 16:40:00.726 [info] [forwarding] starting CLI
2023-08-30 16:40:00.810 [info] [forwarding] [2023-08-30 16:40:00] debug No code server tunnel found, creating new one
2023-08-30 16:40:00.810 [info] [forwarding] [2m[2023-08-30 16:40:00] trace Found token in keyring
2023-08-30 16:40:03.915 [info] [forwarding] [0m[2023-08-30 16:40:03] info Creating tunnel with the name: parallels-parallels-virtual-platform
2023-08-30 16:40:03.915 [info] [forwarding] [2m[2023-08-30 16:40:03] trace Found token in keyring
2023-08-30 16:40:06.699 [info] [forwarding] [0m[2m[2023-08-30 16:40:06] trace Found token in keyring
2023-08-30 16:40:07.720 [info] [forwarding] [0m[2m[2023-08-30 16:40:07] trace Tunnel limit hit, trying to recycle an old tunnel
2023-08-30 16:40:07.720 [info] [forwarding] [0m[2m[2023-08-30 16:40:07] trace Found token in keyring
2023-08-30 16:40:10.487 [info] [forwarding] [0m[2m[2023-08-30 16:40:10] trace No tunnels available to recycle
2023-08-30 16:40:10.489 [info] [forwarding] [0m[2023-08-30 16:40:10] error Could not create tunnel with name: parallels-parallels-virtual-platform
2023-08-30 16:40:10.489 [info] [forwarding] Reason: The request was denied because it would exceed the limit for 'TunnelsPerUserPerLocation' (5).
2023-08-30 16:40:10.492 [info] [forwarding] exited with code 1
```"
microsoft/vscode,2023-08-30 01:10:04,bug,Wrong extension name reported as installed when installing extension pack,"1. Install this vsix https://github.com/microsoft/vscode-jupyter/files/12454365/ms-toolsai-jupyter-hub-insiders.vsix.zip
2. :bug: VS Code shows this success notification
![Image](https://github.com/microsoft/vscode/assets/30305945/ddf59f13-c2cd-4b1f-a661-b11727e42d86)
"
microsoft/vscode,2023-08-29 21:34:35,bug,`x` button in accessibility help view triggers error,"Type: Bug
1. Run `Open accessibility help...`
2. Try clicking on the `x` in the help
**bug**
See the error:
```
workbench.desktop.main.js:sourcemap:765 Unable to write to User Settings because accessibility.verbosity.editor is not a registered configuration.
```
VS Code version: Code - Insiders 1.82.0-insider (Universal) (ebd67244fb2da33ab078bb2baa96106fda29f336, 2023-08-29T05:34:04.713Z)
OS version: Darwin x64 22.6.0
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz (16 x 2400)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|3, 9, 10|
|Memory (System)|32.00GB (0.92GB free)|
|Process Argv|--crash-reporter-id 48781ca2-1705-4f64-9bab-325055aab55d|
|Screen Reader|no|
|VM|0%|
"
microsoft/vscode,2023-08-29 20:07:40,bug,Linux: Notifications Accessible View only opens when focusing with mouse,"Testing #191344
After focusing the notification by keyboard I cannot open the Accessible View. Only after clicking into it with the mouse cursor does the Accessible View open.
(Today's Insiders build on Linux.)"
microsoft/vscode,2023-08-29 19:59:10,bug,Padding is off on the quick question `x` button,"Testing #191496
There's no gap between the x button and the right side; hovering it makes it really clear:
![Image](https://github.com/microsoft/vscode/assets/2193314/493238bc-980d-456c-ae2f-eacfd07f959f)
![Image](https://github.com/microsoft/vscode/assets/2193314/2ae6be83-c1d7-4e5c-9b29-8e62ac866e28)
![Image](https://github.com/microsoft/vscode/assets/2193314/44acbdd2-c61c-4ddd-bb44-e558e03f82ae)
"
microsoft/vscode,2023-08-29 18:08:46,bug,Use better codicon for action to disable verbosity hint,"We use an X now, which to me feels more like a 'dismiss accessible help' button. Maybe `bell-slash` would be more appropriate https://github.com/microsoft/vscode-codicons/blob/main/src/icons/bell-slash.svg, or even `circle-slash` https://github.com/microsoft/vscode-codicons/blob/main/src/icons/circle-slash.svg
![Image](https://github.com/microsoft/vscode/assets/30305945/dc0762b1-8ca1-40f0-9d57-74fed2d9edb9)
"
microsoft/vscode,2023-08-29 17:46:50,bug,Clicking into notebook markdown search result clears result,"Testing #191488
1. Create a notebook with a markdown cell
2. Close the notebook
3. Search for some text in the notebook markdown
4. Click on search result for the markdown content
**Bug**
The search results view is cleared
![Image](https://github.com/microsoft/vscode/assets/12821956/e8ff52f7-6a4c-4f67-8f9c-e1acba16d03a)
Maybe because the markdown cell is now in edit mode?"
microsoft/vscode,2023-08-29 17:25:47,bug,Copy image output doesn't work on Linux,"Testing #191502
1. Have a cell with an image
2. Copy the cell output
3. Paste in something like Gimp
4. 🐛 Nothing happens
Both `xclip` and Gimp don't see anything written to the clipboard. Copying images from a browser works so I know the clipboard can be written with images."
microsoft/vscode,2023-08-29 15:18:51,bug,"[Bug] When the window is downsized, icons to expand the hidden lines move to the left","This behavior is seen in the following GIF. Perhaps the icons should remain in the same position:
https://github.com/microsoft/vscode/assets/61460952/e344ce73-9696-4539-a3f8-bda01f4d6782
"
microsoft/vscode,2023-08-29 14:04:30,bug,Error: Throttler is disposed in serve-web,"Testing #191542
- VS Code Version:
Version: 1.82.0-insider
Commit: ebd67244fb2da33ab078bb2baa96106fda29f336
Date: 2023-08-29T05:03:17.701Z
Browser: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36
- OS Version: Arch Linux
Steps to Reproduce:
1. Download latest insider vscode cli
2. Run `code-insider serve-web` , open the browser and wait for vscode server to open
3. Open and close some workspaces and folders
4. The following error lines appear in terminal where cli is run:
```
[2023-08-29 16:57:20] info [ebd6724 stderr]: [16:57:20] Error: Throttler is disposed
[2023-08-29 16:57:20] info [ebd6724 stderr]: at c.queue (//cli/serve-web/ebd67244fb2da33ab078bb2baa96106fda29f336/out/vs/server/node/server.main.js:70:13632)
[2023-08-29 16:57:20] info [ebd6724 stderr]: at //cli/serve-web/ebd67244fb2da33ab078bb2baa96106fda29f336/out/vs/server/node/server.main.js:70:15216
[2023-08-29 16:57:20] info [ebd6724 stderr]: at //cli/serve-web/ebd67244fb2da33ab078bb2baa96106fda29f336/out/vs/server/node/server.main.js:70:14852
[2023-08-29 16:57:20] info [ebd6724 stderr]: at runNextTicks (node:internal/process/task_queues:60:5)
[2023-08-29 16:57:20] info [ebd6724 stderr]: at listOnTimeout (node:internal/timers:538:9)
[2023-08-29 16:57:20] info [ebd6724 stderr]: at process.processTimers (node:internal/timers:512:7)
```
"
microsoft/vscode,2023-08-29 13:33:28,bug,Settings feedback,"Testing #191527
Two polish items:
* the link to the setting does not seem to work
* you talk about ""making the focused view more obvious"" but that is not true, this feature only works for ""text editors"" and ""terminals"", so I would clarify that
![Image](https://github.com/microsoft/vscode/assets/900690/082040dc-9868-455e-ac17-82d5d77835ed)
"
microsoft/vscode,2023-08-29 13:23:07,bug,Editor placeholder (error case) does not dim,"Testing #191527
![Image](https://github.com/microsoft/vscode/assets/900690/1e98226a-3bbc-4f78-ac3b-9d8e913effd1)
"
microsoft/vscode,2023-08-29 13:21:06,bug,Keybindings editor does not dim,"Testing #191527
![Image](https://github.com/microsoft/vscode/assets/900690/530bc461-27b3-41b8-822b-1fac1aba955c)
"
microsoft/vscode,2023-08-29 13:11:54,bug,Remove `envCollectionOptions` from product.json,"```
Via 'product.json#extensionEnabledApiProposals' extension 'ms-python.python' wants API proposal 'envCollectionOptions' but that proposal DOES NOT EXIST. Likely, the proposal has been finalized (check 'vscode.d.ts') or was abandoned.
```"
microsoft/vscode,2023-08-29 12:11:18,bug,[Accessibility] Alt+F1 does not work in terminal,"
Type: Bug
This issue is found on Windows.
1. Open terminal.
2. Press Alt+F1.
It says:
> TypeError: Cannot read properties of undefined (reading 'getAriaLabel')
VS Code version: Code - Insiders 1.82.0-insider (ebd67244fb2da33ab078bb2baa96106fda29f336, 2023-08-29T05:32:55.965Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1145G7 @ 2.60GHz (8 x 2611)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.71GB (6.49GB free)|
|Process Argv|--crash-reporter-id b05b88e5-8894-4031-ae34-fa034ebddea9|
|Screen Reader|yes|
|VM|0%|
Extensions (89)
Extension|Author (truncated)|Version
---|---|---
android-dev-ext|ade|1.3.2
Bookmarks|ale|13.4.1
openscad|Ant|1.2.1
spellright|ban|3.0.116
zoterolatex|bna|0.4.1
mermaid-markdown-syntax-highlighting|bpr|1.5.2
doxdocgen|csc|1.4.0
vscode-markdownlint|Dav|0.51.0
vscode-eslint|dba|2.4.2
vscode-quick-select|dba|0.2.9
vscode-deno|den|3.20.0
gitlens|eam|14.2.1
EditorConfig|Edi|0.16.4
prettier-vscode|esb|10.1.0
vscode-google-translate|fun|1.4.13
codespaces|Git|1.14.16
copilot|Git|1.105.366
copilot-chat|Git|0.7.2023082902
remotehub|Git|0.60.0
vscode-github-actions|git|0.26.1
vscode-pull-request-github|Git|0.70.0
cslpreview|igo|0.2.2
easy-snippet|inu|0.6.3
path-autocomplete|ion|1.24.1
latex-workshop|Jam|9.13.4
lilypond-syntax|jea|0.1.1
scheme|jea|0.2.0
better-cpp-syntax|jef|1.17.2
google-search|kam|0.0.1
vscode-lua-format|Koi|1.3.8
lilypond-formatter|lhl|0.2.3
lilypond-pdf-preview|lhl|0.2.8
lilypond-snippets|lhl|0.1.1
vslilypond|lhl|1.7.3
zotero|mbl|0.1.10
git-graph|mhu|1.30.0
vscode-docker|ms-|1.26.0
black-formatter|ms-|2023.4.1
flake8|ms-|2023.6.0
isort|ms-|2023.11.12061012
python|ms-|2023.14.0
vscode-pylance|ms-|2023.8.40
jupyter|ms-|2023.7.1002162226
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
remote-containers|ms-|0.307.0
remote-ssh|ms-|0.105.1
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.81.0
vscode-remote-extensionpack|ms-|0.24.0
azure-repos|ms-|0.36.0
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
js-debug-nightly|ms-|2023.8.2817
powershell|ms-|2023.6.0
remote-repositories|ms-|0.38.1
vscode-github-issue-notebooks|ms-|0.0.129
vscode-selfhost-test-provider|ms-|0.3.16
vscode-serial-monitor|ms-|0.10.0
vsliveshare|ms-|1.0.5883
autodocstring|njp|0.6.1
pandocciter|not|0.10.3
shiny-python|Pos|0.1.2
shinyuieditor|pos|0.4.3
quarto|qua|1.95.1
r-debugger|RDe|0.5.4
java|red|1.21.0
vscode-xml|red|0.26.1
r|REd|2.8.1
multi-command|ryu|1.6.0
vscode-deepl|soe|1.0.6
abc-music|sof|0.4.0
lua|sum|3.7.0
latex-utilities|tec|0.4.10
cmake|twx|0.0.17
errorlens|use|3.13.0
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-arduino|vsc|0.6.0
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.13
vscode-java-test|vsc|0.39.1
vscode-maven|vsc|0.42.0
markdown-all-in-one|yzh|3.5.1
grammarly|znc|0.22.1
(1 theme extensions excluded)
A/B Experiments
```
vsliv695:30137379
vsins829:30139715
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
pythontb:30258533
pythonptprofiler:30281269
vshan820:30294714
vscod805cf:30301675
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30404738
py29gd2263:30784851
vsclangdf:30492506
c4g48928:30535728
dsvsc012:30540252
pynewext54:30618038
a9j8j154:30646983
showlangstatbar:30737417
ecj1e332:30687743
pythonfmttext:30716741
fixshowwlkth:30771523
showindicator:30805243
pythongtdpath:30726887
i26e3531:30792625
gsofa:30797620
welcomedialog:30812478
pythonnosmt12:30779711
pythonidxpt:30768918
pythonnoceb:30776497
copilotsettingt:30808721
asynctok:30821568
dsvsc013:30777762
dsvsc014:30777825
diffeditorv2:30786206
pythonlinttype:30823781
pythonmpsinfo:30815194
dsvsc015:30821418
```
"
microsoft/vscode,2023-08-29 09:47:23,bug,Debug: Open Link created chrome config when only msedge is installed,"Testing #191545
I don't have Chrome installed, but Edge. If Open Link would detect which browsers are available, this would have worked out-of-the-box."
microsoft/vscode,2023-08-28 23:43:23,bug,quick search - file highlight decoration isn't showing up on search,"1. use quick search for something like `% dispose(` that is in the current file.
2. Notice that, while the picker is open, the matches aren't highlighted :bug:
You should see it like this on Dark Modern
![Image](https://github.com/microsoft/vscode/assets/31675041/c99d40d9-a1c7-4cfe-a9c4-f9610faec23b)
"
microsoft/vscode,2023-08-28 18:27:09,bug,exec server port forwarding fails for certain cases," I still have this issue in the latest version:
```
Version: 1.82.0-insider
Commit: 083fca132543aa91a7e1de2dc23857d70ea56dd3
Date: 2023-08-25T05:44:25.625Z (23 hrs ago)
```
To reproduce:
1. Use node script [from here](https://gist.github.com/Dador/746e9b7806f2d3b0f4e7d6913950fc00).
2. Do a request from client side:
```
head -c 10000 /dev/urandom | curl 'http://127.0.0.1:9090/' -X POST --data-binary @-
```
3. Server side receives only the first chunk:
```
on req
on data 3726
```
After this, the request gets stuck and never ends.
In my case, the issue mostly affects small requests. A 10kb request always has the issue, but a 100kb request seems to be consistently fine. Size of the response also seems to be important.
I connected to a distant server (with a ping of ~300 ms), but there doesn't seem to be any problem with the connection itself.
_Originally posted by @Dador in https://github.com/microsoft/vscode/issues/190859#issuecomment-1694181472_
---
I have discovered this to be an issue in the CLI or SDK's compression handling. It seems like something is not flushing or decompressing completely. Disabling connection compression fixes, it but this is not a good solution."
microsoft/vscode,2023-08-28 17:39:17,bug,Context menu for Quick Search appearing in search view,"1. Right-click in the search view.
2. See this option, which brings you to the quick search :bug: This shouldn't be here.
![Image](https://github.com/microsoft/vscode/assets/31675041/95a7c11a-0dbe-4d01-91e7-abde633e8e55)
"
microsoft/vscode,2023-08-27 23:14:27,bug,[Bug] Tela preta no terminal do VS Code ,"Type: Bug
o meu terminal do vscode ficou todo preto derrepente, ele esta normal e quando fui abri-lo estava preto, nao consigo resolver e nao posso voltar a minha rotina por causa disso.
My vscode terminal suddenly went all black, is that normal and when I opened it was black, I can't resolve it and I can't get back to my routine because of this.
![2023-08-23](https://github.com/microsoft/vscode/assets/90806102/43143f05-3de3-445d-9bba-bf31db8cb0ca)
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|AMD Ryzen 5 5600X 6-Core Processor (12 x 3700)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.89GB (6.67GB free)|
|Process Argv|--crash-reporter-id 74ce9143-439b-4406-a24f-d6f86ccdd59a|
|Screen Reader|no|
|VM|0%|
Extensions (8)
Extension|Author (truncated)|Version
---|---|---
turbo-console-log|Cha|2.9.6
gitlens|eam|14.2.1
auto-rename-tag|for|0.1.10
copilot|Git|1.105.350
prettify-json|moh|0.0.3
vscode-language-pack-pt-BR|MS-|1.81.2023081609
vscode-thunder-client|ran|2.10.5
vscode-icons|vsc|12.5.0
(1 theme extensions excluded)
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
pythonvs932:30410667
py29gd2263:30792226
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
282f8724:30602487
89544117:30613380
showlangstatbar:30737416
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofb:30804716
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
```
"
microsoft/vscode,2023-08-25 16:55:53,bug,Terminal multiple action icons overlap,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version: Insiders
- OS Version: macOS
![Image](https://github.com/microsoft/vscode/assets/876920/421798b0-dd84-4399-b7eb-ceab65ffdb0a)
"
microsoft/vscode,2023-08-25 16:04:19,bug,Moved Code Detection: Don't Detect Moves When Nothing Moved,"![Image](https://github.com/microsoft/vscode/assets/2931520/4d930f3a-2d6b-4396-a18d-2ecbb474392d)
"
microsoft/vscode,2023-08-25 07:41:47,bug,The markdown link in TEST RESULTS is hard to read,"
Does this issue occur when all extensions are disabled?: Yes/No
Version: 1.82.0-insider (user setup)
Commit: a0377f0c51dbb2d3188565cdf35e89929f864e65
Date: 2023-08-24T05:32:35.024Z
Electron: 25.5.0
ElectronBuildId: 23084831
Chromium: 114.0.5735.289
Node.js: 18.15.0
V8: 11.4.183.29-electron.0
OS: Windows_NT x64 10.0.19045
![image](https://github.com/microsoft/vscode/assets/6193897/a3fdd456-0169-4baf-8ea9-22a48da54c60)
Especially in the dark theme, the font color makes it difficult to read.
"
microsoft/vscode,2023-08-24 22:26:31,bug,Search view not showing warnings,"1. Open search view and search for something that isn't in your workspace. Enable `Use Exclude Files and Ignore Files` button that is within the files to exclude input.
2. Your result should say something like ""No results found. Review your settings for configured ...."", but it says ""0 results in 0 files"".
Buggy behavior:
![Image](https://github.com/microsoft/vscode/assets/31675041/f95c76e5-6272-42b7-8413-4aab010a7992)
Correct behavior:
![Image](https://github.com/microsoft/vscode/assets/31675041/f27c584e-061a-4545-b8a2-4dc1a4a4694b)
"
microsoft/vscode,2023-09-27 19:25:34,feature,Smooth scrolling on tabs,"
I'd like to have smooth scrolling for window tabs (when you scroll horizontally across open tabs up top)"
microsoft/vscode,2023-09-25 16:48:45,feature,Introduce a concept of similar commands in Core,We can use an implementation of TF-IDF to provide quick local similarity search for commands.
microsoft/vscode,2023-09-15 22:49:07,feature,Allow text in error message popups to be copied or at least selected,"
The possibility to copy the message in an error popup (the one with a red X in a circle. This would be nice for:
* searching error messages
* getting key info to resolve the error oneself
Below is my specific usecase:
![Untitled](https://github.com/microsoft/vscode/assets/4111/f2d3a726-6c89-4d2d-bba6-aa2feb345449)
The server I work on does not have ftp access, so I need to download that file on my workstation and scp it onto the server.
As it is I cannot select that text to copy the url, so I'm left with typing a 100 character ftp url by hand. Which is painful enough that I'm writing this issue."
microsoft/vscode,2023-09-14 07:51:02,feature,Add a 'wordwise' option for the diff inline view,"
As kind of requested in issue #103285 and visible in the mentioned issues attached image I'd like the option to only display the changed part of the line.
The idea is to not have a red line and a green line underneath each other but rather a single line where only the parts are highlighted that have actually changed.
Maybe the option could be added to the ""More Actions..."" drop down menu as ""Inline Word View"", ""Wordwise View"" or ""Word Diff View""
"
microsoft/vscode,2023-09-13 15:05:18,feature,Extension tree that use `vscode.open` and `vscode.diff` commands should respect enter vs. space,"When discussing with @meganrogge how we're going to improve accessibility for GitHub Pull Requests and Issues when opening a PR description we found that the built in Extensions view does the following:
- Arrow keys are used to navigate between list/tree items
- Enter opens the extension item and moves focus to the extension description webview editor
- Space opens the extension item and keeps focus in the tree
Extension trees don't work the same way:
- Arrow keys are used to navigate between list/tree items (same as above)
- Both enter and space run the tree item command and keep focus in the tree
We already have special handling for `vscode.open` and `vscode.diff` command run from the tree view so that we can keep things ctrl+click as open to the side. We should also handle enter and space as the Extensions view does."
microsoft/vscode,2023-09-11 09:59:46,feature,Diff Editor: Mark the full block as being replaced,"## Description
Mark the full block as being replaced
![Image](https://github.com/microsoft/vscode/assets/2931520/976bf5e8-dc2b-42d4-9fd7-e1aeec87a650)
## Playground Example
[Monaco Editor Playground Repro](https://microsoft.github.io/monaco-editor/playground.html?source=v0.42.0-dev-20230911#XQAAAAItBgAAAAAAAABBqQkHQ5NjdMeMm-jY7SIQ9S7DNlzs5W-mwj0fe1ZCDRFc9ws9XQE0SJE1jc2VKxhaLFIw9vEWdz-byd2-yxN8X4gTjZnHx_R_ugxZ6xrWgz8k0rlYVGZfwQ3QjH2GDw7bI_8paqOoYtw1AXFwYvJQ2FfcOWSRahkBHGIpvphFsp0dv_kujP7FSM9eFraIq3yjHeuGUpEofHF_Yz0qHNPPGbV1wfm52_Xt4W3OK2ChtRL6Eixci58SKGibR-tiOIpi4GiIjKYunEEy_De2QgmQzTrvm9ZcLoL07vWl7EwJ0YRaZbNIyb4__bTWr7m1zwDI-050XjyflhFYiKy7DNB3IH3VYJ0gV00H2dKdptYHQQUD2fJoaPw7TA95hnEIo2KQllGMkZBjhzxSyUfLQZznlY-eDiyxYyoQgH8Y0BZMSPEBCbKPu0DDk1naT_20zIUJ0PMcLYf6KYIHO08EDzvQqJwuN_dxt6k9ZkKq3QXlmVr0hMnUo7CPnMrtllKe-oSFihNiofQ8a0FYt3QfuSWw9TQ2JGwPnlDRJv5AwcHcX2rhgrcniUn7eMSoPWTLJFHGkwYv68VHwKfSFcIoJh7X3nmm9Hx_t8i-OgJ29wGuRBmYBgbJs9wIFgMSz9Ihp-BqmxQDqJ7bpNAfkwjGanqJYAIetU6mXrIpeyNKZHg3TP1Qd-3jqj20kAIlRCXfrG3O1W7a3y6BbtpFRIULsrzZTIougs_8hID4bv2o4lmt_62tZBQHwXEyEA-oJTx8iICIC3cPqxv3g-2eBthtZFYw6Fa6uE5BPANXUCvPKZoeDIHJUY-w7jIBgZPOz6B-qxfTBaexmNjFwOiO93hwlKdmrjkUymWI0paUkEnHRMP_6jBQt3P1DuENKLdIQ_QA0nTeLWxNmmUT1T8Dkg828N-vk10b9LjxNZxut16McLml7yq-WZiam0M07Co1e8CVuEWLUDJgLMtsdkPm7FuvgpcO7x8x3u9Mo6Qnf8-lhK_s3xdcuRv-b4Vd) (click on ""compare withlatest dev"" to verify a future bug-fix)
"
microsoft/vscode,2023-09-08 12:48:03,feature,Debug toolbar and CC,"This PR adds the option to let the debug toolbar show in the command center. While it's there, it also updates the background color
https://github.com/microsoft/vscode/assets/1794099/cb85e68e-747a-4181-8a8e-2bfd658bb5ce
"
microsoft/vscode,2023-09-07 07:38:19,feature,Improve Comments accessibility ,"- [x] Indicate when a document has commentable ranges
- Aria status when a document is opened
- [x] Add keyboard shortcut for adding a comment
- [x] Provide commands to go to next and previous commentable range
- [x] Add an accessible help menu to the comment widget
- Esc dismisses widget
- Lists go to next/previous commentable range commands + keyboard shortcuts
- Lists command and keyboard shortcut to add a comment
- Lists keyboard shortcut to execute the comment primary action and what the primary action is
- [x] The Comments view should respect the enter/space = reveal+focus/reveal
- [x] Make the comment widget toolbar always visible when in screen reader mode
"
microsoft/vscode,2023-09-06 07:49:11,feature,Improve Settings Sync diagnostics tooling,"The settings sync tooling that allows to inspect what happened must be improved. I do understand that bugs like https://github.com/microsoft/vscode/issues/192267 happen but I see myself being unable to file good issues (and as a consequence I observe that things don't get better). The sync view with all its viewlets, diff editors, and logs is overwhelming, esp when you must use in an unpleasant situation. There should be a single command which collects all the information needed (it can ask for my input) so that it can create a (zip)-file which allows the respective owners to investigate issues"
microsoft/vscode,2023-09-05 17:54:42,feature,"Add functionality that copies command and output, as opposed to just copying command, or just copying output","
"
microsoft/vscode,2023-08-31 22:45:48,feature,"[FR] Notebook, Markdown cells: please allow to generate a line break using '\\n' and/or ' ' in the link tooltips","
Hello, 👋
While I don't think it's ""standard"", some Markdown renderers show **link tooltips on multiple lines** when **'\\n'** or '**\\ '** are used in the link tooltip code. In some cases they require that it be preceded by two spaces.
Where support exists, the tooltips in the examples I add below are seen on multiple lines, **however that's not the case in VSCode's Markdown cells**, and for that matter on GitHub.
Code:
```markdown
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: \\nVisual Studio Code')
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: \\nVisual Studio Code')
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: Visual Studio Code')
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: Visual Studio Code')
```
Example:
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: \\nVisual Studio Code')
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: \\nVisual Studio Code')
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: Visual Studio Code')
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ 'microsoft/vscode: Visual Studio Code')
The only way I've found to achieve what I'm aiming for is to write the tooltip spreading multiple lines but I'd want to avoid having to resort to that method.
Below I show an example of the successful case with the unwanted method:
Code:
```markdown
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ ' microsoft/vscode:
Visual Studio Code
GitHub')
```
Example:
[microsoft/vscode: Visual Studio Code](https://github.com/microsoft/vscode/ ' microsoft/vscode:
Visual Studio Code
GitHub')
Please add support for **'\\n'** and/or '**\\ '** in **link tooltips**.
Kind regards.
Claudio Salvio
P.S.: 🙏 Thank you for the useful work you are doing in the field of notebook support in VSCode!
"
microsoft/vscode,2023-08-31 13:14:11,feature,Does package.json configuration properties pattern regex support unicode?,"I have a configuration property in my extension's package.json with the following definition:
```json
""objectscript.unitTest.autoload.folder"": {
""markdownDescription"": ""When running client-side test classes, automatically load the contents of sub-directories with this name. See the [%UnitTest /autoload qualifier documentation](https://docs.intersystems.com/irislatest/csp/documatic/%25CSP.Documatic.cls?LIBRARY=%25SYS&CLASSNAME=%25UnitTest.Manager#RunTest) for details."",
""type"": ""string"",
""default"": ""_autoload"",
""scope"": ""resource"",
""pattern"": ""^[\\\\p{L}\\\\d_. -]*$""
}
```
That regex is a valid unicode regex that works in the node REPL, but it doesn't work properly in the VS Code settings editor:
Are these patterns evaluated with the unicode flag on? If not, can that feature be added or enabled?
"
microsoft/vscode,2023-08-30 18:02:22,feature,Add new scope to VS Code settings for only workspace configurable,"
I was hoping we could add a new scope to VS Code settings. Currently, VS Code settings have the following scopes:
![image](https://github.com/microsoft/vscode/assets/5290572/1c1dfac4-1ea7-4e4b-a494-845de31ca18b)
None of these scopes describe a setting that is only workspace configurable. We were hoping we could have a scope that would only show up in the workspace and not in Global and User settings.
For context as to why we want this: we use vscode settings to help save default Azure resources that users want to deploy that specific project to. This setting doesn't really make sense to show as a user setting since what resource you want to deploy to is generally always going to be tied to the workspace/project.
We're worried that users may end up in a scenario where they have accidentally altered their user setting rather than their workspace setting, and now they will deploy every new project to whatever that resource is"
microsoft/vscode,2023-08-29 01:22:30,feature,Enable Command Center by default,We've seen great responses and results of testing with experimentation that the command center is a net-positive feature. So let's turn it on by default.
microsoft/vscode,2023-08-28 18:19:34,feature,Rename `--disable-keytar` to `--disable-persisted-secrets` or similar,"Follow up from https://github.com/microsoft/vscode/issues/188432
We don't use `keytar` anymore, so we should rename this flag to something more generic:
* `--disable-persisted-secrets`
* `--disable-secrets-storage`"
microsoft/vscode,2023-08-28 17:44:57,feature,Add Quick Chat to the Command Center,"If a Chat provider is contributed, we should offer a way to start a Quick Chat session from the Command Center so that Quick Chat is easier to discover."
microsoft/vscode,2023-08-25 01:42:20,feature,testing.openTesting is not working for testing explorer now,"Version: 1.82.0-insider (user setup)
Commit: a0377f0c51dbb2d3188565cdf35e89929f864e65
Date: 2023-08-24T05:32:35.024Z
Electron: 25.5.0
ElectronBuildId: 23084831
Chromium: 114.0.5735.289
Node.js: 18.15.0
V8: 11.4.183.29-electron.0
OS: Windows_NT x64 10.0.19045
Noticed that this setting is now controlling the `TEST RESULT` panel. Some users hoping that this setting can still work for the testing explorer: https://github.com/microsoft/vscode-java-test/issues/1597"
microsoft/vscode,2023-08-24 15:21:27,feature,Support Profile Inheritance,"Can something like inheritance or import be added?
```plain text
global
├── web dev
│ ├── vue
│ ├── react
│ └── vanilla
├── python
│ ├── data mining
│ └── deep learning
└── c/cpp
```
When adding a plugin to `global`, it means that a plugin is installed globally.
When certain settings are added in `web dev`, this applies to all web development.
_Originally posted by @tbontb-iaq in https://github.com/microsoft/vscode/issues/190856#issuecomment-1685975772_
"
microsoft/vscode,2023-08-23 15:23:11,feature,Add quick search to command center,"As mentioned in title. add the text quick access menu to this list:
![Image](https://github.com/microsoft/vscode/assets/31675041/66639311-3bce-49fa-aba2-fe4fc8478a07)
"
microsoft/vscode,2023-08-22 15:55:05,feature,builtin command executeInlineValueProvider does not exist,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.81.1
- OS Version: any
Steps to Reproduce:
1. in extension code, call
vscode.commands.executeCommand('vscode.executeInlineValueProvider',vscode.Uri.parse('c:/foo'),new vscode.Range(0, 0, 1, 1));
2. returned promise is rejected with Error: command '_executeInlineValueProvider' not found
I am developing an extension, and was not interested in this specific command, but noticed it was missing when I was looking for an example of a debug related built-in command.
I have no use for the command, I am just reporting it, because it's in the built-in commands wiki.
"
microsoft/vscode,2023-08-22 10:13:37,feature,Diff Editor Improve Alignment,"![Image](https://github.com/microsoft/vscode/assets/2931520/f52a3453-adf2-4e6f-9d81-0b42b5016000)
[Repro](https://microsoft.github.io/monaco-editor/playground.html?source=v0.42.0-dev-20230809#XQAAAALoCgAAAAAAAABBqQkHQ5NjdMjwa-jY7SIQ9S7DNlzs5W-mwj0fe1ZCDRFc9ws9XQE0SJE1jc2VKxhaLFIw9vEWSxW3yscxJt3-RhsAFQjfi1GSTeRDChAtr8RC8jhkfedYn4TxJcJIcii5J3w75DBaWbOhZVfaDoM0qNlMfjIIi_pOZYLX8hUC9KEglwGK1Zx62ZzqjEL1FXgB-5hVRcUYpIQxJHVMk6DsoODVXD_zW6G5yAc4hAJqEAZiCsze4ySI8URz0mzOCEjiILxd8pTxZp9fnkMXF3JYiuv9w5V9DLi0uL-Vg-L4Z5BmibEgIuj0cg2_6Felz9MsVJMDIXp0xAYYsBjDBxGZ9XiLNhBz8heIt936L8CFCazGpFfLfp2hZjM67tOfTNJv5oplz5m4sJxZdkgaSkrFbOkDrwF11izXX-nB3yKVHp2YGXRlENt-M3p_8GWiJPFPVJRkug1XByBxx8ipiy5Z8CK2pN456VLQePZS4L_-mvRdZdnP8mIPGds4dCZpxenxMmW2sbRFAg6aRz7xd_WDzjqrrAzAQo0-_ZiQrYRWzYPeEcUOu_8AgiXyc9Bte59oPLe6LBZfiodJa66knqiOqwGUZyDTRubvumWT3UyaNXohUSgb_HsKuAAq55vJL0BlxKInzVT4c3E4zLlQt8PKHtN4GzeG_q4pmNLd5j9F3uTsQkOB9A2itpoNf3qENHDmz7QzIijQMqK323gfqkekmjiWaRYBjuTyrR6jvCxl8W27RYvMw0Q88JAM-SQ5LXkxyOo0GosZXHcv8TRS1i8q5ubBrPhCeTxNvjXOFSDv6VfbRGuATvwVvRQ91sklqEZQCq-pDsp2wobp--Fa4gyE4TdLzVscgceidCjSbcIjUjS_JH5Maet2R6aj6K2Wfg_0ap_NJi-93Nci5kmOip1-94ydbe7dhpZWD22Wu13KxWVGvTdE6R56OweQbtCbIGUDhz3UqmIhTI72Egol-SY_XdaFbR6559gLxD-t8E-kh7wqMCjZsxrU80JB0w94IVkeCntt7S24Q5j8T7Gl00qqagmRN2VcK0f1qhG8uLj1TQpcaPMTAImlSk5w2NY8OGCx_eNPcmB5vCbQx81x9J2uCMpjxNr2GF8xNto9ijkdQNofRKW9VpCBM9jpAwX8kBP6aLaBMO1tO38Hg4RukmjZyA1XQ2PBVhSBgauK8dIk0wLPYqLMTmwwPuwHZvpE_94lDtM)
The const line should be aligned (try latest dev)."
microsoft/vscode,2023-08-21 14:20:33,feature,Diff Editor v2: setting for number of expanded lines when clicking top/bot,"There is now `diffEditor.hideUnchangedRegions.revealLineCount`.
Verification steps:
Set it to a value (e.g. 7), open a diff where there are many unchanged lines, enable collapsing unchanged lines (click the map symbol in the editor toolbar) and click the border of the folded lines indicator. Notice that the specified amount of lines are revealed."
microsoft/vscode,2023-08-20 08:54:02,feature,Integrated terminal code folding,"
when running a Node.js script that logs extensive data, it becomes challenging to find and focus on specific sections of the output. Code folding in the terminal would significantly improve the user experience and productivity when dealing with such situations.
This feature would be a valuable addition to VS Code, enhancing its capabilities for working with the integrated terminal."
microsoft/vscode,2023-08-18 03:00:16,feature,[Accessibility] consider adding default keybinding to accept completion in inline accessible view,"
Type: Feature Request
In Accessible View (Alt+F2) for inline suggestion, users can activate some action buttons via keybindings, such as Alt+F6, Alt+[, Alt+]. However, ""Accept Completion"" does not have its default keybinding so users have to press Shift+Tab and hit Enter. Please add default keybinding for this, such as Ctrl+/ or Ctrl+Enter to accept the current suggestion.
VS Code version: Code - Insiders 1.82.0-insider (ccb95fd921349023027a0df25ed291b0992b9a18, 2023-08-17T05:33:29.141Z)
OS version: Windows_NT x64 10.0.22621
Modes:
"
microsoft/vscode,2023-08-15 22:46:05,feature,Add SmartSelect / CamelHumps caret browsing for the F2 rename symbol feature,"
Add SmartSelect / CamelHumps caret browsing for the F2 rename symbol feature"
microsoft/vscode,2023-08-14 15:44:14,feature,Use a single action bar,"![image](https://github.com/microsoft/vscode/assets/2931520/17e56b44-e3f4-4eea-9b15-6a496d1b14e1)
These are currently individual action bars, which makes navigation harder.
"
microsoft/vscode,2023-08-13 20:59:07,feature,DOM renderer does not show selection over regular background colors,"When gpuAcceleration = 'off', the yellow text background should be blue here:
![image](https://user-images.githubusercontent.com/2193314/188175658-0b95600f-70fb-4702-bcfc-de788b0f8b54.png)
"
microsoft/vscode,2023-08-13 09:32:52,feature,Support GNU style file:line.column links,"The [Sail compiler]() outputs file:line links that follow [this GNU convention](https://www.gnu.org/prep/standards/html_node/Errors.html):
```
Warning: Redundant case sail-riscv/model/riscv_sys_control.sail:206.6-7:
206 | _ => false
| ^
```
This doesn't currently work in VSCode. It does support a very wide range of formats and I don't recall ever seeing this format before (even from GNU tools) so I suspect nobody else uses it. Nonetheless it's easy to add support in VSCode.
See https://github.com/rems-project/sail/issues/287"
microsoft/vscode,2023-08-11 12:01:05,feature,Adopt xterm.js' cursorStyleInactive option,See https://github.com/xtermjs/xterm.js/issues/4566
microsoft/vscode,2023-08-09 21:10:03,feature,Placeholder text for Ports tab should be different for local port forwarding,"
Currently, it says ""No forwarded ports. Forward a port to access your running services locally."".
But when using the Ports tab locally (no remote), this doesn't have the same meaning because I can already access the port locally.
The suggestion is to change the text in the local port forwarding case to show something more meaningful.
Just as an example, something like ""No forwarded ports. Forward a port to securely access your locally running services over the Internet."""
microsoft/vscode,2023-08-08 21:37:51,feature,add title bar to accessible view / help,to align with quickpick
microsoft/vscode,2023-08-08 18:52:11,feature,Explore showing workspace search results in the command center,"We want to start to explore what it'd be like to have a quickpick that shows text results.
For example, if we search `activate` in the `vscode-livepreview` repo, we don't get anything because it isn't in any filenames.
![Image](https://github.com/microsoft/vscode/assets/31675041/ca4ab35f-5dd2-4071-867f-e63e389b15bb)
It would be nice if vscode knew that `activate` was in the `extension.ts` file and showed that file.
![Image](https://github.com/microsoft/vscode/assets/31675041/68acd14f-5005-4837-96ef-5517854e69ce)
"
microsoft/vscode,2023-08-06 16:34:24,feature,Ignore a Collapsed Code Block While Copying,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.80.1 (Universal)
- OS Version: Darwin arm64 22.6.0
I'm working on a Vue.js project, and I have a rather large array of data within one of my components that takes up a lot of space in my file. I often find myself wanting to copy various parts of the code from this component, but I don't want this array to be copied.
In VS Code, I have the ability to collapse this array, but when I try to copy the code, the collapsed array is still being copied. Is this intended behaviour? Is there any way to tell the editor to ignore the collapsed code block when copying?
Steps to Reproduce:
1. copy-paste the array to vscode,
2. collapse the array in the editor
3. copy the collapsed array
4. paste the collapsed array anywhere else
5. the collapsed array will appear full size
```
const categoryTitles = {
'choroby-wewnetrzne': 'Choroby Wewnętrzne',
'chirurgia': 'Chirurgia Ogólna',
'pediatria': 'Pediatria',
'poloznictwo-ginekologia': 'Położnictwo i Ginekologia',
};
```
"
microsoft/vscode,2023-08-05 10:14:20,feature,Allow to theme foreground color of status bar entries on hover,"Hi. i am creating a theme pack for VSCode.
I have discovered that there is a ` ""statusBarItem.hoverBackground"" ` to customize the background color of a status bar hovered item but there is no option to customize the color of the hovered item foreground color. Something like ` ""statusBarItem.hoverForeground"" `
![image](https://github.com/microsoft/vscode/assets/55595063/b014bcce-fc58-4979-a332-b31c9b42b4f5)
"
microsoft/vscode,2023-08-04 19:35:26,feature,Improve discoverability of accessibility verbosity settings,"I imagine the accessible view hints (and others) that we have around the workbench are annoying if a user doesn't know they can be disabled. Should we include info about disabling at the end of the hint?
For example, ""Use Tab+Shift to access the terminal accessible buffer, disable this hint with the `accessibility.verbosity.terminal` setting
cc @jooyoungseo, @rperez030
Someone just emailed me that they didn't know where this was coming from and were annoyed by it."
microsoft/vscode,2023-08-03 09:07:27,feature,Diff Editor: Detect Exact Moves,"When a code fragment of at least 3 lines of code is deleted somewhere and inserted somewhere else without modifications, this move should be detected.
[In this example, moves should be detected](https://microsoft.github.io/monaco-editor/playground.html?source=v0.41.0-dev.20230727#XQAAAAIj1QAAAAAAAABBqQkHQ5NjdMjwa-jY7SIQ9S7DNlzs5W-mwj0fe1ZCDRFc9ws9XQE0SJE1jc2VKxhaLFIw9vEWSxW3ysc4hZTZxMLkt8pHA7F_Pn1keU5nB3shGxImSeqJguG37-pomZNyYs0J2m0N7JXtK-OyEPwJ1dYkOSSBjz2kEiCnrJx3y5JEJ3kbjVrha1Np0AoQHUcFCC3E7snbnIdHi_PI-DiwUE-UHuhXjgIOfC-XShJXAPebJffUWMuvtD4J9B2bqf6rI6DZxSsz1unP3F8EO_JAh1_5SruICPktWRHEK2dwadDpLE9H9ZLCBKuEzIOmPaFDnleVGjd2iBW0SXhb5LANM2jofu9wgO_BI5qYXXPagoD65bxJtbaEQmL1i9HOAlfCSKelr-VvRJHpknjKMWfrCmyVBPdP1OiRhnMQQ6UjXzZ4jFklfdGqDwtJFXrLzTS7DAEJOnN49eeCJhCLhTOiWg8Y_Q6vftgrWT935ne6Stq86qaoFLJtNc7xvq8QYr0HWcOcAZMfFFDJ2FVmaB4l9Ktfmv1y4a8MkR-tKtN_ZXC-Vfo6Fi3W35N3O1zKzN-6dv64O_F8FK8fzy5EBPOjmqwu8MbYtaMP1EZkKlTsKLfRzLRJ1CNhp594NyfX3gCk9cu_Jq_KTg4O3jnaR7-caZAnYmwH7sEajIc0H3awV-yjKQ63WXG5103pCplkn5i6nEqeQGR4rtqG46jNc96A3QfbGzlk5VDcRRsIIIKTybjZKqdyMXlJAUqkaFnJgcR7qDGGaLiIWgIYpIMxjJcnCNu3xkG6ZC72f0uW82awT7u9zAJeqeboYjoNzhWHx0abdTt2YWcMJ3aG8oWWuxY25bL8zDdv1bfpIS2iqUDVKo1ulYgezMYKGF5jd7i-LMc2LXT5Ey0ajaC-p01ieDelYUqs-5Tip8oevv5uc6P5xOqUZDcU1PfzAmpx4v8cK3E_v-uFHMSeSjx4Z_zNV9QgvhkbHtRrKElCwrNzFEQzhDQfPzKVp84-7YhaNaem2z3n5Av053O8f4uZ1keKN9N3zzSUZcoXMDItPX3WJBFAyFY4To4IjiVF5IWMZ_5CYUFCyE9a2Vy1BG-BRYh-ru_fFYr2anj9Hf2c0qIy75-9Hg1_V3LX_6MyCwMfBJmm-pblo_LISHPWq8SLtP5cXAl-5r1knaAV3lIf6NR9XTT4fAFKdkgBan5gb1PKDhPH0SnLLJ7Jh4xfj7bi-iXMeFbnrUmM--KZ5AXCpMQKm1_31CNdGlwQCjOxY0O6Jk_9YiCHY-VQCBR52pvqKwZWdz1fFTSAz7oHFBybsp6h4ANBCiOGfPZjbFTEwGt_WC_D8dxRvqjPNIk2fd7fFPHAx4xL6Bzc1_Avd80sz8u3QTFIlxqJp01UvIrH0EjiYjZ81NywmZzZ7EItgk4dLL8UUbirjFCenOQT69xw47RZM2K2l6TwRyFimb8_-jzhDnYCJ1vxxPMBN4bzPlQVc6xThT46xbc4bHXkCufRWOgbji07yZ7imn32RHY0tAAOa_-hmDfK5Dt8CxpikBLIy1jMMPvR9xkJdSjnAXok7yjtfivRniQMd57Acdcgewh68uVZfzWP31X9-rzL89epwMJMCu761tGMB5sbjS9LidlgYTkRTwV-G4Kijju-emgynAyw95PDQQ7qbC4lnZ0d7hTaDAKdIBTk_VOJWRKCUBeEhSlPIMKMcfQZR5RSgzywcU2XCqxCajSEynkuQjkTblwWxdFfbRkT4shlRLdsx28NVu1qwgj7ssOVofAXAsyG9vZ9fKc6nbuY-bOJakSUI7sVNVcy2IqEXjQMEjn-Nw7y8BWIdA5w-u_jhhnQ3awHW4
LFwD66oYCiwxW59TBjBLEzTlMLvaWGLvokMe0nTZcRQpq5-brG-8P1TZAWu3w6HshpHb9XGABxs5AgLMaxJQ6v9HqyTvEFsq5kyHsAbOziJ-Kvxiz2OJ0Mtf62aQKqV9USkc1G5NhsecC5GkIWhSu8B1cJsycac23QsF8xphdTvP_JjefnhTurAuwBtjgL8NuUUFDcn57cTlLZXLatooMEUsYfSMQE2tfw0M1GVxdpXp-9mhfWTWVHDJuFF6pNmGKPF54L4oyMT-z7XG3ySgD8H_CT6Kl2Njzjax2R7urThrTtKSZYv4KIomG5jnmFQK35Pd5QF7d4cS8DfakyuqCecN2wDcIhiAsLS0abTwP6tJMlmPb3si2xAJvuLCdVKKJTTclUC2tuSzir-U3V7U6B54JC3eZykowNfxddU2VbXr89AFJpDfzY37d6O4E9YuJ5qGoS3WjzaceC9JosaLrTXoMglIFqLLlIFeYOmPbU6Dk25zS-HlZw0Lw2ZiL290RrKlBrnHomBt7YafPFGc3dPng735sIA91DyE14C8eRsWp_CMcEqtkTXHEMq-RGPZXzqfk7RE4-xLpo56yrSsZ5BJDoRS-pFFuoUgdbUZ0jT51yhtqE7SqY3VF86XvVfrNdyBI9xnNKFDRnz1RMOr9OZwztOTTpsnOSGouE5NhLhGaCoAIH2wzrKR-rR5wT0LliP_0RxpN9730KftNs8EIm9P-_mgPKeiHGqqQ9QnhvOS4PJAJh7MRrwy-h5nf1WAQ96kr4WkN5rIyFR2czrFatC-IXK4UxjBmX0ZaW3X4ntUCcfo0xIYuwAOu2zWDssC8gharo1fmCqGe9OF1Z4kTKMrzvcxsqJDa298jmLkI_oCSVOrWEblDeDkXqLOmFA4wR7PLkSRAYW-WDkEbBRCRRndMYI5bs5YxTS9fIzeZg4k7pX_vqOBGNKqYHHpKNb_DokdcCsXvRHCon7ztIXTTFBC3APKBF56sQzpKMZymLvSy1x-wRDE-p-xBL45EcWMapNjQEMVOB3Q7KmXQSX0gtpnT06RJE2anszcwTojKYTtGQ4Z1UoyyeIreGqyz8Vuf_3RpkaFXBBOauoW9Tt2Ck2kfhqKVKJuKG5c1tDsZkTZ-8ei2sP8ZFjUY1og23yX19ROBdAH7Kpw_1v_Abj5qia44uQZbM-QAody23g3mEendM4UFCcCC3ZVpQyZjy8mgcXlDzdV7ZmK8e3rkgKR7BXOYhxhIf1i3k6bdNH3XU-hZofjY0ctzQ23SMrzQH4NghFlsWAb1Bz2OQbmjAM_8IDIy_Mgb8LrHWF_XQeRutQOTpfP2ZURzIBg8444b51pyaEX4gpKB_3yBYR0sIc-8sf67I36B9DVotTy9cc3WoNpHw3zn-QFoA-t8wetwkx1ul04TyZIZW0FWhHyJp-LMP1n93f-lQ7nIYk8QX_R-Ww4chGmzhrm1oL9YKTawtFZjH95aT_ToYWw6anfHyCa0VthCwi90IOuhxO_EXW8cRqP_1KMmsQmwvi5Xxk9TjtMWMELQYww9L3gml56ncQtnuXDw9IE3O0n6TwlwCrCv0xlBADfCXOkvBJfgoLCXAfs97G_qmXsibWzM1k4OJdT6gkjMbC_1jljCbEhXg-rRTYogl-mXKtBxvK6lX8-0rNQlWFJz0hhuf1NJsM-LxqbBmyk4V09MtrS2KRrMkNVTSXNsFldEZfULsx-wmRQ8S5IBXjr1XYeLATktaRlDMrS8RjuU0WQe64fdOBg9ZdTi1eycploJevAEUE4ThHFRuQXQxw39BNITXdZlqLuW_odlcXBql9z7czRKUoLyFiYZKANxtPU_ZmsADPNh0SPIy6_gSuSGPMy9pclWkrQBbDLMW_Xy9jiqRnT-8u4HWSt3pxIEFKi2I62mmIuPTPC0O_nmPBaEM9LRuZoU9GiBxM_xENNs7mdTbko557K-gurXLQBLnkCnHvXbJeKzSDknSVV9Ew990ghkfbcPbr2
45gmbyKgZK9THiUvr31YSglXsvp70LRSgsIPHu-KqBHkHxGvrdVS7oO_3Ubd7EziICuyi5_SD5FLbx9ST8jiCjUyh3QtObQjTEreccsZ9O44mtpLe88V5Xt1nvZ76OFAsTZ799Adptow5GwnVYpXwznWRVnYHl99Ec48gv8-h9IbFwxwkhRYsKnk81zTHy4BaEITDGNCB9kcHu5pkpWazUkUYU2iG44xH0yW6bVZTgA7TqRjFbBDUTr7IXLGQYGnY2b1Qu4wG1eDxfAHZ_1aPrursBoq86HxWzp0L0sgpjKN8WatwelP5xGiDlOXdDm1mhlLB6r35z5qHCYC5mTCJVm_iFXbIflR40GJrizyao8C5eGM3-OjWUz7OJ85LlaBShS-xkcL9DkSGFzsjdAw8L7c7QuIV7LvpbRnEQj8hsvIu8MNZA6DutmveFlBJI1Gij6SNVudlVrMgwKkZNk0Eu31XvZsbFR4u8U9giI5JWVlajWsKPbh2M_fg8eatU7g03VroXhmh4il7zdBRhZzXUDaUH9SNCEerxZAcyMwHBMTvIqZJdUbF5mkQ_9fRXoJxYtCl1VF7ZL7LoskWpS7Xu2nRyih5bT5A9RwZIoiGEUPtgVOP6YVz502ilKPNtnoNmnVqDGXlnRJVEsyFO-zVM2JCFGWtjPkPDkHKM0q7xupfO6gX-KKFtE48xdUIlbXhQZ2Bl7qmHbi7je3nK46l2qQc8Wz_hQDSP578aX-7NPxrjfe2sdmPHRxo_6Zd_9maWba9GF0u2LgJ6TskRtYuJtqTw3zaP_970qF-vpAkU4L9nnV6ipT8VrjgqMAa-V3R_y-JEko3YC6tYhQ96r_WfIOY5cZNby0k7H9J3m2aThdBSkuLGtZ2FysfpTQJlidQykCKo-ksAx9sslkN1bWy1o0moI9ydKO6KEtey6dyHptynXlram-mQsCRqU4fYx73OS7mzPJELSXc9vQ3Y_yjY0Y9XttUrjI21wAqd70jZbmCPBEq1l9ygYA54u92iNxmrvdAx-psazXV84DYaq4fATnHDh1JQSSaNH2Pce11hg4Oz8xPlFDLctGQIV4y2MqlKqLrzapZJN9DlKF43bPcyTdU3VGhWRgQ-ZHy52tmvtethspvoMOfXEHp2lVmv5FZ0wWCPaZhS--PnwpytWQl13t4lkDBAkzFTkRlrYXOXCSuQJjtJEpF2EYhGzyhecCTvVENZB0KvUELx1YhY61bh7HMXMbfs2zu6bZIBiIJzsYs_CzdM-VMeP4nO25tpH7AYWoU7I_y9jM3GNSMzyEjeTuBlZKSVI6ua-4VWWOPkbBDVmCxsiW0rbEfyHC4hievnwsNtBSgjc4Tj0640cr1HCuO3HI0DNWC3uzPz9kSitBF1Qd-xsunEqoSGHwIREzwvF4_csxYuVF3cnpFz8fip6AaxW8Jw-0jq-DaFUX7H8ARny-DBqrSzIuSBUHZUfnDxtxOWoHuZ7yjhLAiA4C7Wt062dpvvDziEBKzZx3sEogAbgMdM0pR6322xmjo7PV5ku0XpGDzRbYpS_l-XLCrtk79HpID-oXkx5AWUW15NOyHGMmdwEMpxGtzHmd3fGIVleP-Ivdin7Q9fLouitckcD2xZAjv8qhmdlt0LUfDxSDsT4KnxKdlT3x8k_eVOn_Cne9OfDAtKK0lgiVQPvrCkK7Kt2lecH7EfySsMOWxUk5dYxhb9XI1drPADpjAMpId96YFjRddZCC26bDDwZDQBm1SKw2mCddmzxGtHtcJbmI9WJ9TwH2DpbqQf2OAbLvFXpq6aHYqWbUNR0r3B8hcc0m4y73hMsP-BT0jvEiq58v71LPmtve9mRizK6OQyzoT3ieOng1VSDPlkWbqaKhdBCJcXOQiJnZ8IB9AVv9g2dUXtdw50PISMR5_2JAhFIbzk4xOCoqxbqeNrH999lEJ3463a8rpW2yZSCwiz9IH9FjWq9GMnNlx9jjPQpJnN0ZfcqAAsW3CnzOz8Y_
-YznE5-Rg1e_IhhbuaL947iJWgqul4cbsDuXgURJdXdKdRYvGLQI3w14FqfGP_gHmkBuu7CLskM-r_ghFyLb8QJ5L3_pH8q_BC2hBTcRszseyqSr-9YFGes1wkl9kJP7SqK58SM02XCNzz8fh8G6TCyVo4CNCfihG2xZf6582pTeIhnUTyH_CR43EpBt2H3mdMSd4diFzqGeWJ8RRQqejXa7t3libok6cyH-o2ExzK8TFUtawiaQRj09V9WZRLcgwjPxkfJ98YNzBzpwEa6g5gw22uA4Byl95ooqTrRf_WD-XNpqF5eCrE3r4wPkk_3Y6tW5WZpgO-c9bBwRlQM9AJgcWPuecAdvLkRgNaq_ALtNQQoCnOA40PTcV4I_8MBgacebQWIVoX6K49cl169k6NHPg42pYtspp_mOnqvVVN56pQI8eSPRaDhTybF2ChuJPNnCpYm9W1xaLR_q4IzHLefqi7BfI4_kM9wG2W98QedHGhbOw1mdnrbzFsErRw7u_y122dhUUPsCBWkYc0ktdO5koI8QthWsOIwW6lfq7lBmU-8vf92LZCn-_XKRGZLPB-Tzdi3vxN2agcKCk6RaTIB91nyDDEvfvCQpQik1yRpUrPaj0vYECzMJsYsGNgiNxccM7pZIY0ASK0tM2xAEeN3DYbUEK5GrZ0HlmELwsI4l-5Tik4MfBikrH2-VIOwylPpi5N9Vz4CKlvwtFCEkCgGOmYOA5udrhVVssR5eNAZVA2QtTQ6FF1x-VdnviOOIhWZsefbSAL0yfc6PA9U-6jLTCSDOoWl3xFq4wLuEZ5h-ZEHURyY78L0MTf0fEoPUlcDCVEohsh9IU0jtaHZh3fW1fKs46iTafI0Q78zu6LlX_ATgPYsKDnZaGvpTyLrPttDm2DI8bG4rNyhe_AG5T39Yp5Cfzkf3H3pqm_ii4Tfy9m6LLPAGQGiiEp7Z75fty_zu1ARU6kag-ghXVw9t6sPwh1t4oo7BkZVCs4sHoes5X-Ox6KgjWWcVNGc5Mby34WCzR7xu43qdjG9wQfm9WIklEOjRu3ag4FteyaLsAY7r2KC5vKq8Gkh5CEz-1ouybLlvBjNXCGv_661z6uqMd3b2HRWSiPkIrQShShwZddikCm24ICIPOFbSW58Del3r-NViEzTACufNg7l8ZbyUJld28d9SFRwB4fCwr7oKZwBoIWJ0ojrdLwM1ZoOSTRw7nh3CioS5VaMBrjWwiBK1IQj9rgEfwp3S0xhAk0CCgy3lAiKUl934WeCJd2_LV3UdoMV8GFUFye1qRm0mpZIwP-0bgkMbjrbDgYK0rvHoVLJraLg4sTJFnNrZFhCW7lYTRKtiTMZlEaOGXv4jJ7uC7BcJOVZMRK4zGdqil0YUbl2BR7xbmIAChpELNb8ZAHw7EvyG51MuZT3043baEgTta0RgYr8mFUpbvJC4W0HkEHx-J1_ss6SPZA_71jJhCFdspB3M0TmPlFgXC5SNnIkrkU5LziOdssypWNPfCjve4XWQR8onSwfjvujsMV5aeMg6aaERdtp_HA10XfXtMnnC5yWuAsnOMcxtBK9l4bajSIsyQgF5v7f6Uq_rlKA6FOTbWBqAzs-bhC0gkJlip0twpzm8Vakfh9B7PlrFwbY4piKVEOZiIKukUGjNBlcExhIt9t-v8h2Xj3xFBV55zxT8z74L_1Z-FVuswejDyBiB2xbv7cmdWnWiuB_yT3q1MC3tSHvaU4PS6TstJQlV-dSy8FqHRyKj4B8QFjS-pE1kcJFIL_l_5tD2YLTKfAreQ8dlhORBLATlhnRHqJyGXo5rLqPAgPOzBf3v0DxhGCHg--3O5l7-ZC727pHMz5aOSu8Ot2Bt9Q5P5aVqsi1JGfcWqRuRsZZUX7lhELesTA4HxX3PnCvVQRkV29gNNpUZOngAJpGnyBxQRJjWVo8005EKp2do4MhIftSdFSfmATenE6LrIuaqc4FUhbxghxJ1ktXIoUujJ-6ewXR3dJyH
3uyFyTIOj1Qy0VFZpi775PvuH8LwHCBwsrvlmac7Ei-WRQiyzG3LOpydAFSCanTQP0bhF8K1Jfm9OpCaYc-W9nE9RfH4DcghYKCeSKb15BDmP1wPDHMXrqlHMRpzj2tl50nuCDAG-2N_CF-UyZObUFPOOZFmZsjWMz4UJZmILrDxTqY5FPL0RpbAhebkRvK-iSkfr3TW-wKnW8savr8rToIKbeA2nh6Ewdi6mKi1ofOUetpawq-RTHTMezYZnx5oPVK5gS8bRWPo2SO_ShpZgW4TeenWCoHz-9wHku9EXf8osq-ubWJP8573ue1xXxsAwjDJZDDxBNuhkDR6tbUW0LXr-9A3OWbzmapAY2_YBPt-IUVT2CB1jqirntHXHV3DfO11cH2Hw8vwLiQ3RVSmcIgZ-SWmT1B2u19DMLDF1N4jB5-OB06K0gYzQZNMAfyytxbux7Em6URZqCtI6WNl4-eW0STGIONBNOa4tnIYChSeYZiRvrV8pHC3kmokk8PcrMVGJuCbt-jMj1TIz-d7NjCMmiHxHHnCt7JMVpkZ3kehHyh91i5pQqNEuoN84mQlTdt9bEuRCDJLO8DL1F8eXsaETR0A8pCWBZh6QAKHVOddN-kkiiSYgnVb3Qr4BW_2TqgvfFU2_7xU7N4AV_RdV1HgXrvMaCYXMZ90Z7tnCqpysGrwM8HdEdKsAhxUG2eI2ElBcu_Mk4rrwhCmpv82vkg5-xohiqCqRz2G17ABb9XAS-mEmQtuQZkmY_Vpfgbg4RnBrYATFU_9IArASAfPAOJyA2bfPh2GtkwFsfC8ltj8iyq0WQU5RYjXVCkQQLxD05NrTombzoavd6vdc3w-DFFAzUdqriwBqAF5Ff2oAKrRTDcC723gzvN4UfLiVd0ibn9EhnHFSFqC0SzF6nJDruhoujTpjUnAA8DoQrYks_gaNsFedISMGvZwayS08i5bZ9kEQTDjs0j37PgfMWHELdo0eXv8PI9OebptiAMlgUuQVTr39Xhb447WEk-8IhgPXU3-5v8B_xkPr_v_kkLdzG45o1aQTY3kmmIzMWWNv4aolIq7tSzXXh8zwcxImuz9BLM5z3Z2WYqo5j7M90URjWVO5qOb8oj1dvZBc2BXCFVK-87O_MbwVPU_WLnbB9oxuJJAVA8bYPgba0C74ISikeb7fsIdWNj_MTYDAlcUdocaUnv_3MXidFUhs_8DT5mH5B3Zro3EsTqzGmIOH88xRsZxN1vilVasLo5L2DuRJJFGwqnpEZjMQDcxoZvsifoX0ajbp9Szq5GKXBLCjcWcHlO3S9VWvV_LUitxEKpZAYsm2VR6lYrYI03xoi4wRxO-T6G0X8sLdEPzk1vsXBlde__GnEuQ1I1EtLCHVzXVJ2i4KGUS5faNX6qSeoMWtRLpILW39gXMxdJcdWjrHT-YD_oCJ37FRwfbXNhD7sU61cDmqRg36Sa7ZHWrr9cSEs9OnAQVy5CbP1lTtWEK0dJ3vIN9SQXyF1ZDkV1c-0cT4nnz0LrFuvZBQUf-Z9fz_EccEwrHlkc1kvxuXtIHKzGRY-ya2siBbL8y5x3BwyWMl8qkoos1YcXXlTlAp7cV7VVksyHpdAu26SNGaD68zR_eSW8UtaLX0sAK28w7Pu0__YxMXp). (select latest -dev version to verify)"
microsoft/vscode,2023-08-02 16:32:18,feature,Support actions in the accessible view,"For features like notifications, a user might want to tab to actions directly instead of having to go back to the item to take action.
_Originally posted by @meganrogge in https://github.com/microsoft/vscode/issues/188325#issuecomment-1656364314_
"
microsoft/vscode,2023-08-01 15:14:41,feature,Adopt new DOM renderer performance changes,Upstream: https://github.com/xtermjs/xterm.js/pull/4605
microsoft/vscode,2023-08-01 10:26:23,feature,Less aggressive `comments.openView` setting,"
With #147365 we got the `firstFile` setting to open the comments panel when opening the first file with comments. This is a nice improvement over the more aggressive `file` setting. However users will still have the comments panel focused when there are only resolved comments and therefore no action needs to be taken. This isn't optimal, especially in setups where the comments panel hides panels like source control, which might be more relevant in these situations.
That's why we propose to add an additional option to the `comments.openView` setting which behaves like `firstFile` but will only consider unresolved comments. Naming wise this might be tricky but something among the lines of `firstFileUnresolved` might work.
Cc @alexr00 @laurentlb @hermannloose"
microsoft/vscode,2023-07-31 21:49:47,feature,[Accessibility] Change command history default keybindings in terminal buffer on Windows to align with Mac,"Type: Feature Request
I am educating blind folks how to use VSCode with screen readers. It is confusing to have two different keys. On Mac, Alt+Up/DownArrow are used in terminal buffer to navigate executed commands whereas Windows uses Ctrl+Up/DownArrow.
I suggest using Alt+Up/DownArrow on Windows by default to align with Mac keybindings. There will be another benefit of saving Ctrl+Up/DownArrow for Windows that will be described in a separate feature request.
VS Code version: Code - Insiders 1.81.0-insider (9800cf6dd6bf4634889d60720ef46a400f3a7298, 2023-07-28T12:08:04.472Z)
OS version: Windows_NT x64 10.0.22621
Modes:
"
microsoft/vscode,2023-07-30 06:18:13,feature,Always on top window,"
Request to have an always-on-top feature for windows, especially within Mac and Windows; as of now Linux provides an OS-level always-on-top window, which has its own use cases, especially while multitasking and not wanting to lose focus on the coding window.
The implementation details which might help could be found over [here](https://stackoverflow.com/a/39844471/9928212)."
microsoft/vscode,2023-07-27 04:58:33,feature,Tunnel factory to provide error message for notification toast,"
We currently can contribute a tunnel factory through the embedder so that VS Code can open tunnel connections.
The problem is that if something goes wrong within that factory we have no control on what we show to the user because every error falls back to [this error handler](https://github.com/microsoft/vscode/blob/28849849a9e28f2abcecb5fad62564942821a333/src/vs/workbench/contrib/remote/browser/tunnelView.ts#L1152).
It would be nice if the tunnel factory could provide a custom error to show as part of the notification to the user and fallback to the default in case an uncaught error was thrown.
cc @alexr00 "
microsoft/vscode,2023-07-26 11:33:05,feature,"When no text is selected, CTRL-C should not overwrite the current buffer with nothing","
Sometimes, when it's my intention to paste a piece of text into VSCode, I accidentally hit CTRL-C instead of CTRL-V.
This overwrites the cut/paste buffer with whatever is currently selected in VSCode.
However, if nothing is selected then it effectively erases the current contents of the cut/paste buffer.
I cannot think of a scenario where this is desired functionality.
So: if no text is selected in VSCode, entering CTRL-C should have no effect at all (thus leaving the cut/paste buffer intact)."
microsoft/vscode,2023-07-25 16:49:43,feature,Should it be possible to use ctrl+up/ctrl+down to navigate within a comment thread,"Testing #188536
1. Open a file in a PR that has comments
2. Set focus to the comment widget input
3. :bug: can't ctrl+up/ctrl+down to move focus to the comments within that thread
"
microsoft/vscode,2023-07-25 11:10:22,feature,Navigating notification a11y view with alt+]/[ should announce where you are in the list,"Testing #188528
I'm expecting something like ""notification x of y"", instead it just announces the next focused notification, even if it's the same one:
![image](https://github.com/microsoft/vscode/assets/2193314/929ae59f-a833-44c8-952e-7b69dc528c21)
"
microsoft/vscode,2023-07-25 03:37:33,feature,[Feature] Support Sticky display the code stack of the cursor line,"
Sticky currently only supports displaying the stack structure of the first line of code in the editor.
Can it add a feature to let sticky show the code stack of the cursor line?"
microsoft/vscode,2023-07-24 18:14:01,feature,Support go to symbol in the accessible view,"ATM, this is opening the symbols for the focused editor pane. We will also want to make sure that the accessible view remains open despite the `blur` event that will occur on quick pick open. Also check the `zIndex` as I think it's currently set to = that of the quickpick, but will need to be < than it"
microsoft/vscode,2023-07-24 12:19:33,feature,.ipynb wrap cell output at fixed character limit,"
Type: Bug
```python
'1' * 1000
```
The output is a single line that goes off my screen to the right.
I would prefer to have options:
- single line, no wrap
- fixed margin (e.g. 80)
- wrap to visible width (so if I resize the window it adjusts)
VS Code version: Code 1.80.1 (74f6148eb9ea00507ec113ec51c489d6ffb4b771, 2023-07-12T17:20:23.298Z)
OS version: Darwin x64 22.5.0
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-8559U CPU @ 2.70GHz (8 x 2700)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled metal: disabled_off multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|2, 2, 2|
|Memory (System)|16.00GB (0.02GB free)|
|Process Argv|--crash-reporter-id ffe7017d-9a68-4026-a96e-3eb92191e23c|
|Screen Reader|no|
|VM|0%|
Extensions (25)
Extension|Author (truncated)|Version
---|---|---
project-manager|ale|12.7.0
gitignore|cod|0.9.0
vscode-office|cwe|3.1.6
git-extension-pack|don|0.1.3
githistory|don|0.6.20
gitlens|eam|14.1.1
copilot|Git|1.98.275
vscode-pull-request-github|Git|0.68.1
git-graph|mhu|1.30.0
vscode-docker|ms-|1.26.0
isort|ms-|2023.10.1
python|ms-|2023.12.0
vscode-pylance|ms-|2023.7.30
jupyter|ms-|2023.6.1101941928
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
remote-containers|ms-|0.299.0
cmake-tools|ms-|1.14.34
cpptools|ms-|1.16.3
cpptools-extension-pack|ms-|1.3.0
makefile-tools|ms-|0.7.0
cmake|twx|0.0.17
vscode-open-in-github|ziy|1.3.6
(1 theme extensions excluded)
"
microsoft/vscode,2023-07-21 21:22:34,feature,Improve Quick Question experience for conversational chat,"Right now, the Quick Question experience only allows for one question and one answer.
![image](https://github.com/microsoft/vscode/assets/2644648/e3726d4d-3492-4dbb-ac21-6c1e64c8cfb0)
We should explore alternatives to this UX that allows for full conversations."
microsoft/vscode,2023-07-20 23:28:37,feature,Support a `--password-store=inmemory` or similar,"
The main purpose of this would be for running in CI when you need _some_ sort of secret storage store, but it doesn't have to live on.
Right now `--password-store=basic` allows the SecretStorage API to work, but it still stores things on disk which isn't needed in CI scenarios... and it's probably better to store it in memory than weakly on disk in CI anyway."
microsoft/vscode,2023-07-20 19:15:53,feature,Behaviour of the Search-Box,"Type: Feature Request
Dear Sirs
As an older Developer using a lot of Editors during Time i find VS really, really good - so thanks a lot.
There is one Issue with the Search-Box, i often found crazy while in 'my Workflow':
Simply searching while working with 'Ctrl-F', so focusing the Search-Box, all Functions for moving in the Code are disrupted.
So, no Page-Up, no Page-Down, no 'Home'/'End', and so on aren't working as long as the Search-Box is focused - after years I can't get used to it - perhaps too old and too much experience over decades with other Editors...
Ok, sorry for my poor English, perhaps somebody will think about changing...
Eckart Bechler, Dortmund, Germany
VS Code version: Code 1.80.1 (74f6148eb9ea00507ec113ec51c489d6ffb4b771, 2023-07-12T17:22:07.651Z)
OS version: Windows_NT x64 10.0.19045
Modes:
"
microsoft/vscode,2023-07-20 02:30:58,feature,[Accessibility] Cannot use Shift+Tab as an input key in terminal,"Type: Bug
## Repro
1. Configure the settings like below:
`settings.json`:
``` json
{
""terminal.integrated.tabFocusMode"": false
}
```
1. Create new terminal via ctrl+`
1. Press `Shift+Tab` from terminal input.
## Current Behavior
Pressing Shift+Tab moves to the terminal buffer.
## Expected Behavior
Shift+Tab should be passed to terminal as an input key. The focus needs to remain in the terminal input field.
We need `Shift+Tab` when cycling back through shell auto-suggestion or ipython completion.
VS Code version: Code - Insiders 1.81.0-insider (c85bf61a82b0c39886b032d2634108782a55c637, 2023-07-19T05:34:51.441Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1145G7 @ 2.60GHz (8 x 2611)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.71GB (6.44GB free)|
|Process Argv|--crash-reporter-id b05b88e5-8894-4031-ae34-fa034ebddea9|
|Screen Reader|yes|
|VM|0%|
Extensions (90)
Extension|Author (truncated)|Version
---|---|---
android-dev-ext|ade|1.3.2
Bookmarks|ale|13.4.1
openscad|Ant|1.1.1
spellright|ban|3.0.116
zoterolatex|bna|0.4.1
mermaid-markdown-syntax-highlighting|bpr|1.5.2
doxdocgen|csc|1.4.0
dscodegpt|Dan|2.1.14
vscode-markdownlint|Dav|0.51.0
vscode-eslint|dba|2.4.2
vscode-quick-select|dba|0.2.9
vscode-deno|den|3.19.1
gitlens|eam|14.1.1
EditorConfig|Edi|0.16.4
prettier-vscode|esb|9.19.0
vscode-google-translate|fun|1.4.13
codespaces|Git|1.14.12
copilot|Git|1.97.271
copilot-chat|Git|0.5.2023071901
remotehub|Git|0.60.0
vscode-github-actions|git|0.25.8
vscode-pull-request-github|Git|0.68.1
easy-snippet|inu|0.6.3
path-autocomplete|ion|1.24.1
latex-workshop|Jam|9.13.1
lilypond-syntax|jea|0.1.1
scheme|jea|0.2.0
better-cpp-syntax|jef|1.17.2
google-search|kam|0.0.1
vscode-lua-format|Koi|1.3.8
lilypond-formatter|lhl|0.2.3
lilypond-pdf-preview|lhl|0.2.8
lilypond-snippets|lhl|0.1.1
vslilypond|lhl|1.7.3
zotero|mbl|0.1.10
git-graph|mhu|1.30.0
vscode-docker|ms-|1.26.0
black-formatter|ms-|2023.4.1
flake8|ms-|2023.6.0
isort|ms-|2023.11.11921012
python|ms-|2023.12.0
vscode-pylance|ms-|2023.7.30
jupyter|ms-|2023.6.1101941928
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
remote-containers|ms-|0.301.0
remote-ssh|ms-|0.102.0
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.80.2
vscode-remote-extensionpack|ms-|0.24.0
azure-repos|ms-|0.36.0
cmake-tools|ms-|1.14.34
cpptools|ms-|1.16.3
cpptools-extension-pack|ms-|1.3.0
js-debug-nightly|ms-|2023.7.1717
remote-repositories|ms-|0.38.1
vscode-github-issue-notebooks|ms-|0.0.129
vscode-selfhost-test-provider|ms-|0.3.15
vscode-serial-monitor|ms-|0.10.0
vsliveshare|ms-|1.0.5873
resourcemonitor|mut|1.0.7
autodocstring|njp|0.6.1
pandocciter|not|0.10.2
shiny-python|Pos|0.1.2
shinyuieditor|pos|0.4.3
quarto|qua|1.90.0
r-debugger|RDe|0.5.4
java|red|1.20.0
vscode-xml|red|0.26.1
r|REd|2.8.1
multi-command|ryu|1.6.0
vscode-deepl|soe|1.0.6
abc-music|sof|0.4.0
lua|sum|3.6.23
latex-utilities|tec|0.4.10
chatgpt|tim|1.1.2
cmake|twx|0.0.17
errorlens|use|3.12.0
intellicode-api-usage-examples|Vis|0.2.7
vscodeintellicode|Vis|1.2.30
vscode-arduino|vsc|0.6.0
vscode-java-debug|vsc|0.52.0
vscode-java-dependency|vsc|0.23.0
vscode-java-pack|vsc|0.25.12
vscode-java-test|vsc|0.39.0
vscode-maven|vsc|0.41.0
markdown-all-in-one|yzh|3.5.1
grammarly|znc|0.22.1
(1 theme extensions excluded)
A/B Experiments
```
vsliv695:30137379
vsins829:30139715
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
pythontb:30258533
pythonptprofiler:30281269
vshan820:30294714
vscod805cf:30301675
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30404738
py29gd2263:30784851
vsclangdf:30492506
c4g48928:30535728
dsvsc012:30540252
pynewext54:30618038
pylantcb52:30590116
a9j8j154:30646983
showlangstatbar:30737417
ecj1e332:30687743
pythonfmttext:30716741
pythoncmvfstr:30726892
fixshowwlkth:30771523
hideindicator:30766887
pythongtdpath:30726887
i26e3531:30792625
gsofa:30778558
pythonnosmt12:30779711
pythonidxpt:30768918
pythondjangots:30768917
pythonnoceb:30776497
copilotsettingt:30767686
e537b577:30772214
h0f32768:30792099
synctok:30783813
dsvsc013:30777762
dsvsc014:30777825
diffeditorv2:30786206
```
"
microsoft/vscode,2023-07-19 14:50:19,feature,there should be a shortcut for jumping between new and old files,"Use ""Diff Editor: Switch Side"" command to jump from original to modified and vice versa. Notice how selections are mapped.
There is no keybinding for it, but users can configure their own.
"
microsoft/vscode,2023-07-19 04:29:33,feature,Trigger IntelliSense(code completions) after paste or delete?,"
"
microsoft/vscode,2023-07-18 21:48:38,feature,Only show top-level variables in `Outline: Show Variables`,"* Related to https://github.com/microsoft/vscode/issues/146937
For our JS/TS projects, we are declaring functions and components as variables (eg. `const foo = () => {...}`). For the Outline view to be useful, we have `Outline: Show Variables` enabled.
However, this means a file like:
```typescript
const componentA = () => {
const state = null;
const localVar = 1;
};
const componentB = () => {
return;
};
const funcAsVar = () => {
const localVar2 = 1;
};
```
outlines like:
It would be useful if there was a way to only show top-level variables
```
componentA
componentB
funcAsVar
```
May also be useful for other languages that allow similar function-as-variable declaration
"
microsoft/vscode,2023-07-18 19:13:44,feature,add info about go to next / previous accessible view to help menu,This is currently supported in notifications and in the chat responses
microsoft/vscode,2023-07-17 19:28:57,feature,Show more terminal links by default,"Context: https://github.com/microsoft/vscode/issues/188101#issuecomment-1638740467
Let's explore pulling all the links when you open it, with some reasonable timeout. We could also do this lazily by showing just the buffer initially but when you filter we backfill the results."
microsoft/vscode,2023-07-17 12:36:20,feature,Use Find (search text in files) WITHOUT expanding already-collapsed sections,"
Type: Feature Request
When I'm using Find/Replace, the Find is way too aggressive at expanding all my collapsed areas. It seems to do so on a per-typed-letter basis so depending what the first few letters are, the experience is way more or less annoying.
VS Code version: Code 1.80.1 (74f6148eb9ea00507ec113ec51c489d6ffb4b771, 2023-07-12T17:22:07.651Z)
OS version: Windows_NT x64 10.0.19044
Modes:
"
microsoft/vscode,2023-07-17 10:24:52,feature,Support link detection in test results terminal,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version: VSCode 1.80.1, 1.81.0-insider; plugin playwright test for VSCode 1.0.14
- OS Version: Windows 10
Steps to Reproduce:
- Run playwright TS test throwing an error.
- Open test results viewer in vs code (see picture)
- Try Ctrl+Click on call stack item for the error
**Expected**
We jump to source code line
**Actual**
Nothing happens.
It stopped working ~ a week ago, I guess. But it used to work in the VS Code Insiders build. But today it stopped working in the Insiders build too. (Monday, 17.07.2023)
Doesn't reproduce in VSCode 1.79.2.
![Скриншот 17-07-2023 105240](https://github.com/microsoft/vscode/assets/11839300/b4debe61-81f9-45e0-892b-91bb1a211fcc)
"
microsoft/vscode,2023-07-15 21:01:11,feature,Feature Request: Introduce $TM_SUGGESTED_TEXT Variable for Enhanced Snippet Creation,"
I'm a devoted user of VSCode and appreciate the powerful snippets feature it offers. I would like to propose adding a new variable, $TM_SUGGESTED_TEXT, to enhance snippet creation.
The $TM_SUGGESTED_TEXT variable would capture the prefix or suggested text provided by VSCode's IntelliSense. This would simplify creating dynamic snippets that adapt to user input.
For instance, using the TODO Tree extension, accessing the suggested text within snippets would greatly improve workflow, enabling more contextual and efficient code templates.
I believe this feature would benefit the VSCode community, empowering users to create more sophisticated and personalized snippets.
Thank you for considering my suggestion to enhance the already remarkable snippets functionality in VSCode!"
microsoft/vscode,2023-07-14 07:13:00,feature,In debug session: Add items to WATCH section by drag-and-drop them from VARIABLES section,"
Add a feature which will allow to drag-and-drop items from VARIABLES debug section to WATCH section.
"
microsoft/vscode,2023-07-13 16:25:58,feature,Add info about sticky scroll to editor accessibility help menu,related to #186659
microsoft/vscode,2023-07-13 00:22:55,feature,Debugger: copy value from hover,"
As far as I understand to copy variables I have to either:
- Find them in variables view
- Type them in debugger -> right click copy
Ultimatively for greater user experience it would be better to add copy button to the hover (I see there is a space on the right near `Hold Alt key to switch to editor language hover`)
And for advanced users add command to copy value, so they can assign it to keybinding and click mouse less, they just need to hover over variable and thats it! (really want this so bad)
![image](https://github.com/microsoft/vscode/assets/46503702/c954c3ec-f50d-4474-8406-e7641ae4a249)
"
microsoft/vscode,2023-07-12 11:14:29,feature,[Feature] Extension hover labelling,"
Some extensions provide content on hover in the editor window. Sometimes, some extensions provide similar content on hover - leading to duplicated content.
A (debug?) feature that can be enabled in settings that prints name of extension contributing a hover content. This name can be printed just below the hover content provided by said extension.
"
microsoft/vscode,2023-07-11 23:10:58,feature,Speed up creating troubleshooting profile,Speed up creating troubleshooting profile by copying extensions instead of installing them
microsoft/vscode,2023-07-11 15:49:22,feature,"When restoring a file editor on a UNC path, the security error message should include an option to allow the host.","
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.80.0
- OS Version: Windows 10 22H2
Steps to Reproduce:
1. Have a VSCode session or workspace with open editors hosted on UNC paths (pre 1.78)
2. Upgrade VSCode to a version after the [GHSA-mmfh-4pv3-39hr](https://github.com/microsoft/vscode/security/advisories/GHSA-mmfh-4pv3-39hr) fix
3. Reopen the workspace and hit this error message:
![error](https://github.com/microsoft/vscode/assets/7613032/aa85a237-bccc-42cc-a79b-bbc8132796d9)
What should happen:
3. The error message should provide an option to add the server to the `security.allowedUNCHosts` list, like you get when reopening the file manually:
![dialog](https://github.com/microsoft/vscode/assets/7613032/498d57ef-54dc-447c-94e5-d4b577eb371e)
I have dozens of files on several servers open, so reopening them manually to get this fixed is rather annoying. Integrating that allow dialog into the error message would make for a smoother transition experience for users getting blindsided by this upgrade."
microsoft/vscode,2023-07-10 18:17:58,feature,Pick up TS 5.1.6,Track picking up https://github.com/microsoft/typescript/issues?q=is%3Aissue+milestone%3A%22TypeScript+5.1.6%22+is%3Aclosed
microsoft/vscode,2023-07-10 15:44:01,feature,[Feature Request] Option to enable `remote.SSH.defaultExtensions` for all currently installed extensions.,"
To enable a more seamless transition to remote development we'd like to have users extensions transparently enabled on the remote.
I have a script that uses `code --list-extensions` to grab the current set of extensions and add it to the user's local `remote.SSH.defaultExtensions`, but this is cumbersome and error prone. Ideally we'd have an option to install all existing extensions on the remote by default instead of having to sync them periodically."
microsoft/vscode,2023-07-10 15:03:10,feature,Add accessible view hint,"cc @isidorn
when a user focuses an item with an accessible view, we should tell them how to access it in the aria label and have a setting to disable that"
microsoft/vscode,2023-07-10 12:59:38,feature,"Add option to Allow configuration / customization of ""Open with VSCode"" in Windows Explorer context menu","I encountered an issue during the installation of Visual Studio Code (VSCode) on Windows where there is an option to add an ""Open with VSCode"" entry to the context menu in Windows Explorer for folders and files. Unfortunately, once this option is selected during installation, there is no built-in way to undo or remove it later within VSCode. Additionally, there is no option to add or enable this feature after the installation.
# Workarounds
Workarounds found online suggest modifying the Windows Registry to remove or add the ""Open with VSCode"" entry. While this solution may work, it involves manual registry editing. Since these workarounds can be found online, this indicates that there is a solution needed for a problem, and the correct solution should be a setting within VSCode.
It would be beneficial to have an option within the VSCode settings or installer to easily enable or disable the ""Open with VSCode"" entry in the Windows Explorer context menu, without the need for manual registry modifications.
# Steps to reproduce:
Install VSCode on Windows.
During the installation process, select the option to add ""Open with VSCode"" to the context menu in Windows Explorer for folders and files.
After installation, observe that there is no built-in option within VSCode to remove or disable this feature.
# Expected behavior:
There should be an option within the VSCode settings or installer to enable or disable the ""Open with VSCode"" entry in the Windows Explorer context menu, allowing users to control this feature without the need for manual registry modifications.
# Environment:
Operating System: Windows"
microsoft/vscode,2023-07-10 09:00:15,feature,A small feature to improve the program efficiently,"Hi
I have a small improvement for vs code.
It will be really efficient in manage files and projects.
Today I wanted to write a new plugin using other plugins. Imagine I have this structure:
plugins folder
>plugin one
>plugin two
>plugin three
In each plugin I have to open some files and all files should be separated.
What if I can group each plugin opened files? there is a way? yes
I want to group opened files by 3 tabs in the top-top-top of vs code window. If I click on each of those items (tabs), in vs code I can see just opened files for that plugin(folder), and I simply can go to other plugin opened files by choosing right plugin from initial tabs section ( in the top-top-top of vs code window)
Best
Iman Ghorbani
UI/UX Designer and Developer"
microsoft/vscode,2023-07-10 07:39:46,feature,Use other extensions to enhance the emmet extension,"
I want to add emmet functionality to my extension, but I don't want to add duplicate code😄.
Is it possible to define a configuration so that emmet gets the classnames and idnames from other extensions to enhance autocomplete?"
microsoft/vscode,2023-07-07 05:02:45,feature,Rounded corner interface request,"
I'm using Windows 10, but the pages of VS Code are more suitable for Windows 11, is it possible to add a theme suitable for Windows 10 at the theme color?
Thanks!"
microsoft/vscode,2023-07-06 19:39:33,feature,Independent options to enable/disable preview for the editor and source control management,"
My personal preference is to disable the default option of opening files in ""preview"" mode, requiring the user to double-click the file in the explorer to ensure that opening another file does not do so in the same tab. By disabling this option, files open in new tabs without the need to double-click them in the explorer.
The default behaviour of opening files in preview mode is great when looking at diffs in the SCM view. However, currently, it is only possible to globally enable and disable opening files in preview mode.
I would like to request adding a separate option to enable/disable opening files in preview mode for SCM.
Adding a reference to a previous request here: #149891"
microsoft/vscode,2023-07-06 15:22:18,feature,copy notebook output command for built in renderers,"The Jupyter renderer extension provides `copy image` through an icon for images, but the builtin renderers do not support any copying.
We can provide a context menu and toolbar locations for this command"
microsoft/vscode,2023-07-06 12:57:50,feature,allow extensions to update configuration enums ,"
Extensions can contribute configuration items with enum values. These enum values currently have to be hard-coded in `package.json`. Extensions do not have a way to set these enum values dynamically.
Use case: an extension might want to offer the user a configuration to choose a certain version of an installed software. The list of installed software versions is determined on extension start and cannot be hard-coded.
https://github.com/microsoft/vscode/issues/120940 added something similar, but not exposed to extensions."
microsoft/vscode,2023-07-06 04:09:04,feature,[Accessibility] Make Alt+F2 work in notification area,"
Type: Feature Request
When focused in the notification area and moving up and down each notification item in the list view, it would be so instrumental if Alt+F2 can augment the content in the accessible Monaco view.
VS Code version: Code - Insiders 1.80.0-insider (660393deaaa6d1996740ff4880f1bad43768c814, 2023-07-04T10:57:02.727Z)
OS version: Windows_NT x64 10.0.22621
Modes:
"
microsoft/vscode,2023-07-04 11:17:32,feature,Provide a way to manually reload an opened file.,"
https://github.com/microsoft/vscode/issues/17643#issue-196856396
* VSCode Version:1.8.1
* *OS Version:Windows 7 64 bit
I am trying to analyze the log files. It would be very helpful if i have a feature like a button click or a command to reload a file from disk. I can see the file being reloaded automatically but i am expecting a command to reload the file manually/only when needed.
Any suggestion or help on this feature please?
Thanks,
@jai1122.
was closed prematurely by
https://github.com/microsoft/vscode/issues/17643#issuecomment-350696652
`File > Revert File` allows to do so. It will fetch the contents of the file from disk even if not dirty.
Reverting is not reloading. I don't want to revert certain files, even temporarily. I want to *reload* them."
microsoft/vscode,2023-07-03 10:17:11,feature,"Preserve sidebar view sizes when resizing, if they are at their minimum","
when minimizing side tabs like open editors and time line the bottom one is always enlarged to maximum height, the editor should remember manual resizing so when opening and closing side tabs you go back to how you resized the tabs originally
"
microsoft/vscode,2023-07-02 15:57:23,feature,Add EnvironmentVariableCollection.description to the environment variables explanation,"Repro:
1. Create 2 terminals
2. Hover one tab
3. Click `Show environment contributions`
This should show something like ""Enables the following features: git auth provider""
![image](https://github.com/microsoft/vscode/assets/2193314/96a5c83e-bf66-4986-8732-cb0dbc8aed41)
"
microsoft/vscode,2023-06-30 15:46:38,feature,data science audio and text graph for visually impaired person,"
as we know VScode is most accessible code editor with screen readers.
however, I use matplotlib with this but unfortunately, I didn't find any accessibility with graphs.
In Google Colab, with a third-party library called audio-plot-lib, I can access the graph with audio and screen readers, but it does not work with VSCode. Can you do something for that library, or provide a separate facility for all graphs?
for more detail you can visit ""https://a11y-ds-intro.hassaku-labs.com/""
"
microsoft/vscode,2023-06-29 21:38:01,feature,Change accessible buffer command navigation keybinding for screen reader users,"From a screen reader user's perspective, the terminal accessible buffer is an editor and should behave as such.
`Ctrl/Cmd+Up/DownArrow` should jump to the top and bottom line.
We should keep it as is to align with terminals for non screen reader users and provide a new command navigation keybinding for screen reader users.
cc @jooyoungseo
"
microsoft/vscode,2023-06-29 21:25:23,feature,have accessible view for ghost text completions,@jooyoungseo suggested that it would be great to be able to review the text of the suggestion character by character. We could do this using an accessible view
microsoft/vscode,2023-06-29 18:27:33,feature,Sticky scroll for screen reader users,"Sticky scroll is a feature that allows sighted users to understand the nested context that they are in.
@kieferrm suggested it would be cool if we had this for screen reader users.
Imagine you jump to a line where there's a problem reported. You invoke a command which provides context - what class you are in, the function signature, the conditional, etc.
cc @rperez030 and @jooyoungseo"
microsoft/vscode,2023-06-29 06:19:02,feature,stickyScroll sticks curly brackets instead of class/function if bracket in new line ,"Type: Bug
### Steps to reproduce
- In settings.json:
```json
""editor.stickyScroll.enabled"": true,
""editor.stickyScroll.defaultModel"": ""indentationModel""
```
- or alternatively:
```json
""editor.stickyScroll.defaultModel"": ""foldingProviderModel""
```
- Create an example file containing a function or class with the opening brace on a new line.
- Scroll down the file.
### Current Behaviour
When using stickyScroll in the `outlineModel`, it functions as expected. However, I would prefer to use the `indentationModel` because it allows me to see all indentation levels at the top, which I find useful. Unfortunately, in this mode (indentationModel) and also in the `foldingProviderModel`, the curly brackets stick to the screen instead of the class/function when the bracket is on a new line.
![Code_CMOoY7ApCy](https://github.com/microsoft/vscode/assets/136900941/6d1061a8-7f60-445b-be2f-13a81638c4ea)
### Expected Behaviour
It would be great if both the `indentationModel` and `foldingProviderModel` treated opening brackets on a new line the same way as the `outlineModel` does. This means that the respective function should stick to the screen instead of the bracket.
VS Code version: Code 1.79.2 (695af097c7bd098fbf017ce3ac85e09bbc5dda06, 2023-06-14T08:57:04.379Z)
OS version: Windows_NT x64 10.0.19045
Modes:
"
microsoft/vscode,2023-06-28 16:38:51,feature,add an accessibility verbosity setting for notebooks so users can discover the notebook help menu,"alt+F1 can be used in notebooks to open the accessibility help, but I don't think that is noted anywhere."
microsoft/vscode,2023-06-28 09:00:45,feature,Scrolling at the edges of the reference view editor will scroll the outer editor,"
Does this issue occur when all extensions are disabled?: Yes/No
Yes
- VS Code Version: 1.79.2
- OS Version: Windows_NT x64 10.0.19045
Steps to Reproduce:
1. Use go to references/definitions to open a reference view window
2. Scroll to the bottom of the embedded editor
3. Keep scrolling will scroll the outer editor
It's quite annoying sometimes, make it behaves like the file tree in the right side would be nice
https://github.com/microsoft/vscode/assets/68118705/1ae23b05-4ddb-46ef-965b-2f06d5cf0107
But I saw this, so is this behavior intended? If so may I ask why?
https://github.com/microsoft/vscode/blob/efb49cc271d6eea5634ac395e36e1e4cd108a447/src/vs/editor/contrib/gotoSymbol/browser/peek/referencesWidget.ts#L308
"
microsoft/vscode,2023-06-27 21:52:22,feature,Enabling tag telemetry for Go,"
"
microsoft/vscode,2023-06-27 18:14:34,feature,"Let me click on ""hidden lines"" text to unfold","Testing #186213
I was trying to double-click on the ""123 Hidden Lines"" text to unfold that region. I would love to be able to do that if possible
"
microsoft/vscode,2023-06-27 18:02:15,feature,Peek Call/Type Hierarchy not in command palette,"Testing #186213
"
microsoft/vscode,2023-06-27 06:55:27,feature,Can't view embedded html PDF files in vscode jupyter,"### Discussed in https://github.com/microsoft/vscode-jupyter/discussions/13769
Originally posted by **MikeLemo1** June 27, 2023
I'm trying to reference an internal PDF file to view a single page from it with embedded HTML in VSCode Jupyter plugin with no luck as it just displays nothing or blank rectangles with no luck(just for reference the same method works in mkdocs)
Here is what I tried to do it with assuming the PDF file is sitting in the same folder as the .ipynb file:
```py
from IPython.display import IFrame
# display(HTML(''))
IFrame( src = 'STM32F302xD_E_MCU.pdf#page=34&zoom=150&toolbar=1&statusbar=1', width=700, height=600)
```
Also tried:
```
%%HTML
```
Any idea what can be done to help it work? "
microsoft/vscode,2023-06-26 18:13:04,feature,Support creating a new task of a particular type programmatically,"
In our extension we would like to guide the user to create a task of a specific type. It's already possible to read all the tasks and determine whether one of the desired type exists, however the programmatic flow seems to require executing `workbench.action.tasks.configureTaskRunner` which exposes all task types.
Would it be possible to expose a function in the API to create a specific task (e.g. by executing [_configureTask](https://github.com/microsoft/vscode/blob/main/src/vs/workbench/contrib/tasks/browser/abstractTaskService.ts#L3223)) or accepting a filter in `workbench.action.tasks.configureTaskRunner` similar to [runTask](https://github.com/microsoft/vscode/blob/main/src/vs/workbench/contrib/tasks/browser/abstractTaskService.ts#L2758)?
"
microsoft/vscode,2023-06-25 12:25:18,feature,Small UI Improvement ,"Type: Bug
Looks like the tooltip (when hovering on file tabs) utilizes a light theme approach, meanwhile all the app is used a dark theme. Appearing this tooltip when you work in the dark theme for a while is painful for the eyes and difficult to read. So, would be great to have it in a dark color and there will not be such a gap in contrast difference, it will be more pleasant to eyes.
VS Code version: Code 1.79.2 (695af097c7bd098fbf017ce3ac85e09bbc5dda06, 2023-06-14T08:57:04.379Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz (8 x 1800)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|7.89GB (1.12GB free)|
|Process Argv|--crash-reporter-id d513e5e9-a478-4d2f-b140-dc7d93a863a3|
|Screen Reader|no|
|VM|0%|
Extensions: none
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627cf:30244335
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscorecescf:30445987
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263cf:30773604
vsclangdf:30486550
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
2e4cg342:30602488
pyind779:30671433
f6dab269:30613381
pythonsymbol12:30671437
2i9eh265:30646982
showlangstatbar:30737416
vsctsb:30748421
pythonms35:30701012
03d35959:30757346
pythonfmttext:30731395
pythoncmv:30756943
fixshowwlkth:30771522
pythongtdpath:30769146
bgfeh915:30769767
gsof1:30774496
dh2dc718:30770000
pythonidxpt:30772539
pythondjangotscf:30772537
```
"
microsoft/vscode,2023-06-24 09:28:32,feature,Increase the width of breadcrumb box to support longer function name indexing.,"
Hello VSCode Team,
Breadcrumb box is quite a good feature for me when indexing functions in a larger file.
In a large project, I see lots of functions named like this: same long prefix but different short suffix. This causes trouble in finding them in the breadcrumb box with a fixed width. I wonder if you can consider increasing it, thanks a lot:)
XXXXXXXXXXXXXXXXXXXXXXXXXXX_YYYYYYYYY_AAAAA...
XXXXXXXXXXXXXXXXXXXXXXXXXXX_YYYYYYYYY_BBBBB...
XXXXXXXXXXXXXXXXXXXXXXXXXXX_YYYYYYYYY_CCCCC...
Best Regards,
Mingliang"
microsoft/vscode,2023-06-22 20:46:44,feature,"Picture-in-Picture for terminal, using new Google Chrome API","As seen [in this tweet](https://twitter.com/antfu7/status/1671974341032935424?s=20) by @antfu, Google Chrome mentioned in a blog post [they are planning to add a new PiP mode for html elements](https://developer.chrome.com/docs/web-platform/document-picture-in-picture/)
This would be extremely useful for the terminal in vscode.dev and github codespaces, and also on local (if electron supports it). Is there any plan to add support?
https://github.com/microsoft/vscode/assets/13242392/45f989d9-5fe2-4a26-8bc3-fbc4e5a284e2
"
microsoft/vscode,2023-06-21 14:52:48,feature,Diff Editor: Collapse Unchanged Code - Show Context Header,"It would be very helpful to include the current symbol name in the collapsed unmodified code indicator (see `DiffEditorWidget2` and `constructor`):
![chrome_BE4rflvO6W](https://github.com/microsoft/vscode/assets/2931520/97dcd4ee-7b98-44fa-9ed0-71770cbb9963)
Verification steps:
* Open a diff in vscode (not monaco editor playground)
* Enable collapsing unchanged regions (map icon in editor titlebar)
* Observe that the collapsed code shows a header indicating which symbol started inside of the unchanged code but ended outside of it.
* Verify that clicking on it reveals the symbol"
microsoft/vscode,2023-06-21 09:14:29,feature,Disable alt toggle of menubar altogether,"Since `alt` is used to `move lines up and down`, if you press it in a certain way (accidentally) it will toggle the menu bar, which I prefer to be hidden, always. Would be nice if there was a way to never show the menu bar even if `alt` is pressed, because it is disrupting."
microsoft/vscode,2023-06-20 21:12:25,feature,Consider providing screen reader with the chat response for inline chat,"as a screen reader user:
1. Start code chat
2. Type a request and hit enter
3. 🐛 tab 7 times to focus response
Perhaps we should align with the chat view and update via `status` with the response when it is ready
"
microsoft/vscode,2023-06-20 16:49:46,feature,Consider adding audio cues for inline chat,Now we have audio cues - which are off by default atm - in the chat view. We might want these also in the inline chat.
microsoft/vscode,2023-06-20 14:30:22,feature,Improve presentation of startup perf raw marks,"We should put these in a table:
![image](https://github.com/microsoft/vscode/assets/2193314/3dbeb9d5-ea68-4479-832f-f7c67bb5fa1d)
Something like this:
## Raw Perf Marks: main
| Name | Timestamp | Delta | Total
|---|---|---|---
| code/timeOrigin | 1687187191725.414 | 0 | 0
| code/didStartMain | 1687187191797 | 71.5859375 | 71.5859375
| code/willStartCrashReporter | 1687187191812 | 15 | 86.5859375
| code/didStartCrashReporter | 1687187191838 | 26 | 112.5859375
| code/willGenerateNls | 1687187191841 | 3 | 115.5859375
| code/mainAppReady | 1687187191872 | 31 | 146.5859375
| code/willLoadMainBundle | 1687187191872 | 0 | 146.5859375
| code/fork/willLoadCode | 1687187191885 | 13 | 159.5859375
| code/registerFilesystem/file | 1687187191998 | 113 | 272.5859375
| code/didLoadMainBundle | 1687187192018 | 20 | 292.5859375
cc @bpasero, @jrieken "
microsoft/vscode,2023-09-28 20:59:06,question,Breadcrumbs are limited to 6 items within a file,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version: recent main - 8b5719b3b65cc5ffd9145f5fca98ab225fd717d6 (also v1.82.2)
- OS Version: Windows 11
Steps to Reproduce:
1. Just open any deeply nested file. Personally, I used a JSONified version of the [Docker API specs](https://docs.docker.com/engine/api/v1.43/).
2. You'll see that the breadcrumbs only go to a certain level:
![Screenshot 2023-09-24 234426](https://github.com/microsoft/vscode/assets/16936908/4a925184-d429-4a4d-bab9-3f835b26c3ca)
Given that the breadcrumbs container is scrollable, I assume that this is a bug, not an intended limitation.
I traced it until `outlineModel.ts:OutlineGroup._getItemEnclosingPosition()`, because I was hoping that I could maybe see whether it is intended after all. But once I got there and saw that it seems to be caused by an incomplete tree (`children` are empty from that 6th level onwards), I couldn't justify sinking more time into this."
microsoft/vscode,2023-09-28 15:53:17,question,autocomplete style in javascript,"
Good morning, I recently started a JavaScript course using the self-taught official documentation from Firefox. First and foremost, I'd like to express my gratitude to all the developers who contribute to this project, both voluntarily and professionally.
I'm new to this, so please forgive me if I may be taking up your time, but I couldn't find a solution to the problem I encountered.
I kindly request that you add the "".style"" object to the default autocompletion provided by VS Code for JavaScript. You know, the one that allows you to modify certain CSS properties of HTML from JavaScript. I haven't come across any extensions that achieve this.
The solution suggested by artificial intelligences was for me to either create an extension myself or attempt to configure autocompletion suggestions from the ""settings.json"" file in VS Code. Unfortunately, neither approach worked.
Every time I write, for example:
element.style.
The autocompletion changes it to:
element.computedStyleMap.
What I end up doing is pressing space to prevent it from autocompleting.
I apologize if there was already a previous solution that I couldn't find, and I appreciate you taking the time to respond.
"
microsoft/vscode,2023-09-28 04:13:06,question,VS code stops running react code,"Type: Bug
Hi,
VS code stops automatically while running react code, this happens frequently. After this every time npm start command has to be given. This issue is very annoying. Kindly check the issue and resolve asap.
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz (8 x 2419)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|7.73GB (0.65GB free)|
|Process Argv|--crash-reporter-id 232489fa-7944-4d11-b31b-582170cfc1a7|
|Screen Reader|no|
|VM|0%|
Extensions (16)
Extension|Author (truncated)|Version
---|---|---
vscode-postgres|cko|1.4.3
prettier-vscode|esb|10.1.0
fabric8-analytics|red|0.7.0
java|red|1.22.1
LiveServer|rit|5.7.9
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-boot-dev-pack|vmw|0.2.1
vscode-spring-boot|vmw|1.49.0
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.14
vscode-java-test|vsc|0.40.0
vscode-maven|vsc|0.42.0
vscode-spring-boot-dashboard|vsc|0.13.1
vscode-spring-initializr|vsc|0.11.2
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
vsclangdc:30486549
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
282f8724:30602487
89544117:30613380
showlangstatbar:30737416
0bi6i642:30841073
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
copilotsettingc:30839828
synctok:30821570
dsvsc013:30795093
dsvsc014:30804076
diffeditorv1:30821571
dsvsc015:30845448
```
"
microsoft/vscode,2023-09-26 20:41:36,question,Unable to deny Built-in Port Forwarding,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.82.2
- OS Version: Windows 11 22H2
Issue: [Release notes](https://code.visualstudio.com/updates/v1_82) specify the new ""Built-in port forwarding"" feature that allows users to forward port from within VS Code and make it available publicly.
The [documentation](https://code.visualstudio.com/docs/editor/port-forwarding) specifies that we can allow/deny access to domain `global.rel.tunnels.api.visualstudio.com` to be able to control this feature.
During our testing it seems the feature still works even after we denied the domain `global.rel.tunnels.api.visualstudio.com` on our enterprise DNS secure gateway Cisco Umbrella.
Steps to Reproduce:
1. Denied domain `global.rel.tunnels.api.visualstudio.com` through our enterprise DNS secure gateway Cisco Umbrella.
2. Validated that the domain `global.rel.tunnels.api.visualstudio.com` is blocked by trying to browse to it - shows the blocked message from opendns.
3. Tried steps to 'Forward a Port' and it still allows the user to forward port.
"
microsoft/vscode,2023-09-26 05:14:01,question,Sponsored issue: Support Request - Infinite Scrolling Web App,"Description:
Hello,
I hope this message finds you well. I'm currently working on implementing infinite scrolling for a web project and have run into an issue that I could use some assistance with.
Issue:
I have set up infinite scrolling on my website to load additional content as users scroll down the page. However, I've noticed that the new content is not loading as expected when users reach the bottom of the page. Instead, the page remains static, and no new data is loaded.
I'm using a React-based frontend with a Node.js backend.
I've followed tutorials and documentation to set up the infinite scrolling feature, but I seem to have missed something.
I can share relevant code snippets or configurations if needed.
I would greatly appreciate your guidance on resolving this issue and getting infinite scrolling to work correctly on my website.
Thank you for your assistance, and I look forward to your response.
Best regards,
Aniket Mandloi
## Priority Support
- @aniketmandloi is using [Mintycode](https://mintycode.io) to fund this issue.
- If you would like to accept ![amount](https://imageupload.io/ib/zwkd2EaAY6YpPsg_1693209112.png) bounty for solving this issue join [Mintycode](https://mintycode.io/profile?action=CREATOR_SUPPORT_MODAL&requestId=aae585dd-6912-4162-8133-7e3005ba1a67&owner=microsoft&name=vscode).
- Thank you in advance for helping.
[![mintycode](https://imageupload.io/ib/LvxMwrPxam1JW9e_1693210482.png)](https://mintycode.io)
"
microsoft/vscode,2023-09-25 13:44:18,question,Terminal process failed to launch,"Type: Bug
""The terminal process failed to launch: Starting dierectory(cwd) ""C:/Program Files/--------/-------"" does not exist"" How do we solve this terminal issue?
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz (8 x 2803)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.68GB (3.74GB free)|
|Process Argv|--crash-reporter-id 75eb889d-432f-4f14-a87e-2fbb546928cd|
|Screen Reader|no|
|VM|0%|
Extensions (9)
Extension|Author (truncated)|Version
---|---|---
vscode-ros|ms-|0.9.2
python|ms-|2023.16.0
vscode-pylance|ms-|2023.9.20
remote-containers|ms-|0.309.0
cpptools|ms-|1.17.5
java|red|1.22.1
vscode-xml|red|0.26.1
vscode-java-debug|vsc|0.54.0
vscode-java-test|vsc|0.40.0
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vsdfh931cf:30280410
vshan820:30294714
vstes263:30335439
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
pythonvs932:30410667
vsclangdc:30486549
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
3biah626:30602489
f6dab269:30613381
a9j8j154:30646983
showlangstatbar:30737416
962ge761:30841074
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
copilotsettingc:30839828
asynctok:30821568
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015cf:30829746
```
"
microsoft/vscode,2023-09-25 02:47:11,question,problema con idioma vscode,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version:
- OS Version:
Steps to Reproduce:
no puedo descargar la extensión spanish en vscode y ya probe todo que puedo hacer este es el error que me tira lo desinstale vscode en limpio 3 veces y lo volvi a instalar y me sigue tirando error tengo la versio 1.82.2 nesesito ayuda en windows 10 64 bits
2023-09-24 23:33:30.931 [info] [perf] Render performance baseline is 391ms
2023-09-24 23:34:44.868 [error] Error: Cannot read the extension from /c:/Users/Lenovo/.vscode/extensions/ms-ceintl.vscode-language-pack-es-1.82.2023091309
at W.w (c:\\Users\\Lenovo\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\out\\vs\\code\\node\\sharedProcess\\sharedProcessMain.js:92:15050)
at async B.u (c:\\Users\\Lenovo\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\out\\vs\\code\\node\\sharedProcess\\sharedProcessMain.js:92:18230)
1.
2.
"
microsoft/vscode,2023-09-23 04:41:43,question,Terminal problem,"Type: Performance Issue
when I installed node js I could not type in my terminal
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 6.0.6002
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|AMD Ryzen 3 2200U with Radeon Vega Mobile Gfx (4 x 2495)|
|GPU Status|2d_canvas: unavailable_software canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: disabled_software multiple_raster_threads: enabled_on opengl: disabled_off rasterization: disabled_software raw_draw: disabled_off_ok video_decode: disabled_software video_encode: disabled_software vulkan: disabled_off webgl: unavailable_software webgl2: unavailable_software webgpu: unavailable_software|
|Load (avg)|undefined|
|Memory (System)|3.66GB (0.63GB free)|
|Process Argv||
|Screen Reader|no|
|VM|0%|
Process Info
```
CPU % Mem MB PID Process
0 139 11216 code main
2 118 2200 window [2] (Issue Reporter)
0 43 4488 fileWatcher [1]
0 81 5936 extensionHost [1]
0 45 4948 electron-nodejs (""C:\\Users\\Hello\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node ""c:\\Users\\Hello\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\extensions\\html-language-features\\server\\dist\\node\\htmlServerMain"" --node-ipc --clientProcessId=5936)
0 36 9408 electron-nodejs (""C:\\Users\\Hello\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node ""c:\\Users\\Hello\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\extensions\\json-language-features\\server\\dist\\node\\jsonServerMain"" --node-ipc --clientProcessId=5936)
0 46 7192 ptyHost
0 20 10164 winpty-agent
1 7 7816 C:\\Windows\\system32\\conhost.exe 0x4
0 51 8024 C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe
0 20 10412 winpty-agent
0 7 1612 C:\\Windows\\system32\\conhost.exe 0x4
0 56 11188 C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe
0 31 9448 utility-network-service
0 58 9484 shared-process
1 69 10460 gpu-process
0 196 10852 window [1] (index.html - Import and Exports - Visual Studio Code)
```
Workspace Info
```
| Window (index.html - Import and Exports - Visual Studio Code)
| Folder (Import and Exports): 3 files
| File types: js(2) html(1)
| Conf files:;
```
Extensions: none
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscorecescf:30445987
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
vsclangdc:30486549
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
282f8724:30602487
89544117:30613380
vscrp:30673768
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
copilotsettingc:30839828
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015cf:30829746
```
"
microsoft/vscode,2023-09-22 14:38:12,question,pow() function not working in c++,"#include <iostream>
#include <cmath>
using namespace std;
int main()
{
int n;
cin >> n;
int ans = 0;
int i = 0;
while (n != 0)
{
int bit = n & 1;
ans = (bit * pow(10, i)) + ans;
n = n >> 1;
i++;
}
cout << ans << endl;
}
"
microsoft/vscode,2023-09-22 01:45:20,question,Calculation is not being done in higher datatype for cpp code,"
Type: Bug
in .cpp file just cout<<5/2.0; It's answer shoulde be 2.5 but in vs code it is showing 2 which is wrong.
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz (4 x 2304)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|7.90GB (1.29GB free)|
|Process Argv|C:\\\\Users\\\\dell\\\\Desktop\\\\D --crash-reporter-id 8ac51244-69d0-49d5-8323-8dee75c6270b|
|Screen Reader|no|
|VM|67%|
Extensions (18)
Extension|Author (truncated)|Version
---|---|---
vscode-tailwindcss|bra|0.10.0
bracket-pair-toggler|dzh|0.0.3
chatgpt-gpt4-gpt3-vscode|Eas|1.1.8
auto-rename-tag|for|0.1.10
code-runner|for|0.12.0
c-cpp-runner|fra|8.1.0
vscode-pull-request-github|Git|0.73.2023091209
vscode-language-babel|mgm|0.0.39
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
vscode-thunder-client|ran|2.12.1
LiveServer|rit|5.7.9
es7-react-js-snippets|rod|1.9.3
cmake|twx|0.0.17
vscode-lldb|vad|1.10.0
vscode-icons|vsc|12.5.0
JavaScriptSnippets|xab|1.8.0
(2 theme extensions excluded)
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vsdfh931cf:30280410
vshan820:30294714
vstes263:30335439
vscoreces:30445986
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263cf:30792227
vsclangdc:30486549
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
3biah626:30602489
89544117:30613380
2i9eh265:30646982
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
copilotsettingc:30839828
synctok:30821570
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015cf:30829746
```
"
microsoft/vscode,2023-09-21 16:14:30,question,Issues with source control,"
Type: Performance Issue
i have over 937 changes to sync and it is not working, i just completed cs50p
and most of my folders are not updated to my github
how do i push these changes to my repository?
how can i clone this repository (it is private and id like it to be public)
please help
thank you
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 10.0.19045
Modes:
Remote OS version: Linux x64 5.15.0-1041-azure
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz (8 x 2691)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: unavailable_off vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|3.91GB (0.68GB free)|
|Process Argv|--crash-reporter-id ab4cdca6-5af7-4e51-8543-bccccc3656ae|
|Screen Reader|no|
|VM|0%|
|Item|Value|
|---|---|
|Remote|Codespaces: studious space couscous|
|OS|Linux x64 5.15.0-1041-azure|
|CPUs|AMD EPYC 7763 64-Core Processor (2 x 3242)|
|Memory (System)|7.75GB (5.72GB free)|
|VM|0%|
Process Info
```
CPU % Mem MB PID Process
0 74 10840 code main
0 89 8800 window [3] (Issue Reporter)
0 142 10532 window [1] (seasons.py - 135445414 [Codespaces: studious space couscous] - Visual Studio Code)
0 44 14476 shared-process
0 34 38960 fileWatcher [1]
0 136 46116 extensionHost [1]
0 12 47004 crashpad-handler
0 16 47196 utility-network-service
0 77 49332 gpu-process
Remote: Codespaces: studious space couscous
CPU % Mem MB PID Process
0 0 282 remote agent
0 0 306 fileWatcher
0 0 677 ptyHost
0 0 12579 /usr/bin/bash --login
0 0 12014 extension-host
0 0 12589 /vscode/bin/linux-x64/abd2f3db4bdb28f9e95536dfa84d8479f1eb312d/node /home/ubuntu/.vscode-remote/extensions/ms-python.vscode-pylance-2023.9.20/dist/server.bundle.js --cancellationReceive=file:e60d5c3ae6864b329640c8625645e31d2b124ef03b --node-ipc --clientProcessId=12014
0 0 13281 /vscode/bin/linux-x64/abd2f3db4bdb28f9e95536dfa84d8479f1eb312d/node /vscode/bin/linux-x64/abd2f3db4bdb28f9e95536dfa84d8479f1eb312d/extensions/json-language-features/server/dist/node/jsonServerMain --node-ipc --clientProcessId=12014
0 0 12018 fileWatcher
0 0 15315 /bin/sh -c /usr/bin/ps -ax -o pid=,ppid=,pcpu=,pmem=,command=
0 0 15316 /usr/bin/ps -ax -o pid=,ppid=,pcpu=,pmem=,command=
```
Workspace Info
```
| Remote: Codespaces: studious space couscous| Folder (135445414): 192 files| File types: py(55) gitignore(9) TAG(8) md(8) csv(4) jpg(4) json(2)
| pdf(2) png(2) pub(1)
| Conf files: settings.json(1);
```
Extensions (34)
Extension|Author (truncated)|Version
---|---|---
codespaces|Git|1.15.3
cs50|CS5|0.0.1
ddb50|CS5|2.0.0
explain50|CS5|1.0.0
extension-uninstaller|CS5|1.0.7
phpliteadmin|CS5|0.0.1
style50|CS5|0.0.1
codespaces|Git|1.15.3
vscode-pull-request-github|Git|0.72.0
prettier-sql-vscode|inf|1.6.0
vscode-pdf|mat|0.0.6
vscode-docker|ms-|1.26.1
vscode-language-pack-bg|MS-|1.48.3
vscode-language-pack-cs|MS-|1.82.2023091309
vscode-language-pack-de|MS-|1.82.2023091309
vscode-language-pack-es|MS-|1.82.2023091309
vscode-language-pack-fr|MS-|1.82.2023091309
vscode-language-pack-hu|MS-|1.48.3
vscode-language-pack-it|MS-|1.82.2023091309
vscode-language-pack-ja|MS-|1.82.2023091309
vscode-language-pack-ko|MS-|1.82.2023091309
vscode-language-pack-pl|MS-|1.82.2023091309
vscode-language-pack-pt-BR|MS-|1.82.2023091309
vscode-language-pack-ru|MS-|1.82.2023091309
vscode-language-pack-zh-hans|MS-|1.82.2023091309
vscode-language-pack-zh-hant|MS-|1.82.2023091309
python|ms-|2023.16.0
vscode-pylance|ms-|2023.9.20
cpptools|ms-|1.17.5
hexeditor|ms-|1.9.12
vsliveshare|ms-|1.0.5883
java|red|1.22.1
vscode-java-debug|vsc|0.54.0
gitdoc|vsl|0.1.0
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vswsl492:30256859
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263cf:30792227
vsclangdc:30486549
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyonecf:30548226
3biah626:30602489
89544117:30613380
vscrpc:30673769
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
copilotsettingc:30839828
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015cf:30829746
```
"
microsoft/vscode,2023-09-20 16:28:33,question,[Feature Request] Add workbench action to split editor terminal below,"
I currently have the following actions available:
```
workbench.action.createTerminalEditor
workbench.action.createTerminalEditorSameGroup
workbench.action.createTerminalEditorSide
```
I'd like to be able to split an editor terminal below. It seems like the UI can do this, because I can drag a terminal editor below another and have a horizontal split:
So I would love to have a `workbench.action.createTerminalEditorBelow` action to split it below 😊"
microsoft/vscode,2023-09-18 18:20:14,question,Giveing wrong output in C programming compilation,"Type: Bug
Mathematical calculation of some number having 5 in its digit, gives worng output which is decremented by 1 from original result.
The code which is wrote is of Armstrong Number, and i mentioning it below-
#include
#include
int main() {
int num,s,arm=0,n,count=0,r;
printf(""Enter any number\\n"");
scanf(""%d"",&num);
n=num;
s=num;
while (n!=0)
{
n=n/10;
count++;
}
printf(""DIGIT\\t= %d\\n"",count);
while (s!=0)
{
r=s%10;
arm+=pow(r,count);
s=s/10;
}
printf(""arm = %d"",arm);
}
In this code after compilation when we give input of any number having 5 in its digit for example 25, 55, 153, 50, 457, etc. It gives wrong output.
The output result decremented by 1 from original result.
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz (8 x 2419)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|7.76GB (2.22GB free)|
|Process Argv|--crash-reporter-id 2a3cab1c-42c1-4a04-8b5e-f30b9eb5e3ad|
|Screen Reader|no|
|VM|0%|
Extensions (15)
Extension|Author (truncated)|Version
---|---|---
code-runner|for|0.12.0
python|ms-|2023.16.0
vscode-pylance|ms-|2023.9.10
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
java|red|1.22.1
cmake|twx|0.0.17
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.14
vscode-java-test|vsc|0.39.1
vscode-maven|vsc|0.42.0
(1 theme extensions excluded)
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vswsl492:30256859
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vsdfh931cf:30280410
vshan820:30294714
vstes263:30335439
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263:30792226
vsclangdc:30486549
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
282f8724:30602487
f6dab269:30613381
vscrp:30673768
2i9eh265:30646982
showlangstatbar:30737416
962ge761:30835153
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
copilotsettingc:30834057
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015:30829745
```
"
microsoft/vscode,2023-09-17 07:02:24,question,Terminal Issue.,"
Type: Performance Issue
Intergrated Terminal exiting improperly after running the code ny giving the command to run C code , even if the code is correct.
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz (8 x 2419)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|7.75GB (1.34GB free)|
|Process Argv|--crash-reporter-id ed0fb855-e722-4c59-b225-db7fc284d4eb|
|Screen Reader|no|
|VM|0%|
Process Info
```
CPU % Mem MB PID Process
1 100 10088 code main
1 94 2408 window [2] (Issue Reporter)
0 75 9752 ptyHost
0 7 14636 conpty-agent
0 66 19292 C:\\WINDOWS\\System32\\WindowsPowerShell\\v1.0\\powershell.exe -noexit -command ""try { . \\""d:\\Microsoft VS Code\\resources\\app\\out\\vs\\workbench\\contrib\\terminal\\browser\\media\\shellIntegration.ps1\\"" } catch {}""
0 72 11352 fileWatcher [1]
0 216 13604 window [1] (str_problems.c - Untitled (Workspace) - Visual Studio Code)
0 37 16664 utility-network-service
0 26 17176 crashpad-handler
0 168 17528 gpu-process
0 88 20340 shared-process
0 136 21976 extensionHost [1]
0 35 3108 c:\\Users\\RAKSHIT\\.vscode\\extensions\\ms-vscode.cpptools-1.17.5-win32-x64\\bin\\cpptools.exe
0 4 10368 ""c:\\Users\\RAKSHIT\\.vscode\\extensions\\ms-vscode.cpptools-1.17.5-win32-x64\\bin\\cpptools.exe""
0 18 11952 c:\\Users\\RAKSHIT\\.vscode\\extensions\\ms-vscode.cpptools-1.17.5-win32-x64/bin/cpptools-srv.exe 3108 {135BBCE3-1572-41DA-9897-0C35EB0DB490}
0 6 13620 C:\\WINDOWS\\system32\\conhost.exe 0x4
0 6 14544 C:\\WINDOWS\\system32\\conhost.exe 0x4
0 71 11764 electron-nodejs (""D:\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node ""d:\\Microsoft VS Code\\resources\\app\\extensions\\json-language-features\\server\\dist\\node\\jsonServerMain"" --node-ipc --clientProcessId=21976)
```
Workspace Info
```
| Window (str_problems.c - Untitled (Workspace) - Visual Studio Code)
| Folder (Online-Recruitment): 25 files
| File types: jpg(8) css(4) html(4) ico(1) jpeg(1) js(1) md(1)
| Conf files:
| Folder (My_Portfolio): 5 files
| File types: html(1) yaml(1) xlsx(1) css(1)
| Conf files:
| Folder (DSA-practice-problems): 33 files
| File types: c(13) exe(12) json(4) c++(1) md(1)
| Conf files: launch.json(1) settings.json(1) tasks.json(1)
| Launch Configs: cppdbg(3)
| Folder (Java_Practice): 42 files
| File types: class(26) java(16)
| Conf files:;
```
Extensions (31)
Extension|Author (truncated)|Version
---|---|---
project-manager|ale|12.7.0
blackbox|Bla|1.1.58
gitignore|cod|0.9.0
composer-php-vscode|DEV|1.38.13918
phptools-vscode|DEV|1.38.13918
profiler-php-vscode|DEV|1.38.13918
githistory|don|0.6.20
vscode-html-css|ecm|1.13.1
code-runner|for|0.12.0
kotlin|fwc|0.2.31
Kotlin|mat|1.7.1
jupyter|ms-|2023.8.1002501831
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
java|red|1.22.1
LiveServer|rit|5.7.9
open-in-browser|tec|2.0.0
cmake|twx|0.0.17
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.14
vscode-java-test|vsc|0.39.1
vscode-maven|vsc|0.42.0
cors-browser|Wsc|1.0.11
php-debug|xde|1.33.0
vscode-open-in-github|ziy|1.3.6
(1 theme extensions excluded)
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
pythonvs932:30410667
py29gd2263cf:30792227
vsclangdc:30486549
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
2e4cg342:30602488
f6dab269:30613381
a9j8j154:30646983
showlangstatbar:30737416
0bi6i642:30835152
03d35959:30757346
pythonfmttext:30731395
9b8hh234:30694863
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015:30829745
```
"
microsoft/vscode,2023-09-16 11:14:59,question,Please provide a complete list of files and folders generated by VSCode and their locations,"
On re-opening a project and navigating to the terminal via `Ctrl + backtick` (Toggle Terminal), I notice that the terminal states:
` * History restored `
This would mean that there is some file/folder on disk somewhere which tracks this project and stores this data.
Can a complete list of files and folders generated by VSCode and their locations be documented and put up on the web? My hard disk space is running out and I would like to keep deleting such extra folders routinely so that it does not eat into the remaining space.
In other words, what files and folders can one safely delete so as to restore VSCode to the status it would have been under right after the very first time it has been freshly installed on a new machine without any extensions, cache files, etc. without affecting its functionality."
microsoft/vscode,2023-09-15 06:09:43,question,Where are the Extensions Stored?,"
Type: Performance Issue
I need to know where are the VS Code Extensions stored. It is not in the VS Code subdirectory.
VS Code version: Code 1.82.2 (abd2f3db4bdb28f9e95536dfa84d8479f1eb312d, 2023-09-14T05:55:25.390Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz (8 x 2419)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: enabled_on direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.77GB (8.81GB free)|
|Process Argv|--crash-reporter-id d28106b3-cd04-490a-b194-f819821f7d80|
|Screen Reader|no|
|VM|0%|
Process Info
"
microsoft/vscode,2023-09-14 12:06:09,question,Please make sure vscode's window title bar changes color when the Windows focus is acquired.,"
Please make sure vscode's window title bar changes color when the Windows focus is acquired.
I will appreciate seeing the top bar changing its background color to blue (in my case) when the windows focus is on vscode.
Thanks!"
microsoft/vscode,2023-09-13 13:17:38,question,Terminate batch job issue,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.82.0
- OS Version:
Steps to Reproduce:
1. press ctrl+c for terminate project
2. press N for no. But still project terminate if we select No.
"
microsoft/vscode,2023-09-13 00:27:38,question,please allow for multiple tunnels on same machine,"
It would be amazing if remote tunneling didn't force us into one server per machine (i.e. allow optional work around of https://github.com/microsoft/vscode/issues/171520)
An amazing feature of tunnel is to be able to use it in HPC environments, for example starting up a tunnel through an sbatch job with specific allocated resources. This allows users to connect to the newly created tunnel and use vscode's debugging with specific resources managed through cluster management systems like slurm.
The issue is that if this happens more than once on a machine, any slurm allocation after the first will just point users to the tunnel of the first allocation, leading to conflict of resources. This would be an easy solution to workflow issues described in https://github.com/microsoft/vscode-remote-release/issues/1722 .
Could supporting multiple tunnels on a machine be brought back?"
microsoft/vscode,2023-09-11 07:33:42,question,Open files from different folders in one workspace without having to split editor,"
When adding a folder to a workspace and opening a file from the new folder, it automatically opens **instead** of an already opened file in the workspace, closing the file from the other active folder in the workspace. The only workaround seems to be to ""open to the side"" which is quite tedious after a while.
So it would be nice if the files could all automatically open besides each other in the same editor without having to split."
microsoft/vscode,2023-09-09 13:37:00,question,Add terminal highlighting support,"
Type: Feature Request
The reason is that when I used WSL to compile the opencv library, the information displayed by the terminal was all white font, and it was difficult to capture some key information. Can I add a highlight effect like mobaxterm? However, I remember that GitHub's codespace had some highlighting, but as a user there was no more convenient way to set it up, or plugins.
VS Code version: Code 1.82.0 (8b617bd08fd9e3fc94d14adb8d358b56e3f72314, 2023-09-06T22:07:07.438Z)
OS version: Windows_NT x64 10.0.22621
Modes:
Connection to 'wsl+Ubuntu-22.04' could not be established
"
microsoft/vscode,2023-09-09 03:29:09,question,"logging into cs50.dev is ok, but VS code space does not load ","ADD ISSUE DESCRIPTION HERE:
logging into cs50.dev is ok, but VS code space does not load , it lasts forever but never ends.
I have reloaded, logout - login, restarted browser, rebooted my windows lap.
I can not continue with the course, really sad :(
Version: 1.82.0
Commit: 8b617bd08fd9e3fc94d14adb8d358b56e3f72314
User Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36
Embedder: codespaces
"
microsoft/vscode,2023-09-08 11:38:19,question,Search files (advanced search - filter a particular extension),"
Is it possible to search file by keyword only for a particular extension. For example, I want to search that lists all files that have extension `md` and the search term `time`"
microsoft/vscode,2023-09-07 17:47:37,question,grille.py,"Type: Bug
I cannot be successfull when I am trying to execute my programms. But, before my programms were running without problem. I use Python language.
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:18:39.991Z)
OS version: Linux x64 5.15.0-83-generic snap
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i5-4670K CPU @ 3.40GHz (4 x 3600)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: disabled_software vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: disabled_off|
|Load (avg)|1, 2, 1|
|Memory (System)|7.63GB (5.98GB free)|
|Process Argv|--no-sandbox --force-user-env --unity-launch --crash-reporter-id e9151a7f-cd85-47e8-9186-149c97afed6b|
|Screen Reader|no|
|VM|0%|
|DESKTOP_SESSION|ubuntu|
|XDG_CURRENT_DESKTOP|Unity|
|XDG_SESSION_DESKTOP|ubuntu|
|XDG_SESSION_TYPE|x11|
Extensions (3)
Extension|Author (truncated)|Version
---|---|---
vscode-language-pack-fr|MS-|1.81.2023081609
python|ms-|2023.14.0
vscode-pylance|ms-|2023.9.10
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscoreces:30445986
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
vsclangdc:30486549
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
3biah626:30602489
89544117:30613380
2i9eh265:30646982
showlangstatbar:30737416
0bi6i642:30823812
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv1:30821571
dsvsc015:30829745
```
"
microsoft/vscode,2023-09-07 14:40:11,question,My terminal keeps giving me errors.I can't run my codes in the terminal.,"Type: Bug
Hello, I have been using visual studio code for a long time, but recently I can not run my code in the terminal, no matter what language. When I run a code in Python language, it gives the following error in the terminal:
""c:\\Users\\LENOVO-GAMNG\\Desktop\\C\\hey.py : The term 'c:\\Users\\LENOVO-GAMNG\\Desktop\\C\\hey.py' is not recognised as the name
of a cmdlet, function, script file, or operable programme. Check the spelling of the name, or if a path was included, verif
y that the path is correct and try again.
At line:1 char:1
+ c:\\Users\\LENOVO-GAMNG\\Desktop\\C\\hey.py
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ CategoryInfo : ObjectNotFound: (c:\\Users\\LENOVO-GAMNG\\Desktop\\C\\hey.py:String) [], CommandNotFoundExcepti
on
+ FullyQualifiedErrorId : CommandNotFoundException""
When I run a code in C in the terminal, it gives the following error:
""cd : Cannot find path 'C:\\Users\\LENOVO-GAMNG\\Desktop\\C\\' because it does not exist.
At line:1 char:1
+ cd ""c:\\Users\\LENOVO-GAMNG\\Desktop\\C\\"" ; if ($?) { gcc 9_From_ User_ ...
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ CategoryInfo : ObjectNotFound: (C:\\Users\\LENOVO-GAMNG\\Desktop\\C\\:String) [Set-Location], ItemN
otFoundException
+ FullyQualifiedErrorId : PathNotFound,Microsoft.PowerShell.Commands.SetLocationCommand""
because of these errors, I can not write code in the terminal in any way, what is the solution to this, please urgently.
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz (4 x 2904)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|11.88GB (6.93GB free)|
|Process Argv|--crash-reporter-id aabdbfeb-c1b6-4fbe-8753-d6b16ef5132e|
|Screen Reader|no|
|VM|0%|
Extensions (14)
Extension|Author (truncated)|Version
---|---|---
prettier-vscode|esb|10.1.0
code-runner|for|0.12.0
vscode-language-pack-tr|MS-|1.81.2023081609
python|ms-|2023.15.12301911
vscode-pylance|ms-|2023.9.10
jupyter|ms-|2023.7.1002162226
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
live-server|ms-|0.4.9
JavaScriptSnippets|Tem|0.5.0
cmake|twx|0.0.17
vscode-lldb|vad|1.9.2
vscode-icons|vsc|12.5.0
(3 theme extensions excluded)
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscorecescf:30445987
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
pythonvs932:30410667
vscaac:30438847
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
2e4cg342:30602488
89544117:30613380
2i9eh265:30646982
showlangstatbar:30737416
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
asynctok:30821568
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
dsvsc015:30829745
```
"
microsoft/vscode,2023-09-06 00:39:35,question,It compiles but the console instantly closes,"Type: Bug
/*14. Realice una función que reciba como parámetros una matriz de enteros, la cantidad de
filas, la cantidad de columnas y un valor a buscar. La función debe devolver – POR
PARÁMETRO – la fila y la columna donde se encuentra el valor buscado. En el NOMBRE debe
devolver verdadero si lo encontró o falso si no lo hizo.
*/
#include
using namespace std;
bool busquedas(int mat[][999],int &filas, int &columnas,int dato);
int main(int argc, char const *argv[])
{
int matriz[999][999], fila, columna, dat;
cout << ""Ingrese cantidad de filas y columnas de la matriz "";
cin >> fila >> columna;
for (int i = 0; i < fila; i++)
{
for (int j = 0; j < columna; j++)
{
cout << ""Ingrese el numero de la fila: "" << i << "" columna :"" << j << "" "";
cin >> matriz[i][j];
}
}
cout << ""Ingrese el dato que quiere buscar: "";
cin >> dat;
bool encontro = busquedas(matriz,fila,columna,dat);
if (encontro)
cout << ""El dato fue encontrado en la fila "" << fila << "" columna: "" << columna;
else cout << ""El dato no fue encontrado"";
return 0;
}
bool busquedas(int mat[][999],int &filas, int &columnas,int dato)
{
for (int i = 0; i < filas; i++)
{
for (int j = 0; j < columnas; j++)
{
if (mat[i][j] == dato)
{
filas = i;
columnas = j;
return true;
}
}
}
return false;
} This is the full code i dont see any problems at all, but in this only exercise i have that problem
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|AMD Ryzen 5 1600 Six-Core Processor (12 x 3194)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.93GB (8.91GB free)|
|Process Argv|--crash-reporter-id a0003ca4-e173-4674-9b53-5ef381a1fb2a|
|Screen Reader|no|
|VM|0%|
Extensions (19)
Extension|Author (truncated)|Version
---|---|---
c-cpp-compile-run|dan|1.0.50
c-cpp-runner|fra|8.1.0
codespaces|Git|1.15.0
python|ms-|2023.14.0
vscode-pylance|ms-|2023.8.50
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
java|red|1.21.0
cmake|twx|0.0.17
vscode-lldb|vad|1.9.2
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
vscode-java-debug|vsc|0.54.0
vscode-java-dependency|vsc|0.23.1
vscode-java-pack|vsc|0.25.13
vscode-java-test|vsc|0.39.1
vscode-maven|vsc|0.42.0
material-theme|zhu|3.16.0
(2 theme extensions excluded)
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vswsl492cf:30256860
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscorecescf:30445987
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263:30792226
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
2e4cg342:30602488
89544117:30613380
2i9eh265:30646982
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv1:30821571
dsvsc015:30829745
```
"
microsoft/vscode,2023-09-04 06:36:37,question,PIL library encountering error,"Type: Bug
i am getting this error while i am writing the python code in VS code library for Image PIL Image. PS C:\\Users\\musta\\Desktop\\Visual Studio> & C:/Users/musta/AppData/Local/Microsoft/WindowsApps/python3.11.exe ""c:/Users/musta/Desktop/Visual Studio/main.py""
Traceback (most recent call last):
File ""c:\\Users\\musta\\Desktop\\Visual Studio\\main.py"", line 4, in
from PIL import Image, ImageTk
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ModuleNotFoundError: No module named 'PIL'
PS C:\\Users\\musta\\Desktop\\Visual Studio>
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.22000
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz (4 x 2712)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: unavailable_off vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|5.90GB (0.94GB free)|
|Process Argv|--crash-reporter-id 9b270cbb-942f-45c7-b9a2-157b6afee602|
|Screen Reader|no|
|VM|0%|
Extensions (13)
Extension|Author (truncated)|Version
---|---|---
copilot|Git|1.108.376
gc-excelviewer|Gra|4.2.58
bash-ide-vscode|mad|1.39.0
rainbow-csv|mec|3.7.0
csharp|ms-|2.0.436
vscode-dotnet-runtime|ms-|1.7.2
python|ms-|2023.14.0
vscode-pylance|ms-|2023.8.50
cpptools|ms-|1.17.5
platformio-ide|pla|3.3.1
intellicode-api-usage-examples|Vis|0.2.8
vscodeintellicode|Vis|1.2.30
console-ninja|Wal|0.0.215
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscorecescf:30445987
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
vsclangdc:30486549
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
3biah626:30602489
f6dab269:30613381
a9j8j154:30646983
showlangstatbar:30737416
962ge761:30823813
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
asynctok:30821568
dsvsc013:30795093
dsvsc014:30804076
diffeditorv1:30821571
dsvsc015cf:30823818
```
"
microsoft/vscode,2023-09-03 12:17:46,question,wont run the program,"Type: Bug
Whenever i try to run the program on terminal in my mac and write - 'gcc Hello.c'(Hello is the file name) it wont run and show ""no such file as 'Hello.c' ""
I am new to coding and dont know much about visual code thus please help me.
VS Code version: Code 1.81.1 (Universal) (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:20:33.924Z)
OS version: Darwin arm64 22.6.0
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Apple M2 Pro (10 x 24)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled metal: disabled_off multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|1, 1, 2|
|Memory (System)|16.00GB (0.09GB free)|
|Process Argv|--crash-reporter-id da2b00bf-03ff-4eb9-9a7d-922fa4284720|
|Screen Reader|no|
|VM|0%|
Extensions (7)
Extension|Author (truncated)|Version
---|---|---
c-cpp-runner|fra|8.1.0
cpptask|kay|0.0.1
lldb-vscode|lan|0.2.3
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
o-language-support|ora|0.0.6
vscode-lldb|vad|1.9.2
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscoreces:30445986
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263:30792226
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
282f8724:30602487
89544117:30613380
showlangstatbar:30737416
962ge761:30823813
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
```
"
microsoft/vscode,2023-09-02 18:37:59,question,Autoformat stopped working for Javascript,"
Type: Bug
I keep getting `There is no formatter installed for ""Javascript""` even though I tried ESLint and native autoformatter.
""[javascript][javascriptreact][typescript]"": {
""editor.defaultFormatter"": ""vscode.typescript-language-features""
},r:
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-10875H CPU @ 2.30GHz (16 x 2304)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|31.79GB (16.84GB free)|
|Process Argv|--crash-reporter-id 767b857d-6081-42c7-a168-353955dd0e41|
|Screen Reader|no|
|VM|0%|
Extensions (15)
Extension|Author (truncated)|Version
---|---|---
better-comments|aar|3.0.2
project-manager|ale|12.7.0
iconify|ant|0.7.0
turbo-console-log|Cha|2.9.6
vscode-eslint|dba|2.4.2
githistory|don|0.6.20
todo-tree|Gru|0.0.226
vscode-peacock|joh|4.2.2
json-to-ts|Mar|1.7.5
vetur|oct|0.37.3
markdown-preview-enhanced|shd|0.7.1
vscode-icons|vsc|12.5.0
volar|Vue|1.8.8
gitblame|wad|10.5.0
vscode-import-cost|wix|3.3.0
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263cf:30335440
vscorecescf:30445987
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263cf:30792227
vsclangdf:30486550
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
3biah626:30602489
f6dab269:30613381
a9j8j154:30646983
showlangstatbar:30737416
a2ce3375:30757347
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
```
"
microsoft/vscode,2023-09-01 12:26:02,question,code is not run prperly,"Type: Performance Issue
code is not properly please helpo me
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i3-1005G1 CPU @ 1.20GHz (4 x 1190)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|7.77GB (2.00GB free)|
|Process Argv|--crash-reporter-id 84f717d1-674b-4f6f-9880-86fab8d2c667|
|Screen Reader|no|
|VM|0%|
Process Info
```
CPU % Mem MB PID Process
0 104 13636 code main
0 188 3060 window [1] (Extension: Code Runner - practice c lang - partik - Visual Studio Code)
1 155 4616 gpu-process
0 103 4920 shared-process
0 76 5064 fileWatcher [1]
0 43 7708 utility-network-service
0 147 8140 extensionHost [1]
0 4 1820 C:\\Windows\\system32\\cmd.exe /d /s /c ""cd ""c:\\Users\\91701\\Desktop\\practice c lang\\"" && gcc tempCodeRunnerFile.c -o tempCodeRunnerFile && ""c:\\Users\\91701\\Desktop\\practice c lang\\""tempCodeRunnerFile""
0 4 2120 ""c:\\Users\\91701\\Desktop\\practice c lang\\""tempCodeRunnerFile
0 11 14364 C:\\Windows\\system32\\conhost.exe 0x4
0 25 15712 c:\\Users\\91701\\.vscode\\extensions\\ms-vscode.cpptools-1.17.5-win32-x64\\bin\\cpptools.exe
0 4 10840 ""c:\\Users\\91701\\.vscode\\extensions\\ms-vscode.cpptools-1.17.5-win32-x64\\bin\\cpptools.exe""
0 14 15600 c:\\Users\\91701\\.vscode\\extensions\\ms-vscode.cpptools-1.17.5-win32-x64/bin/cpptools-srv.exe 15712 {28F83285-AD47-482A-9208-DF9450D4B259}
0 11 16112 C:\\Windows\\system32\\conhost.exe 0x4
0 17 16760 c:\\Users\\91701\\.vscode\\extensions\\ms-vscode.cpptools-1.17.5-win32-x64/bin/cpptools-srv.exe 15712 {5255AD30-BE63-4EBF-B7D8-89BF7F1F0A41}
0 11 18176 C:\\Windows\\system32\\conhost.exe 0x4
0 11 18264 C:\\Windows\\system32\\conhost.exe 0x4
0 80 15944 electron-nodejs (""C:\\Users\\91701\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node ""c:\\Users\\91701\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\extensions\\json-language-features\\server\\dist\\node\\jsonServerMain"" --node-ipc --clientProcessId=8140)
0 27 8272 crashpad-handler
1 91 9112 window [2] (Issue Reporter)
0 95 14248 window
0 88 16140 ptyHost
```
Workspace Info
```
| Window (Extension: Code Runner - practice c lang - partik - Visual Studio Code)
| Folder (practice c lang): 51 files
| File types: c(26) exe(11) json(5) txt(4) C(1) h(1) html(1)
| Conf files: launch.json(1) settings.json(1) tasks.json(1)
| Launch Configs: cppdbg(3) cppvsdbg;
```
Extensions (5)
Extension|Author (truncated)|Version
---|---|---
code-runner|for|0.12.0
cmake-tools|ms-|1.15.31
cpptools|ms-|1.17.5
cpptools-extension-pack|ms-|1.3.0
cmake|twx|0.0.17
(1 theme extensions excluded)
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscoreces:30445986
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
pythonvs932:30410667
py29gd2263cf:30792227
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
2e4cg342:30602488
f6dab269:30613381
2i9eh265:30646982
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30821572
```
"
microsoft/vscode,2023-08-31 16:13:32,question,local host ,"Type: Bug
hello dear ,
actually am faceing a problem during debuging, crome gives only local host error
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
"
microsoft/vscode,2023-08-31 02:41:06,question,"vscode: PERM: operation not permitted, copyfile","Type: Bug
This issue is detailed here: https://stackoverflow.com/questions/77010682/vscode-perm-operation-not-permitted-copyfile
1. Install Google Drive. this should install a G: drive on your windows 11 system
-When setting this up, select 'streaming' instead of mirrored
2. Create project folder somewhere on the G drive.
3. Open project in vscode and create a dev container.
4. Im thinking now we have to wait for the files to be 'unmirrored'
5. Re-open the project and then the devcontainer
6. Try to cut and paste a file or folder from one location to another folder.
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.22621
Modes:
Connection to 'dev-container+7b22686f737450617468223a222f686f6d652f6d6168656e6472612f70726f6a656374732f72616173222c226c6f63616c446f636b6572223a66616c73652c2273657474696e6773223a7b22686f7374223a227373683a2f2f6e727a646f636b657274657374227d2c22636f6e66696746696c65223a7b22246d6964223a312c2270617468223a222f686f6d652f6d6168656e6472612f70726f6a656374732f726161732f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c22736368656d65223a227673636f64652d66696c65486f7374227d7d' could not be established
Remote OS version: Linux x64 5.15.90.1-microsoft-standard-WSL2
System Info
|Item|Value|
|---|---|
|CPUs|AMD Ryzen 9 4900HS with Radeon Graphics (16 x 2994)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|39.42GB (21.81GB free)|
|Process Argv|--crash-reporter-id a94c5544-2680-458f-803c-0aecde90a1fc|
|Screen Reader|no|
|VM|0%|
Connection to 'dev-container+7b22686f737450617468223a222f686f6d652f6d6168656e6472612f70726f6a656374732f72616173222c226c6f63616c446f636b6572223a66616c73652c2273657474696e6773223a7b22686f7374223a227373683a2f2f6e727a646f636b657274657374227d2c22636f6e66696746696c65223a7b22246d6964223a312c2270617468223a222f686f6d652f6d6168656e6472612f70726f6a656374732f726161732f2e646576636f6e7461696e65722f646576636f6e7461696e65722e6a736f6e222c22736368656d65223a227673636f64652d66696c65486f7374227d7d' could not be established
|Item|Value|
|---|---|
|Remote|Dev Container: redacted|
|OS|Linux x64 5.15.90.1-microsoft-standard-WSL2|
|CPUs|AMD Ryzen 9 4900HS with Radeon Graphics (16 x 2994)|
|Memory (System)|19.25GB (16.93GB free)|
|VM|0%|
Extensions (35)
Extension|Author (truncated)|Version
---|---|---
terraform|4op|0.2.5
vscode-azurevirtualmachines|ms-|0.6.5
remote-containers|ms-|0.304.0
remote-ssh|ms-|0.102.0
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.81.0
vscode-remote-extensionpack|ms-|0.24.0
remote-explorer|ms-|0.4.1
remote-server|ms-|1.4.3
terraform|4op|0.2.5
vscode-postgres|cko|1.4.3
vscode-eslint|dba|2.4.2
gitlens|eam|14.2.1
git-graph|mhu|1.30.0
azure-dev|ms-|0.7.0
vscode-azureappservice|ms-|0.25.0
vscode-azurecontainerapps|ms-|0.5.1
vscode-azurefunctions|ms-|1.12.4
vscode-azureresourcegroups|ms-|0.7.5
vscode-azurestaticwebapps|ms-|0.12.2
vscode-azurestorage|ms-|0.15.3
vscode-azurevirtualmachines|ms-|0.6.5
vscode-cosmosdb|ms-|0.19.4
python|ms-|2023.14.0
vscode-pylance|ms-|2023.8.40
jupyter|ms-|2023.7.1002162226
jupyter-keymap|ms-|1.1.2
jupyter-renderers|ms-|1.0.17
vscode-jupyter-cell-tags|ms-|0.1.8
vscode-jupyter-slideshow|ms-|0.1.5
azure-account|ms-|0.11.5
azurecli|ms-|0.5.0
vscode-node-azure-pack|ms-|1.2.0
sqltools|mtx|0.28.0
sqltools-driver-pg|mtx|0.5.1
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscoreces:30445986
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
pythonvs932:30410667
vsclangdc:30486549
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
282f8724:30602487
89544117:30613380
showlangstatbar:30737416
962ge761:30823813
a2ce3375:30757347
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofb:30804716
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
asynctok:30821568
dsvsc013:30795093
dsvsc014:30804076
diffeditorv1:30821571
```
"
microsoft/vscode,2023-08-30 14:45:45,question,can't able to create project,"Type: Feature Request
hi in my vs code it will don't show the create project option on the screen i am installing any version it will display only new file ,open files,open folder options only in the window.
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19043
Modes:
"
microsoft/vscode,2023-08-27 12:40:01,question,mysql,"
Type: Feature Request
i tried but find very diffculties to query mysql in vscode
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.22000
Modes:
"
microsoft/vscode,2023-08-24 09:56:18,question,Can you provide ways to distinguish between vscode windows.,"
Hi vscode developers,
The following description is based on windows.
I'm constantly working with multiple vscode windows. Many times I found it hard to find a particular window when switching from other softwares.
vscode shows filename before workspace/folder name, sometime the latter is hidden.
I tried to set title bar color for different windows but it does not show in the thumbnail windows pompts when mouse is moved to vscode icon in the taskbar.
So I'm suggesting a setting item, maybe a property to give to workspaces so they appear differently. This may help users the navigate the the window they want.
Thanks."
microsoft/vscode,2023-08-23 16:02:16,question,"The ""&"" symbol in the terminal is coming automatically after each time I change the directory using the CD command.","
Type: Performance Issue
The ""&"" symbol in the terminal is coming automatically after each time I change the directory using the CD command. How can I get rid from the ""&"" ? I can delete it manually each-time going backward after going the directory. Which is pretty exhausting.
Record of the issue:
https://www.loom.com/share/5f469083e1c1457ca7dd3dd95749832e
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.22621
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-10750H CPU @ 2.60GHz (12 x 2592)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|15.75GB (5.47GB free)|
|Process Argv|--crash-reporter-id 1626e408-1b63-4e0a-abaa-16e76df5c15a|
|Screen Reader|no|
|VM|0%|
Process Info
```
CPU % Mem MB PID Process
0 116 22028 code main
0 89 3172 window [2] (Issue Reporter)
0 156 13064 window [1] (Welcome - 2.2+Native+Modules - Visual Studio Code)
0 190 15408 gpu-process
0 42 17728 utility-network-service
0 73 19972 ptyHost
0 6 26440 conpty-agent
0 49 26584 C:\\WINDOWS\\System32\\WindowsPowerShell\\v1.0\\powershell.exe -noexit -command ""try { . \\""c:\\Users\\webta\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\out\\vs\\workbench\\contrib\\terminal\\browser\\media\\shellIntegration.ps1\\"" } catch {}""
0 87 20384 shared-process
0 115 20912 extensionHost [1]
0 92 15420 electron-nodejs (""C:\\Users\\webta\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node --max-old-space-size=3072 ""c:\\Users\\webta\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\extensions\\node_modules\\typescript\\lib\\tsserver.js"" --serverMode partialSemantic --useInferredProjectPerProjectRoot --disableAutomaticTypingAcquisition --cancellationPipeName C:\\Users\\webta\\AppData\\Local\\Temp\\vscode-typescript\\4854c7face779ce21e00\\tscancellation-3176bc4ca8571adec3a9.tmp* --locale en --noGetErrOnBackgroundUpdate --validateDefaultNpmLocation --useNodeIpc)
0 93 21340 electron-nodejs (""C:\\Users\\webta\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node --max-old-space-size=3072 ""c:\\Users\\webta\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\extensions\\node_modules\\typescript\\lib\\tsserver.js"" --useInferredProjectPerProjectRoot --enableTelemetry --cancellationPipeName C:\\Users\\webta\\AppData\\Local\\Temp\\vscode-typescript\\4854c7face779ce21e00\\tscancellation-ac8b167bdb98eaeffa73.tmp* --locale en --noGetErrOnBackgroundUpdate --validateDefaultNpmLocation --useNodeIpc)
0 80 20676 electron-nodejs (""C:\\Users\\webta\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node ""c:/Users/webta/AppData/Local/Programs/Microsoft VS Code/resources/app/extensions/node_modules/typescript/lib/typingsInstaller.js"" --globalTypingsCacheLocation C:/Users/webta/AppData/Local/Microsoft/TypeScript/5.1 --enableTelemetry --typesMapLocation ""c:/Users/webta/AppData/Local/Programs/Microsoft VS Code/resources/app/extensions/node_modules/typescript/lib/typesMap.json"" --validateDefaultNpmLocation)
0 6 22856 ""C:\\Program Files\\Google\\Drive File Stream\\79.0.2.0\\crashpad_handler.exe"" --database=C:\\Users\\webta\\AppData\\Local\\Google\\DriveFS\\Crashpad --url=https://clients2.google.com/cr/report --annotation=application=Code.exe --annotation=prod=DriveFS --annotation=ver=79.0.2.0 --initial-client-data=0x1180,0x1390,0x143c,0x1398,0x140c,0x7fff8fa3eff0,0x7fff8fa3f000,0x7fff8fa3f010
0 27 24408 crashpad-handler
0 76 25656 fileWatcher [1]
```
Workspace Info
```
| Window (Welcome - 2.2+Native+Modules - Visual Studio Code)
| Folder (2.2+Native+Modules): 4 files
| File types: js(2) txt(1)
| Conf files:;
```
Extensions (3)
Extension|Author (truncated)|Version
---|---|---
copilot|Git|1.105.350
material-icon-theme|PKi|4.29.0
LiveServer|rit|5.7.9
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vstes627:30244334
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263:30792226
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
282f8724:30602487
89544117:30613380
a9j8j154:30646983
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofb:30804716
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
asynctok:30815620
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30812749
```
"
microsoft/vscode,2023-08-22 00:42:24,question,FONT,"Type: Bug
Currently I'm learning to program in HTML, I'm in the ""FONT SIZE, FONT FACE"" phase, but when I use ""FONT"" it's like it doesn't exist.
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|AMD A4-4000 APU with Radeon(tm) HD Graphics (2 x 3232)|
|GPU Status|2d_canvas: unavailable_software canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: disabled_software multiple_raster_threads: disabled_off opengl: disabled_off rasterization: disabled_software raw_draw: disabled_off_ok video_decode: disabled_software video_encode: disabled_software vulkan: disabled_off webgl: unavailable_software webgl2: unavailable_software webgpu: unavailable_software|
|Load (avg)|undefined|
|Memory (System)|3.43GB (0.38GB free)|
|Process Argv|--crash-reporter-id 21bdeb6d-4608-40bf-9f3a-deb26c9321f8|
|Screen Reader|no|
|VM|0%|
Extensions: none
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscoreces:30445986
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263cf:30792227
vscaac:30438847
vsclangdc:30486549
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
vscccc:30803845
2e4cg342:30602488
f6dab269:30613381
vscrp:30673768
showlangstatbar:30737416
a2ce3375:30757347
pythonfmttext:30731395
pythoncmvfstrcf:30756944
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofa:30804715
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
synctok:30815622
dsvsc013:30795093
dsvsc014:30804076
diffeditorv1:30812748
```
![image](https://github.com/microsoft/vscode/assets/142854414/4d6d714b-71ff-4ef2-aeb7-d5b354b48bfb)
idk if i'm missing something"
microsoft/vscode,2023-08-21 18:35:46,question,About Winerror 87,"when i import eel lib on my python code it keeps shown me this error on ""eel.start('index.html')""
is there a problem with my code or it's a bug?
```[tasklist]
### Tasks
```
"
microsoft/vscode,2023-08-19 20:41:27,question,Make pinned tabs small,"Type: Feature Request
It would be nice if the pinned tabs get collapsed like in most web browsers. Please make an option in settings to collapse pinned tabs.
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:18:39.991Z)
OS version: Linux x64 5.15.0-79-generic
Modes:
"
microsoft/vscode,2023-08-19 01:55:54,question,Material-UI website not loading,"ADD ISSUE DESCRIPTION HERE
Version: 1.81.1
Commit: 6c3e3dba23e8fadc360aed75ce363ba185c49794
User Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36
Embedder: codespaces
"
microsoft/vscode,2023-08-18 08:45:01,question,** On entry to DGEBAL parameter number 3 had an illegal value,"Type: Bug
i am trying to run a python code in the visual studio code. In the fourth loop it is showing the following error:""** On entry to DGEBAL parameter number 3 had an illegal value"". please help me fix this.
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:18:39.991Z)
OS version: Linux x64 5.15.0-78-generic
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-6700 CPU @ 3.40GHz (8 x 3400)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: disabled_software vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: disabled_off|
|Load (avg)|1, 0, 0|
|Memory (System)|31.23GB (24.85GB free)|
|Process Argv|--unity-launch --crash-reporter-id 047b8f1f-5a5d-46c5-b96d-1e3970b7a336|
|Screen Reader|no|
|VM|0%|
|DESKTOP_SESSION|ubuntu|
|XDG_CURRENT_DESKTOP|Unity|
|XDG_SESSION_DESKTOP|ubuntu|
|XDG_SESSION_TYPE|x11|
Extensions (3)
Extension|Author (truncated)|Version
---|---|---
python-image-preview|076|0.1.2
python|ms-|2023.14.0
vscode-pylance|ms-|2023.8.30
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242cf:30382550
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
pythonvs932:30410667
vsclangdf:30486550
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyonecf:30548226
vsccc:30803844
2e4cg342:30602488
f6dab269:30613381
2i9eh265:30646982
showlangstatbar:30737416
a2ce3375:30757347
pythonfmttext:30731395
pythoncmv:30756943
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofa:30804715
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
asynctok:30815620
dsvsc013:30795093
dsvsc014:30804076
diffeditorv1:30812748
```
"
microsoft/vscode,2023-08-17 12:40:51,question,Something wrong with the terminal encoding,"
Does this issue occur when all extensions are disabled?: Yes
- VS Code Version: 1.81.1
- OS Version: Windows 10.1 x64
When I'm trying to build C++ app, I'm getting following error that I am not able to read:
""cmd"" �� ���� ����७��� ��� ���譥�
��������, �ᯮ��塞�� �ணࠬ��� ��� ������ 䠩���.
I've already set OEMCP at regedit to 65001, but it didn't help.
Steps to Reproduce:
1. Try to build a C++ code
2. Recieve this abracadabra as a response.
"
microsoft/vscode,2023-08-16 13:52:55,question,CODE NOT RUNNING,"Type: Bug
I tried running a code to extract audio features from a recordings dataset, but the only output in the terminal is different file paths. Someone else tried running it on their computer and it worked for them. Please could you help me fix this issue? Thank you.
VS Code version: Code 1.70.2 (e4503b30fc78200f846c62cf8091b76ff5547662, 2022-08-16T05:37:58.957Z)
OS version: Windows_NT ia32 6.2.9200
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i5 CPU M 560 @ 2.67GHz (4 x 2660)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: unavailable_off raw_draw: disabled_off_ok skia_renderer: enabled_on video_decode: enabled video_encode: unavailable_off vulkan: disabled_off webgl: enabled webgl2: unavailable_off|
|Load (avg)|undefined|
|Memory (System)|1.86GB (0.19GB free)|
|Process Argv||
|Screen Reader|no|
|VM|0%|
Extensions (5)
Extension|Author (truncated)|Version
---|---|---
python|ms-|2022.16.1
vscode-pylance|ms-|2023.1.10
jupyter|ms-|2022.7.1102252217
jupyter-keymap|ms-|1.1.0
jupyter-renderers|ms-|1.0.9
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vsdfh931:30280409
vshan820:30294714
vstes263:30335439
vscod805:30301674
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
py29gd2263:30792226
vsclangdf:30486550
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
3biah626:30602489
f6dab269:30613381
showlangstatbar:30737416
pythonfmttext:30731395
pythoncmv:30756943
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofb:30804716
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
diffeditorv2:30812749
```
"
microsoft/vscode,2023-08-16 02:41:13,question,to sets of pythan,"Type: Bug
I have Pythan from the extintions and one from Axs and when i type in a pythan code it shows me the same code i typed in how to i repair it for one set of code?
VS Code version: Code 1.81.1 (Universal) (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:20:33.924Z)
OS version: Darwin x64 22.6.0
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i5-8210Y CPU @ 1.60GHz (4 x 1600)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled metal: disabled_off multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|2, 11, 34|
|Memory (System)|8.00GB (0.10GB free)|
|Process Argv|--crash-reporter-id 87b7a8a5-bf43-4a47-bc81-11da23b14e63|
|Screen Reader|no|
|VM|0%|
Extensions (16)
Extension|Author (truncated)|Version
---|---|---
ruff|cha|2023.32.0
python-environment-manager|don|1.0.4
codewiz|fel|0.0.2
vscode-docker|ms-|1.26.0
black-formatter|ms-|2023.5.12151008
python|ms-|2023.15.12221007
vscode-pylance|ms-|2023.8.21
remote-containers|ms-|0.305.0
remote-ssh|ms-|0.102.0
remote-ssh-edit|ms-|0.86.0
remote-wsl|ms-|0.81.0
vscode-remote-extensionpack|ms-|0.24.0
remote-explorer|ms-|0.4.1
remote-server|ms-|1.4.0
autodocstring|njp|0.6.1
even-better-toml|tam|0.19.2
A/B Experiments
```
vsliv368cf:30146710
vsreu685:30147344
python383:30185418
vspor879:30202332
vspor708:30202333
vspor363:30204092
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscoreces:30445986
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
vsclangdc:30486549
c4g48928:30535728
dsvsc012cf:30540253
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
282f8724:30602487
f6dab269:30613381
showlangstatbar:30737416
03d35959:30757346
pythonfmttext:30731395
pythoncmv:30756943
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofa:30804715
pythonnosmt12:30797651
pythonidxpt:30805730
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
```
"
microsoft/vscode,2023-08-15 06:17:39,question,MacOS M1 VScode Github copilot chat icon no show,"
Does this issue occur when all extensions are disabled?: Yes/No
- VS Code Version:
- OS Version:
Steps to Reproduce:
1. After installed ""Github copilot chat"" and reloaded vscode, no extension on side bar
Versions:
macoOS: 12.6.8
vscode:
Version: 1.81.1
Commit: 6c3e3dba23e8fadc360aed75ce363ba185c49794
Date: 2023-08-09T22:40:25.698Z (5 days ago)
Electron: 22.3.18
ElectronBuildId: 22689846
Chromium: 108.0.5359.215
Node.js: 16.17.1
V8: 10.8.168.25-electron.0
OS: Darwin arm64 21.6.0
Copilot Chat: Both of below version do not work:
v0.7.2023081101 (Pre-Release)
v0.6.0
"
microsoft/vscode,2023-08-14 14:23:24,question,Inform user about workspace tsdk.,"
### Problem
Due to security restrictions, we cannot overwrite the tsdk without user consent. It's annoying, but understandable...
The problem is that this is not really visible to the user. This can causes several issues for the team members...
For example, some plugins just stop working when using the build-in version instead of the workspace TSDK version.
The dev doesn't know about this and may only notice it much later. It's annoying and time consuming. Just avoidable.
**.vscode/settings.json** (workspace)
```json
{
""typescript.tsdk"": ""node_modules\\\\typescript\\\\lib"",
}
```
[Docs](https://code.visualstudio.com/docs/typescript/typescript-compiling#_using-newer-typescript-versions)
### Solution
This feature reqeust or improvement is about implementing a better way to inform the user about the workspace setting and make it easier to switch the correct version.
We could display a bold blinking modal in the center of the screen, similar to ad banners. ""Click here to win."" But seriously... It must be prominent visible and intuitively understandable. We could show a persistent notification until the user makes a choice. ...
What's exactly the problem with the workspace setting? Potentially everyone could set any value? A path to an NPM package with vulnerabilities or other problems? We could validate the value of `typescript.tsdk` with a allow list like ""mode_modules/typescript/lib"" which is the official typescript package. But ok, this does not cover to outdated versions that may have security risks... Anyway, this should be visible to the user. Setting the tsdk in the workspace settings is done for serious reasons and should be used within the whole dev team.
It might look like the ""Do you trust the authors of the files in this folder?"" dialog. Something like:
> This project recommends using the workspace's TypeScript TSDK. Do you trust this project and want to allow that?
And check this on every start. It seems sometimes, the version is switched back for some reasons. I cannot reproduce. But checking that on every start is not a bad idea. And remain displayed until the user makes a decision.
![demo](https://github.com/microsoft/vscode/assets/60390085/cc4e07c6-6466-443a-abfd-ce01ab36d8ba)
We could also say, just load the tsdk setting of the workspace if the user confirmed the already existing trust-dialog.
### Info
Version: 1.81.1 (user setup)
Electron: 22.3.18
Chromium: 108.0.5359.215
Node.js: 16.17.1
OS: Windows_NT x64 10.0.19045"
microsoft/vscode,2023-08-14 07:36:41,question,is it possible to setup Mqtt connection from a server to vs code extension,"
I am developing a Source control extension for vscode , and I want to establish an mqtt connection between extension and server, so that after activation of the extension if there is any event or change happened on server side, the extension will get the notification of it.
is this possible and how?
I am trying to look if, vs code allows to include such type of subscription to the server, without hampering the other processes in the extension.
I tried to execute a CLI command, which will keep listening to the server, but the executor method was just executing it and coming out of the method, it was not staying to read the sdt output, and if I tried to set an time interval, then other process were getting queued up
so now rather than executing any command, I want to create a method which will create mqtt connection with the server
"
microsoft/vscode,2023-08-12 10:06:09,question,comment supprimer waka time Api key,"
Type: Feature Request
je voudrais supprimer waka time Api key
VS Code version: Code 1.81.1 (6c3e3dba23e8fadc360aed75ce363ba185c49794, 2023-08-09T22:22:42.175Z)
OS version: Windows_NT x64 10.0.19045
Modes:
"
microsoft/vscode,2023-08-11 11:19:14,question,reload on the local host site not responding,"Type: Performance Issue
After startup while on my project the local host is not responding and if i type npm start on node.js command prompt it gives this message below
C:\\Users\\User>npm start
npm ERR! code ENOENT
npm ERR! syscall open
npm ERR! path C:\\Users\\User\\package.json
npm ERR! errno -4058
npm ERR! enoent Could not read package.json: Error: ENOENT: no such file or directory, open 'C:\\Users\\User\\package.json'
npm ERR! enoent This is related to npm not being able to find a file.
npm ERR! enoent
npm ERR! A complete log of this run can be found in: C:\\Users\\User\\AppData\\Local\\npm-cache\\_logs\\2023-08-11T10_21_03_535Z-debug-0.log
C:\\Users\\User>
VS Code version: Code 1.81.0 (6445d93c81ebe42c4cbd7a60712e0b17d9463e97, 2023-08-02T12:37:13.485Z)
OS version: Windows_NT x64 10.0.19045
Modes:
System Info
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz (8 x 1992)|
|GPU Status|2d_canvas: enabled canvas_oop_rasterization: disabled_off direct_rendering_display_compositor: disabled_off_ok gpu_compositing: enabled multiple_raster_threads: enabled_on opengl: enabled_on rasterization: enabled raw_draw: disabled_off_ok video_decode: enabled video_encode: enabled vulkan: disabled_off webgl: enabled webgl2: enabled webgpu: enabled|
|Load (avg)|undefined|
|Memory (System)|7.90GB (3.30GB free)|
|Process Argv|--crash-reporter-id bfe49888-c0e3-47a0-9708-693722c43bc6|
|Screen Reader|no|
|VM|0%|
Process Info
```
CPU % Mem MB PID Process
1 92 15340 code main
1 136 144 gpu-process
3 161 3728 window [1] (Message.js - reactp - Visual Studio Code)
0 69 7892 ptyHost
0 38 12444 C:\\WINDOWS\\System32\\WindowsPowerShell\\v1.0\\powershell.exe -noexit -command ""try { . \\""c:\\Users\\User\\Desktop\\Microsoft VS Code\\resources\\app\\out\\vs\\workbench\\contrib\\terminal\\browser\\media\\shellIntegration.ps1\\"" } catch {}""
0 6 12548 conpty-agent
0 26 8544 crashpad-handler
3 90 10932 window [3] (Issue Reporter)
0 78 11380 extensionHost [1]
0 79 12028 electron-nodejs (""C:\\Users\\User\\Desktop\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node --max-old-space-size=3072 ""c:\\Users\\User\\Desktop\\Microsoft VS Code\\resources\\app\\extensions\\node_modules\\typescript\\lib\\tsserver.js"" --serverMode partialSemantic --useInferredProjectPerProjectRoot --disableAutomaticTypingAcquisition --cancellationPipeName C:\\Users\\User\\AppData\\Local\\Temp\\vscode-typescript\\ebd781def187ffd9e89e\\tscancellation-666ce688eecc1201e015.tmp* --locale en --noGetErrOnBackgroundUpdate --validateDefaultNpmLocation --useNodeIpc)
0 82 13104 electron-nodejs (""C:\\Users\\User\\Desktop\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node --max-old-space-size=3072 ""c:\\Users\\User\\Desktop\\Microsoft VS Code\\resources\\app\\extensions\\node_modules\\typescript\\lib\\tsserver.js"" --useInferredProjectPerProjectRoot --enableTelemetry --cancellationPipeName C:\\Users\\User\\AppData\\Local\\Temp\\vscode-typescript\\ebd781def187ffd9e89e\\tscancellation-fde1f8ef9373787702c5.tmp* --locale en --noGetErrOnBackgroundUpdate --validateDefaultNpmLocation --useNodeIpc)
0 69 6592 electron-nodejs (""C:\\Users\\User\\Desktop\\Microsoft VS Code\\Code.exe"" --ms-enable-electron-run-as-node ""c:/Users/User/Desktop/Microsoft VS Code/resources/app/extensions/node_modules/typescript/lib/typingsInstaller.js"" --globalTypingsCacheLocation C:/Users/User/AppData/Local/Microsoft/TypeScript/5.1 --enableTelemetry --typesMapLocation ""c:/Users/User/Desktop/Microsoft VS Code/resources/app/extensions/node_modules/typescript/lib/typesMap.json"" --validateDefaultNpmLocation)
0 83 13404 shared-process
0 66 15600 fileWatcher [1]
0 42 15956 utility-network-service
```
Workspace Info
```
| Window (Message.js - reactp - Visual Studio Code)
| Folder (reactp): 26 files
| File types: js(7) json(3) png(2) css(2) gitignore(1) ico(1) html(1)
| txt(1) md(1) svg(1)
| Conf files: package.json(1);
```
Extensions: none
A/B Experiments
```
vsliv368:30146709
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
vswsl492cf:30256860
vslsvsres303:30308271
vserr242:30382549
pythontb:30283811
vsjup518:30340749
pythonptprofiler:30281270
vsdfh931cf:30280410
vshan820:30294714
vstes263:30335439
vscod805cf:30301675
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593:30376534
pythonvs932:30410667
vsclangdf:30486550
c4g48928:30535728
dsvsc012:30540252
pynewext54:30695312
azure-dev_surveyone:30548225
vsccc:30803844
3biah626:30602489
89544117:30613380
showlangstatbar:30737416
vsctsb:30748421
03d35959:30757346
pythonfmttext:30731395
pythoncmv:30756943
fixshowwlkth:30771522
showindicator:30805244
pythongtdpath:30769146
i26e3531:30792625
gsofa:30804715
pythonnosmt12:30797651
pythonidxptcf:30805731
pythonnoceb:30805159
dsvsc013:30795093
dsvsc014:30804076
```
"
microsoft/vscode,2023-08-10 16:22:56,question,"I'm not able to make changes to the settings, everything I change appears this sentence ""Unable to record in user settings. Open user settings to correct errors / warnings and try again."" and does not save.","{
""editor.minimap.renderCharacters"": true,
""workbench.editorAssociations"": {
""*.jfif"": ""default""
},
""powermode.enabled"": true,
""workbench.colorTheme"": ""Omni"",
""php.validate.executablePath"": """"
if(isset($_POST['url']) && strlen($_POST['url']) == 0 ) {
}
if(isset($_POST['acao']) && $_POST['acao'] == 'enviar'){
}
if(isset($_FILES['arquivo']) && $_FILES['arquivo']['error'] === UPLOAD_ERR_OK) {
$arquivo = $_FILES['arquivo'];
$arquivonome = $_POST['arquivo'];
} else {
// Trate o caso de nenhum arquivo ter sido enviado
// Por exemplo, atribuir valores padrão ou mostrar uma mensagem de erro
}
$nome = $_POST['nome'];
$email = $_POST['email'];
$assunto = $_POST['assunto'];
$arquivo = $_FILES['arquivo'];
$arquivonome = $_POST['arquivo'];
$mensagem = $_POST['mensagem'];
$data = date('d/m/Y H:i');
if($nome == '' || $email == '' || $assunto == '' || $mensagem == ''){
echo '';
if (!class_exists('PHPMailer')) {
require_once(""phpmailer/class.phpmailer.php"");
}
// Inicia a classe PHPMailer
$mail = new PHPMailer();
$mail->CharSet = ""UTF-8"";
// Define os dados do servidor e tipo de conexão
//$mail->IsSMTP(); // Define que a mensagem será SMTP
$mail->Host = ""mail.seusite.com.br""; // Endereço do servidor SMTP
$mail->SMTPAuth = true;
$mail->Port = '465';
$mail->Username = 'webmaster@seusite.com.br'; // Usuário do servidor SMTP
$mail->Password = '123456'; // Senha do servidor SMTP
// Define o remetente
$mail->From = $_POST['email']; // Seu e-mail
$mail->FromName = $_POST['nome']; // Seu nome
$mail->Sender = 'contato@seusite.com.br';
// Define os destinatário(s)
$mail->AddAddress('contato@seusite.com.br');
//$mail->AddCC('atendimento@seusite.com.br', 'Ciclano'); // Copia
//$mail->AddBCC('fulano@seusite.com.br.com.br', 'Fulano da Silva'); // Cópia Oculta
// Define os dados técnicos da Mensagem
$mail->IsHTML(true); // Define que o e-mail será enviado como HTML
//$mail->CharSet = 'iso-8859-1'; // Charset da mensagem (opcional)
// Define a mensagem (Texto e Assunto)
$local = ""Mensagem do Artigo sobre Formulario - Seu site"";
$mail->Subject = $local; // Assunto da mensagem
$mail->Body = '
| | |