{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "name": "C3_W1_Lab_2_TFX_Tuner_and_Trainer.ipynb", "provenance": [], "collapsed_sections": [] }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "gpuClass": "standard", "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { "14c48dad3a62457e95d14c5969617a95": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_fabc8d79861e415588fa87b61592cc92", "IPY_MODEL_fead14ca47824dc5bad3a89c53a598ff", "IPY_MODEL_4400762ea5bd402ab5063c46732de816" ], "layout": "IPY_MODEL_a8c5f80562474004a8ece3892c9a1660" } }, "fabc8d79861e415588fa87b61592cc92": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0183fa20298845198493992279557500", "placeholder": "​", "style": "IPY_MODEL_cbeaba2795a3416cb03c63eadd5c6784", "value": "Dl Completed...: 100%" } }, "fead14ca47824dc5bad3a89c53a598ff": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_5c21be423d2545a496bafbb124c7a54b", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_f38a51e9325f43628f0962c056896603", "value": 1 } }, "4400762ea5bd402ab5063c46732de816": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_610f895d406c403cb4fe5552e39860e1", "placeholder": "​", "style": "IPY_MODEL_795e2d771c90482b87dd503e819b15c8", "value": " 4/4 [00:04<00:00, 1.23s/ url]" } }, "a8c5f80562474004a8ece3892c9a1660": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, 
"grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "0183fa20298845198493992279557500": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "cbeaba2795a3416cb03c63eadd5c6784": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "5c21be423d2545a496bafbb124c7a54b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "f38a51e9325f43628f0962c056896603": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": 
"@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "610f895d406c403cb4fe5552e39860e1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "795e2d771c90482b87dd503e819b15c8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "2a019d148bd042ab93869b5ab4691d98": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_34fb080edea84f04a1244c27b8c77476", "IPY_MODEL_62cd37ee65484395a56af6fd09dcffae", "IPY_MODEL_22dd37f87e4f4835b5dd46f5906a9c50" ], "layout": "IPY_MODEL_6c724cb6f42e4ddfbce30bf91394661c" } }, "34fb080edea84f04a1244c27b8c77476": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_4977b600d6ba4bbc958fbc50becd078d", "placeholder": "​", "style": "IPY_MODEL_c432812346a64951b6a6f86dc0c57f74", "value": "Dl Size...: 100%" } }, "62cd37ee65484395a56af6fd09dcffae": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_182781ed74b0428f9aa4eabdc4ec5106", "max": 1, "min": 0, 
"orientation": "horizontal", "style": "IPY_MODEL_79f67094663e4b5f90232aa629df848f", "value": 1 } }, "22dd37f87e4f4835b5dd46f5906a9c50": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_b36f3f17f367414d9574f86b40485c35", "placeholder": "​", "style": "IPY_MODEL_2ae7180e489b415b8f6ede1040b0d81a", "value": " 29/29 [00:04<00:00, 11.73 MiB/s]" } }, "6c724cb6f42e4ddfbce30bf91394661c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4977b600d6ba4bbc958fbc50becd078d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c432812346a64951b6a6f86dc0c57f74": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "182781ed74b0428f9aa4eabdc4ec5106": { "model_module": "@jupyter-widgets/base", "model_name": 
"LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "79f67094663e4b5f90232aa629df848f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "b36f3f17f367414d9574f86b40485c35": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2ae7180e489b415b8f6ede1040b0d81a": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "587cb35f14ef4a489ba168f2372489df": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ 
"IPY_MODEL_38b163a029e6451b8ca10b7c1e5a5a74", "IPY_MODEL_695d49869df947179a239d44cc304d93", "IPY_MODEL_565f0096a30d4b69af8fa138427af892" ], "layout": "IPY_MODEL_fbb01ef2dbb3442eaf6fe44f598c30f4" } }, "38b163a029e6451b8ca10b7c1e5a5a74": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_5b26d50f3ae04705a5810826ecbf9ae1", "placeholder": "​", "style": "IPY_MODEL_9c7d271caadc4a6883469034a58b7ba4", "value": "Extraction completed...: 100%" } }, "695d49869df947179a239d44cc304d93": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ca7f1f33823b4357b7a9c569eead8a80", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_5a9cbc4f46624f1785045779d7488f19", "value": 1 } }, "565f0096a30d4b69af8fa138427af892": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_3c355066e45f4f53ad1514b7381d71ff", "placeholder": "​", "style": "IPY_MODEL_c834f2b9ab444e46a8ea6d8f7fc95147", "value": " 4/4 [00:04<00:00, 1.36s/ file]" } }, "fbb01ef2dbb3442eaf6fe44f598c30f4": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "5b26d50f3ae04705a5810826ecbf9ae1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", 
"_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9c7d271caadc4a6883469034a58b7ba4": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "ca7f1f33823b4357b7a9c569eead8a80": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "5a9cbc4f46624f1785045779d7488f19": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "3c355066e45f4f53ad1514b7381d71ff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, 
"grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c834f2b9ab444e46a8ea6d8f7fc95147": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "23e42f4aa10346f98b852898b0fd63eb": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_6e6bccbca81346618968ddeb05b937fe", "IPY_MODEL_f5287636d07e4cd193d085611e5823ab", "IPY_MODEL_c64160ec90d94174b286218662fdcdab" ], "layout": "IPY_MODEL_f01255ad84d541938532dc5edd72590a" } }, "6e6bccbca81346618968ddeb05b937fe": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_fd9763902ed241dea6f217a735f7adde", "placeholder": "​", "style": "IPY_MODEL_38fa113736f048c48bb4d4250fbc06a8", "value": "" } }, "f5287636d07e4cd193d085611e5823ab": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "info", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0eeaab0ae4d3467593ea36a894e95157", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_986dd41579d5466facff4534973fc155", "value": 1 } }, "c64160ec90d94174b286218662fdcdab": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_3d2a0087e78b474ca0ee33ee4d6b1fc6", "placeholder": "​", "style": "IPY_MODEL_377b3e912b124131b7cf151ba7c24e47", "value": " 59896/0 [00:42<00:00, 1548.15 examples/s]" } }, "f01255ad84d541938532dc5edd72590a": { "model_module": 
"@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "fd9763902ed241dea6f217a735f7adde": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "38fa113736f048c48bb4d4250fbc06a8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "0eeaab0ae4d3467593ea36a894e95157": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, 
"max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "986dd41579d5466facff4534973fc155": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "3d2a0087e78b474ca0ee33ee4d6b1fc6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "377b3e912b124131b7cf151ba7c24e47": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "300bf99edd8146b8a9426ee9abc01974": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_8f184c41d0d04ce5a002631515bb7a6a", "IPY_MODEL_0dcaccd02b99425294116cb8b7cc803c", "IPY_MODEL_e4f3faec638f47febcfafeb7bd9ff017" ], "layout": "IPY_MODEL_62f1279d8bcb41bcbbf4d47363eb84ff" } }, "8f184c41d0d04ce5a002631515bb7a6a": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ca1046cfa8e84e36a708982aa0a85bef", "placeholder": "​", "style": "IPY_MODEL_189333397a164614b31f1ae701908fca", "value": "100%" } }, 
"0dcaccd02b99425294116cb8b7cc803c": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "danger", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_cf17a2d991364fb5abf688f879199aff", "max": 60000, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_c91e30cfa8f742bc9e3c1e6828fc8609", "value": 59999 } }, "e4f3faec638f47febcfafeb7bd9ff017": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_3be165e1eb044c6db50eee5f3717e1a1", "placeholder": "​", "style": "IPY_MODEL_4c657a6146e4490f97f8ce39aee3beb6", "value": " 59999/60000 [00:00<00:00, 208222.30 examples/s]" } }, "62f1279d8bcb41bcbbf4d47363eb84ff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ca1046cfa8e84e36a708982aa0a85bef": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, 
"right": null, "top": null, "visibility": null, "width": null } }, "189333397a164614b31f1ae701908fca": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "cf17a2d991364fb5abf688f879199aff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c91e30cfa8f742bc9e3c1e6828fc8609": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "3be165e1eb044c6db50eee5f3717e1a1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4c657a6146e4490f97f8ce39aee3beb6": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, 
"_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "caae62fb8b074f618fb9b00d602725c0": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_c68af1ca52f94778afec1151c7e9f508", "IPY_MODEL_d07656452ce74b019129b5968f3339a0", "IPY_MODEL_9e816381e9c74b9897b8737f824a9a7f" ], "layout": "IPY_MODEL_209a917093a7472e9f93ac222df237d7" } }, "c68af1ca52f94778afec1151c7e9f508": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_bc23e131dc444d23918b0c9d8499dd04", "placeholder": "​", "style": "IPY_MODEL_4ecc4f8469b849b7a7582c27ec726d21", "value": "" } }, "d07656452ce74b019129b5968f3339a0": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "info", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_2bdce4f6dce34bb3805fbf839286090c", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_0f4978b1aeb94df9870a3e70149df983", "value": 1 } }, "9e816381e9c74b9897b8737f824a9a7f": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0841ef877edd4441831d18008056d08e", "placeholder": "​", "style": "IPY_MODEL_9fdefc8fc2df4c7c8f4011c08deec1ef", "value": " 9871/0 [00:06<00:00, 1519.50 examples/s]" } }, "209a917093a7472e9f93ac222df237d7": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, 
"max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bc23e131dc444d23918b0c9d8499dd04": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4ecc4f8469b849b7a7582c27ec726d21": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "2bdce4f6dce34bb3805fbf839286090c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "0f4978b1aeb94df9870a3e70149df983": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "0841ef877edd4441831d18008056d08e": { "model_module": "@jupyter-widgets/base", "model_name": 
"LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9fdefc8fc2df4c7c8f4011c08deec1ef": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "a8ef6e43873f4d8aa9d6767ca36f3f11": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_ae10e2d4b104449eb2d6ec971a08537d", "IPY_MODEL_1ecc77266de5499cb00ced59b939b90c", "IPY_MODEL_d670f78db66a4a73b501f7eadc63982c" ], "layout": "IPY_MODEL_462d40c2408a4cb6bbbd4ca4bea5b6e0" } }, "ae10e2d4b104449eb2d6ec971a08537d": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_93891a42d1104fb6bd513e34cd48895e", "placeholder": "​", "style": "IPY_MODEL_1dc6f18a88564f3fa709f9334dcd501e", "value": "100%" } }, "1ecc77266de5499cb00ced59b939b90c": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "danger", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_102e5805439144209cca2d5eac7fdd59", "max": 10000, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_963dc9f3b27044a5aa77942a616e1197", "value": 9999 } }, "d670f78db66a4a73b501f7eadc63982c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", 
"state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_625ab8d0ea4744cea1979bf8d3671b0b", "placeholder": "​", "style": "IPY_MODEL_c555b2d46dc44f3a8e9efc49651e2222", "value": " 9999/10000 [00:00<00:00, 119760.26 examples/s]" } }, "462d40c2408a4cb6bbbd4ca4bea5b6e0": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "93891a42d1104fb6bd513e34cd48895e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "1dc6f18a88564f3fa709f9334dcd501e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "102e5805439144209cca2d5eac7fdd59": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", 
"_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "963dc9f3b27044a5aa77942a616e1197": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "625ab8d0ea4744cea1979bf8d3671b0b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c555b2d46dc44f3a8e9efc49651e2222": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } } } } }, "cells": [ { "cell_type": "markdown", "metadata": { "id": "G4vEEajUbvNc" }, "source": [ "# Ungraded Lab: Hyperparameter tuning and model training with TFX\n", "\n", "In this lab, you will be again doing hyperparameter tuning but this time, it will be within a [Tensorflow Extended (TFX)](https://www.tensorflow.org/tfx/) pipeline. \n", "\n", "We have already introduced some TFX components in Course 2 of this specialization related to data ingestion, validation, and transformation. 
"\n", "TFX pipeline diagram\n", "image source: https://www.tensorflow.org/tfx/guide\n", "\n", "* The *Tuner* utilizes the [Keras Tuner](https://keras-team.github.io/keras-tuner/) API under the hood to tune your model's hyperparameters.\n", "* You can get the best set of hyperparameters from the Tuner component and feed it into the *Trainer* component to optimize your model for training.\n", "\n", "You will again be working with the [FashionMNIST](https://github.com/zalandoresearch/fashion-mnist) dataset and will feed it through the TFX pipeline up to the Trainer component. You will quickly review the earlier components from Course 2, then focus on the two new components introduced.\n", "\n", "Let's begin!\n", "\n" ] }, { "cell_type": "markdown", "metadata": { "id": "MUXex9ctTuDB" }, "source": [ "## Setup" ] }, { "cell_type": "markdown", "metadata": { "id": "YEFWSi_-umNz" }, "source": [ "### Install TFX\n", "\n", "You will first install [TFX](https://www.tensorflow.org/tfx), a framework for developing end-to-end machine learning pipelines." ] }, { "cell_type": "code", "metadata": { "id": "IqR2PQG4ZaZ0", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "60ee146a-1707-485d-b95e-b685781ce85b" }, "source": [ "!pip install -U pip\n", "!pip install -U tfx==1.3\n", "\n", "# These are downgraded to work with the packages used by TFX 1.3\n", "# Please do not delete these lines; removing them will cause import errors in the next cell\n", "!pip install --upgrade tensorflow-estimator==2.6.0\n", "!pip install --upgrade keras==2.6.0" ], "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Requirement already satisfied: pip in /usr/local/lib/python3.7/dist-packages (21.1.3)\n", "Collecting pip\n", " Downloading pip-22.1.2-py3-none-any.whl (2.1 MB)\n", "\u001b[K |████████████████████████████████| 2.1 MB 36.2 MB/s \n", "\u001b[?25hInstalling collected packages: pip\n", " Attempting uninstall: pip\n", " Found existing installation: pip 21.1.3\n", " Uninstalling pip-21.1.3:\n", " Successfully uninstalled pip-21.1.3\n", "Successfully installed pip-22.1.2\n", "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Collecting tfx==1.3\n", " Downloading tfx-1.3.0-py3-none-any.whl (2.4 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m31.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: click<8,>=7 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (7.1.2)\n", "Collecting google-cloud-bigquery<3,>=2.26.0\n", " Downloading google_cloud_bigquery-2.34.4-py2.py3-none-any.whl (206 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m206.6/206.6 kB\u001b[0m \u001b[31m21.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: protobuf<4,>=3.13 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (3.17.3)\n", "Requirement already satisfied: tensorflow-hub<0.13,>=0.9.0 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (0.12.0)\n", "Collecting tensorflow-data-validation<1.4.0,>=1.3.0\n", " Downloading tensorflow_data_validation-1.3.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.4 MB)\n", "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.4/1.4 MB\u001b[0m \u001b[31m36.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tfx-bsl<1.4.0,>=1.3.0\n", " Downloading tfx_bsl-1.3.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (19.0 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.0/19.0 MB\u001b[0m \u001b[31m43.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: jinja2<4,>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (2.11.3)\n", "Collecting pyarrow<3,>=1\n", " Downloading pyarrow-2.0.0-cp37-cp37m-manylinux2014_x86_64.whl (17.7 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m17.7/17.7 MB\u001b[0m \u001b[31m46.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-apitools<1,>=0.5\n", " Downloading google_apitools-0.5.32-py3-none-any.whl (135 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m135.7/135.7 kB\u001b[0m \u001b[31m7.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tensorflow-serving-api!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15\n", " Downloading tensorflow_serving_api-2.9.0-py2.py3-none-any.whl (37 kB)\n", "Collecting ml-pipelines-sdk==1.3.0\n", " Downloading ml_pipelines_sdk-1.3.0-py3-none-any.whl (1.2 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m32.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting absl-py<0.13,>=0.9\n", " Downloading absl_py-0.12.0-py3-none-any.whl (129 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m129.4/129.4 kB\u001b[0m \u001b[31m13.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: grpcio<2,>=1.28.1 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (1.46.3)\n", "Requirement already satisfied: pyyaml<6,>=3.12 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (3.13)\n", "Requirement already satisfied: google-api-python-client<2,>=1.8 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (1.12.11)\n", "Requirement already satisfied: portpicker<2,>=1.3.1 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (1.3.9)\n", "Collecting tensorflow-model-analysis<0.35,>=0.34.1\n", " Downloading tensorflow_model_analysis-0.34.1-py3-none-any.whl (1.8 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m50.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting ml-metadata<1.4.0,>=1.3.0\n", " Downloading ml_metadata-1.3.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (6.5 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.5/6.5 MB\u001b[0m \u001b[31m54.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting apache-beam[gcp]<3,>=2.32\n", " Downloading apache_beam-2.40.0-cp37-cp37m-manylinux2010_x86_64.whl (10.9 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10.9/10.9 MB\u001b[0m \u001b[31m54.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting kubernetes<13,>=10.0.1\n", " Downloading kubernetes-12.0.1-py2.py3-none-any.whl (1.7 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m53.4 
MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting attrs<21,>=19.3.0\n", " Downloading attrs-20.3.0-py2.py3-none-any.whl (49 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.3/49.3 kB\u001b[0m \u001b[31m5.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting keras-tuner<2,>=1.0.4\n", " Downloading keras_tuner-1.1.2-py3-none-any.whl (133 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m133.7/133.7 kB\u001b[0m \u001b[31m14.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-aiplatform<2,>=0.5.0\n", " Downloading google_cloud_aiplatform-1.15.0-py2.py3-none-any.whl (2.1 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.1/2.1 MB\u001b[0m \u001b[31m76.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting docker<5,>=4.1\n", " Downloading docker-4.4.4-py2.py3-none-any.whl (147 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m147.0/147.0 kB\u001b[0m \u001b[31m17.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting numpy<1.20,>=1.16\n", " Downloading numpy-1.19.5-cp37-cp37m-manylinux2010_x86_64.whl (14.8 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m14.8/14.8 MB\u001b[0m \u001b[31m28.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting packaging<21,>=20\n", " Downloading packaging-20.9-py2.py3-none-any.whl (40 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m40.9/40.9 kB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2 in /usr/local/lib/python3.7/dist-packages (from tfx==1.3) (2.8.2+zzzcolab20220527125636)\n", "Collecting tensorflow-transform<1.4.0,>=1.3.0\n", " Downloading tensorflow_transform-1.3.0-py3-none-any.whl (407 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m407.7/407.7 kB\u001b[0m \u001b[31m39.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from absl-py<0.13,>=0.9->tfx==1.3) (1.15.0)\n", "Collecting proto-plus<2,>=1.7.1\n", " Downloading proto_plus-1.20.6-py3-none-any.whl (46 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.4/46.4 kB\u001b[0m \u001b[31m6.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: httplib2<0.21.0,>=0.8 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (0.17.4)\n", "Requirement already satisfied: python-dateutil<3,>=2.8.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (2.8.2)\n", "Requirement already satisfied: pydot<2,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (1.3.0)\n", "Requirement already satisfied: pytz>=2018.3 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (2022.1)\n", "Collecting cloudpickle<3,>=2.1.0\n", " Downloading cloudpickle-2.1.0-py3-none-any.whl (25 kB)\n", "Collecting pymongo<4.0.0,>=3.8.0\n", " Downloading pymongo-3.12.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (508 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m 
\u001b[32m508.1/508.1 kB\u001b[0m \u001b[31m45.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting requests<3.0.0,>=2.24.0\n", " Downloading requests-2.28.1-py3-none-any.whl (62 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 kB\u001b[0m \u001b[31m7.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: crcmod<2.0,>=1.7 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (1.7)\n", "Collecting dill<0.3.2,>=0.3.1.1\n", " Downloading dill-0.3.1.1.tar.gz (151 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m152.0/152.0 kB\u001b[0m \u001b[31m2.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "Requirement already satisfied: typing-extensions>=3.7.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (4.1.1)\n", "Collecting orjson<4.0\n", " Downloading orjson-3.7.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (272 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m272.8/272.8 kB\u001b[0m \u001b[31m26.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting fastavro<2,>=0.23.6\n", " Downloading fastavro-1.5.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.3/2.3 MB\u001b[0m \u001b[31m78.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting hdfs<3.0.0,>=2.1.0\n", " Downloading hdfs-2.7.0-py3-none-any.whl (34 kB)\n", "Requirement already satisfied: cachetools<5,>=3.1.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (4.2.4)\n", "Collecting google-cloud-dlp<4,>=3.0.0\n", " Downloading google_cloud_dlp-3.7.1-py2.py3-none-any.whl (118 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m118.2/118.2 kB\u001b[0m \u001b[31m15.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-pubsublite<2,>=1.2.0\n", " Downloading google_cloud_pubsublite-1.4.2-py2.py3-none-any.whl (265 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m265.8/265.8 kB\u001b[0m \u001b[31m28.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-bigquery-storage>=2.6.3\n", " Downloading google_cloud_bigquery_storage-2.13.2-py2.py3-none-any.whl (180 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m180.2/180.2 kB\u001b[0m \u001b[31m20.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-spanner<2,>=1.13.0\n", " Downloading google_cloud_spanner-1.19.3-py2.py3-none-any.whl (255 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m255.6/255.6 kB\u001b[0m \u001b[31m28.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting grpcio-gcp<1,>=0.2.2\n", " Downloading grpcio_gcp-0.2.2-py2.py3-none-any.whl (9.4 kB)\n", "Requirement already satisfied: google-cloud-datastore<2,>=1.8.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (1.8.0)\n", "Requirement already satisfied: google-auth<3,>=1.18.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (1.35.0)\n", "Requirement already satisfied: 
google-cloud-core<2,>=0.28.1 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]<3,>=2.32->tfx==1.3) (1.0.3)\n", "Collecting google-cloud-videointelligence<2,>=1.8.0\n", " Downloading google_cloud_videointelligence-1.16.3-py2.py3-none-any.whl (183 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m183.9/183.9 kB\u001b[0m \u001b[31m24.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-auth-httplib2<0.2.0,>=0.1.0\n", " Downloading google_auth_httplib2-0.1.0-py2.py3-none-any.whl (9.3 kB)\n", "Collecting google-apitools<1,>=0.5\n", " Downloading google-apitools-0.5.31.tar.gz (173 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m173.5/173.5 kB\u001b[0m \u001b[31m22.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "Collecting google-cloud-language<2,>=1.3.0\n", " Downloading google_cloud_language-1.3.2-py2.py3-none-any.whl (83 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m83.6/83.6 kB\u001b[0m \u001b[31m11.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-vision<2,>=0.38.0\n", " Downloading google_cloud_vision-1.0.2-py2.py3-none-any.whl (435 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m435.1/435.1 kB\u001b[0m \u001b[31m42.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-pubsub<3,>=2.1.0\n", " Downloading google_cloud_pubsub-2.13.0-py2.py3-none-any.whl (234 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m234.5/234.5 kB\u001b[0m \u001b[31m28.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-bigtable<2,>=0.31.1\n", " Downloading google_cloud_bigtable-1.7.2-py2.py3-none-any.whl (267 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m267.7/267.7 kB\u001b[0m \u001b[31m29.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-recommendations-ai<=0.2.0,>=0.1.0\n", " Downloading google_cloud_recommendations_ai-0.2.0-py2.py3-none-any.whl (180 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m180.2/180.2 kB\u001b[0m \u001b[31m14.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting websocket-client>=0.32.0\n", " Downloading websocket_client-1.3.3-py3-none-any.whl (54 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.3/54.3 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: google-api-core<3dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client<2,>=1.8->tfx==1.3) (1.31.6)\n", "Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client<2,>=1.8->tfx==1.3) (3.0.1)\n", "Collecting fasteners>=0.14\n", " Downloading fasteners-0.17.3-py3-none-any.whl (18 kB)\n", "Requirement already satisfied: oauth2client>=1.4.12 in /usr/local/lib/python3.7/dist-packages (from google-apitools<1,>=0.5->tfx==1.3) (4.1.3)\n", "Collecting google-cloud-storage<3.0.0dev,>=1.32.0\n", " Downloading google_cloud_storage-2.4.0-py2.py3-none-any.whl (106 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m107.0/107.0 kB\u001b[0m \u001b[31m14.1 
MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-resource-manager<3.0.0dev,>=1.3.3\n", " Downloading google_cloud_resource_manager-1.5.1-py2.py3-none-any.whl (230 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m230.2/230.2 kB\u001b[0m \u001b[31m28.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting protobuf<4,>=3.13\n", " Downloading protobuf-3.20.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (1.0 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m59.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-cloud-core<2,>=0.28.1\n", " Downloading google_cloud_core-1.7.2-py2.py3-none-any.whl (28 kB)\n", "Collecting google-resumable-media<3.0dev,>=0.6.0\n", " Downloading google_resumable_media-2.3.3-py2.py3-none-any.whl (76 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.9/76.9 kB\u001b[0m \u001b[31m10.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2<4,>=2.7.3->tfx==1.3) (2.0.1)\n", "Requirement already satisfied: tensorboard in /usr/local/lib/python3.7/dist-packages (from keras-tuner<2,>=1.0.4->tfx==1.3) (2.8.0)\n", "Collecting kt-legacy\n", " Downloading kt_legacy-1.0.4-py3-none-any.whl (9.6 kB)\n", "Requirement already satisfied: ipython in /usr/local/lib/python3.7/dist-packages (from keras-tuner<2,>=1.0.4->tfx==1.3) (5.5.0)\n", "Requirement already satisfied: setuptools>=21.0.0 in /usr/local/lib/python3.7/dist-packages (from kubernetes<13,>=10.0.1->tfx==1.3) (57.4.0)\n", "Requirement already satisfied: certifi>=14.05.14 in /usr/local/lib/python3.7/dist-packages (from kubernetes<13,>=10.0.1->tfx==1.3) (2022.6.15)\n", "Requirement already satisfied: requests-oauthlib in /usr/local/lib/python3.7/dist-packages (from kubernetes<13,>=10.0.1->tfx==1.3) (1.3.1)\n", "Requirement already satisfied: urllib3>=1.24.2 in /usr/local/lib/python3.7/dist-packages (from kubernetes<13,>=10.0.1->tfx==1.3) (1.24.3)\n", "Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging<21,>=20->tfx==1.3) (3.0.9)\n", "Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (0.2.0)\n", "Requirement already satisfied: keras<2.9,>=2.8.0rc0 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (2.8.0)\n", "Requirement already satisfied: keras-preprocessing>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (1.1.2)\n", "Requirement already satisfied: gast>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (0.5.3)\n", "Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (3.1.0)\n", "Collecting tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading tensorflow-2.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (511.7 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m 
\u001b[32m511.7/511.7 MB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting keras<2.10.0,>=2.9.0rc0\n", " Downloading keras-2.9.0-py2.py3-none-any.whl (1.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m19.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tensorboard\n", " Downloading tensorboard-2.9.1-py3-none-any.whl (5.8 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.8/5.8 MB\u001b[0m \u001b[31m90.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading tensorflow-2.9.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (511.7 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m511.7/511.7 MB\u001b[0m \u001b[31m3.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.8.2%2Bzzzcolab20220629235552-cp37-cp37m-linux_x86_64.whl (668.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m668.6/668.6 MB\u001b[0m \u001b[31m2.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.8.2%2Bzzzcolab20220523105045-cp37-cp37m-linux_x86_64.whl (668.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m668.6/668.6 MB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading tensorflow-2.8.2-cp37-cp37m-manylinux2010_x86_64.whl (497.9 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m497.9/497.9 MB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.8.1%2Bzzzcolab20220518083849-cp37-cp37m-linux_x86_64.whl (668.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m668.6/668.6 MB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.8.1%2Bzzzcolab20220516111314-cp37-cp37m-linux_x86_64.whl (668.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m668.6/668.6 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading tensorflow-2.8.1-cp37-cp37m-manylinux2010_x86_64.whl (497.9 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m497.9/497.9 MB\u001b[0m \u001b[31m3.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.8.0%2Bzzzcolab20220506162203-cp37-cp37m-linux_x86_64.whl (668.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m668.3/668.3 MB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tf-estimator-nightly==2.8.0.dev2021122109\n", " Downloading tf_estimator_nightly-2.8.0.dev2021122109-py2.py3-none-any.whl (462 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m462.5/462.5 kB\u001b[0m \u001b[31m37.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting 
tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading tensorflow-2.8.0-cp37-cp37m-manylinux2010_x86_64.whl (497.5 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m497.5/497.5 MB\u001b[0m \u001b[31m3.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.7.3%2Bzzzcolab20220523111007-cp37-cp37m-linux_x86_64.whl (671.4 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m671.4/671.4 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tensorflow-estimator<2.8,~=2.7.0rc0\n", " Downloading tensorflow_estimator-2.7.0-py2.py3-none-any.whl (463 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m463.1/463.1 kB\u001b[0m \u001b[31m40.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting keras<2.8,>=2.7.0rc0\n", " Downloading keras-2.7.0-py2.py3-none-any.whl (1.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m35.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (3.3.0)\n", "Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (1.1.0)\n", "Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (1.14.1)\n", "Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (1.6.3)\n", "Requirement already satisfied: wheel<1.0,>=0.32.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (0.37.1)\n", "Requirement already satisfied: flatbuffers<3.0,>=1.12 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (2.0)\n", "Requirement already satisfied: libclang>=9.0.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (14.0.1)\n", "Collecting protobuf<4,>=3.13\n", " Downloading protobuf-3.19.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.1 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.1/1.1 MB\u001b[0m \u001b[31m32.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: tensorflow-io-gcs-filesystem>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (0.26.0)\n", "Collecting gast<0.5.0,>=0.2.1\n", " Downloading gast-0.4.0-py3-none-any.whl (9.8 kB)\n", "Requirement already satisfied: pandas<2,>=1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-data-validation<1.4.0,>=1.3.0->tfx==1.3) (1.3.5)\n", "Collecting joblib<0.15,>=0.12\n", " Downloading joblib-0.14.1-py2.py3-none-any.whl (294 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m294.9/294.9 kB\u001b[0m 
\u001b[31m30.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tensorflow-metadata<1.3,>=1.2\n", " Downloading tensorflow_metadata-1.2.0-py3-none-any.whl (48 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m48.5/48.5 kB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting ipython\n", " Downloading ipython-7.34.0-py3-none-any.whl (793 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m793.8/793.8 kB\u001b[0m \u001b[31m34.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: ipywidgets<8,>=7 in /usr/local/lib/python3.7/dist-packages (from tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (7.7.0)\n", "Requirement already satisfied: scipy<2,>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (1.4.1)\n", "INFO: pip is looking at multiple versions of tensorflow-serving-api to determine which version is compatible with other requirements. This could take a while.\n", "Collecting tensorflow-serving-api!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15\n", " Downloading tensorflow_serving_api-2.8.2-py2.py3-none-any.whl (37 kB)\n", " Downloading tensorflow_serving_api-2.8.0-py2.py3-none-any.whl (37 kB)\n", " Downloading tensorflow_serving_api-2.7.0-py2.py3-none-any.whl (37 kB)\n", " Downloading tensorflow_serving_api-2.6.5-py2.py3-none-any.whl (37 kB)\n", "Collecting tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.6.5%2Bzzzcolab20220523104206-cp37-cp37m-linux_x86_64.whl (570.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m570.3/570.3 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading tensorflow-2.6.5-cp37-cp37m-manylinux2010_x86_64.whl (464.2 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m464.2/464.2 MB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hINFO: pip is looking at multiple versions of tensorflow-transform to determine which version is compatible with other requirements. 
This could take a while.\n", "Collecting tensorflow-serving-api!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15\n", " Downloading tensorflow_serving_api-2.6.3-py2.py3-none-any.whl (37 kB)\n", "Collecting tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.6.4%2Bzzzcolab20220516125453-cp37-cp37m-linux_x86_64.whl (570.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m570.3/570.3 MB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading tensorflow-2.6.4-cp37-cp37m-manylinux2010_x86_64.whl (464.2 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m464.2/464.2 MB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading tensorflow-2.6.3-cp37-cp37m-manylinux2010_x86_64.whl (463.8 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m463.8/463.8 MB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tensorflow-serving-api!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15\n", " Downloading tensorflow_serving_api-2.6.2-py2.py3-none-any.whl (37 kB)\n", "Collecting tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading tensorflow-2.6.2-cp37-cp37m-manylinux2010_x86_64.whl (458.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m458.3/458.3 MB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting tensorflow-serving-api!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15\n", " Downloading tensorflow_serving_api-2.6.1-py2.py3-none-any.whl (37 kB)\n", "Collecting tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading tensorflow-2.6.1-cp37-cp37m-manylinux2010_x86_64.whl (458.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m458.3/458.3 MB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hINFO: pip is looking at multiple versions of tensorflow-serving-api to determine which version is compatible with other requirements. This could take a while.\n", "Collecting tensorflow-serving-api!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15\n", " Downloading tensorflow_serving_api-2.6.0-py2.py3-none-any.whl (37 kB)\n", "Collecting tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2\n", " Downloading https://us-python.pkg.dev/colab-wheels/public/tensorflow/tensorflow-2.6.0%2Bzzzcolab20220506153740-cp37-cp37m-linux_x86_64.whl (564.4 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m564.4/564.4 MB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting clang~=5.0\n", " Downloading clang-5.0.tar.gz (30 kB)\n", " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "Collecting wrapt>=1.11.0\n", " Downloading wrapt-1.12.1.tar.gz (27 kB)\n", " Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", "Collecting typing-extensions>=3.7.0\n", " Downloading typing_extensions-3.7.4.3-py3-none-any.whl (22 kB)\n", "Collecting flatbuffers<3.0,>=1.12\n", " Downloading flatbuffers-1.12-py2.py3-none-any.whl (15 kB)\n", "Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3dev,>=1.21.0->google-api-python-client<2,>=1.8->tfx==1.3) (1.56.2)\n", "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.18.0->apache-beam[gcp]<3,>=2.32->tfx==1.3) (4.8)\n", "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.18.0->apache-beam[gcp]<3,>=2.32->tfx==1.3) (0.2.8)\n", "Collecting grpc-google-iam-v1<0.13dev,>=0.12.3\n", " Downloading grpc_google_iam_v1-0.12.4-py2.py3-none-any.whl (26 kB)\n", "Collecting grpcio-status>=1.16.0\n", " Downloading grpcio_status-1.47.0-py3-none-any.whl (10.0 kB)\n", "Collecting overrides<7.0.0,>=6.0.1\n", " Downloading overrides-6.1.0-py3-none-any.whl (14 kB)\n", "Collecting google-cloud-storage<3.0.0dev,>=1.32.0\n", " Downloading google_cloud_storage-2.3.0-py2.py3-none-any.whl (107 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m107.1/107.1 kB\u001b[0m \u001b[31m14.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Downloading google_cloud_storage-2.2.1-py2.py3-none-any.whl (107 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m107.1/107.1 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting google-crc32c<2.0dev,>=1.0\n", " Downloading google_crc32c-1.3.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (38 kB)\n", "Requirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py>=2.9.0->tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,<3,>=1.15.2->tfx==1.3) (1.5.2)\n", "Requirement already satisfied: docopt in /usr/local/lib/python3.7/dist-packages (from hdfs<3.0.0,>=2.1.0->apache-beam[gcp]<3,>=2.32->tfx==1.3) (0.6.2)\n", "Requirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (4.4.2)\n", "Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (5.1.1)\n", "Collecting prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0\n", " Downloading prompt_toolkit-3.0.30-py3-none-any.whl (381 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m381.7/381.7 kB\u001b[0m \u001b[31m32.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: backcall in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (0.2.0)\n", "Requirement already satisfied: jedi>=0.16 in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (0.18.1)\n", "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (0.1.3)\n", "Requirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (0.7.5)\n", "Requirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (2.6.1)\n", "Requirement already satisfied: 
pexpect>4.3 in /usr/local/lib/python3.7/dist-packages (from ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (4.8.0)\n", "Requirement already satisfied: ipykernel>=4.5.1 in /usr/local/lib/python3.7/dist-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (4.10.1)\n", "Requirement already satisfied: widgetsnbextension~=3.6.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (3.6.0)\n", "Requirement already satisfied: nbformat>=4.2.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (5.4.0)\n", "Requirement already satisfied: ipython-genutils~=0.2.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.2.0)\n", "Requirement already satisfied: jupyterlab-widgets>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (1.1.0)\n", "Requirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.7/dist-packages (from oauth2client>=1.4.12->google-apitools<1,>=0.5->tfx==1.3) (0.4.8)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam[gcp]<3,>=2.32->tfx==1.3) (2.10)\n", "Requirement already satisfied: charset-normalizer<3,>=2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam[gcp]<3,>=2.32->tfx==1.3) (2.0.12)\n", "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx==1.3) (1.0.1)\n", "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx==1.3) (1.8.1)\n", "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx==1.3) (3.3.7)\n", "Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx==1.3) (0.6.1)\n", "Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx==1.3) (0.4.6)\n", "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib->kubernetes<13,>=10.0.1->tfx==1.3) (3.2.0)\n", "Collecting grpcio<2,>=1.28.1\n", " Downloading grpcio-1.47.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.5 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.5/4.5 MB\u001b[0m \u001b[31m61.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: tornado>=4.0 in /usr/local/lib/python3.7/dist-packages (from ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (5.1.1)\n", "Requirement already satisfied: jupyter-client in /usr/local/lib/python3.7/dist-packages (from ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (5.3.5)\n", "Requirement already satisfied: parso<0.9.0,>=0.8.0 in /usr/local/lib/python3.7/dist-packages (from jedi>=0.16->ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (0.8.3)\n", "Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard->keras-tuner<2,>=1.0.4->tfx==1.3) 
(4.11.4)\n", "Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (4.3.3)\n", "Requirement already satisfied: jupyter-core in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (4.10.0)\n", "Requirement already satisfied: fastjsonschema in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (2.15.3)\n", "Collecting typing-utils>=0.0.3\n", " Downloading typing_utils-0.1.0-py3-none-any.whl (10 kB)\n", "Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.7/dist-packages (from pexpect>4.3->ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (0.7.0)\n", "Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->keras-tuner<2,>=1.0.4->tfx==1.3) (0.2.5)\n", "Requirement already satisfied: notebook>=4.4.1 in /usr/local/lib/python3.7/dist-packages (from widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (5.3.1)\n", "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard->keras-tuner<2,>=1.0.4->tfx==1.3) (3.8.0)\n", "Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema>=2.6->nbformat>=4.2.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.18.1)\n", "Requirement already satisfied: importlib-resources>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema>=2.6->nbformat>=4.2.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (5.7.1)\n", "Requirement already satisfied: terminado>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.13.3)\n", "Requirement already satisfied: nbconvert in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (5.6.1)\n", "Requirement already satisfied: Send2Trash in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (1.8.0)\n", "Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.7/dist-packages (from jupyter-client->ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (23.1.0)\n", "Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (1.5.0)\n", "Requirement already satisfied: defusedxml in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.7.1)\n", "Requirement already satisfied: entrypoints>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.4)\n", "Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from 
nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.8.4)\n", "Requirement already satisfied: testpath in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.6.0)\n", "Requirement already satisfied: bleach in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (5.0.0)\n", "Requirement already satisfied: webencodings in /usr/local/lib/python3.7/dist-packages (from bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.35,>=0.34.1->tfx==1.3) (0.5.1)\n", "Building wheels for collected packages: google-apitools, clang, dill, wrapt\n", " Building wheel for google-apitools (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Created wheel for google-apitools: filename=google_apitools-0.5.31-py3-none-any.whl size=131039 sha256=12792bbb192212a7b2a8e4fca526a7a40124b9a99df855107da1d4693aaa0bcf\n", " Stored in directory: /root/.cache/pip/wheels/19/b5/2f/1cc3cf2b31e7a9cd1508731212526d9550271274d351c96f16\n", " Building wheel for clang (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Created wheel for clang: filename=clang-5.0-py3-none-any.whl size=30694 sha256=8b0601c614fa84bf7dde017c9e25c4d0359e691557b1202a6ac0875d8bfbdcf0\n", " Stored in directory: /root/.cache/pip/wheels/98/91/04/971b4c587cf47ae952b108949b46926f426c02832d120a082a\n", " Building wheel for dill (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Created wheel for dill: filename=dill-0.3.1.1-py3-none-any.whl size=78544 sha256=850855ee2ae8f7ab6429d70ec8411bbe7f62cc026f2bb211ae4e2314b8e44719\n", " Stored in directory: /root/.cache/pip/wheels/a4/61/fd/c57e374e580aa78a45ed78d5859b3a44436af17e22ca53284f\n", " Building wheel for wrapt (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", " Created wheel for wrapt: filename=wrapt-1.12.1-cp37-cp37m-linux_x86_64.whl size=68719 sha256=e63cd05a6e18767e996732096a0a68b59b1c34c73b1f3a2e382534a0e89f05a6\n", " Stored in directory: /root/.cache/pip/wheels/62/76/4c/aa25851149f3f6d9785f6c869387ad82b3fd37582fa8147ac6\n", "Successfully built google-apitools clang dill wrapt\n", "Installing collected packages: wrapt, typing-extensions, tensorflow-estimator, kt-legacy, keras, joblib, flatbuffers, clang, websocket-client, typing-utils, requests, pymongo, protobuf, prompt-toolkit, packaging, orjson, numpy, grpcio, google-crc32c, gast, fasteners, fastavro, dill, cloudpickle, attrs, absl-py, pyarrow, proto-plus, overrides, ml-metadata, ipython, hdfs, grpcio-gcp, google-resumable-media, docker, tensorflow-metadata, kubernetes, grpcio-status, google-auth-httplib2, google-apitools, apache-beam, grpc-google-iam-v1, google-cloud-core, tensorflow, ml-pipelines-sdk, keras-tuner, google-cloud-vision, google-cloud-videointelligence, google-cloud-storage, google-cloud-spanner, google-cloud-resource-manager, google-cloud-recommendations-ai, google-cloud-pubsub, google-cloud-language, google-cloud-dlp, google-cloud-bigtable, google-cloud-bigquery-storage, google-cloud-bigquery, tensorflow-serving-api, google-cloud-pubsublite, google-cloud-aiplatform, tfx-bsl, tensorflow-transform, tensorflow-model-analysis, tensorflow-data-validation, tfx\n", " Attempting uninstall: wrapt\n", " Found existing installation: wrapt 1.14.1\n", " Uninstalling wrapt-1.14.1:\n", " Successfully uninstalled wrapt-1.14.1\n", " Attempting uninstall: typing-extensions\n", " Found existing installation: typing_extensions 4.1.1\n", " Uninstalling typing_extensions-4.1.1:\n", " Successfully uninstalled typing_extensions-4.1.1\n", " Attempting uninstall: tensorflow-estimator\n", " Found existing installation: tensorflow-estimator 2.8.0\n", " Uninstalling tensorflow-estimator-2.8.0:\n", " Successfully uninstalled tensorflow-estimator-2.8.0\n", " Attempting uninstall: keras\n", " Found existing installation: keras 2.8.0\n", " Uninstalling keras-2.8.0:\n", " Successfully uninstalled keras-2.8.0\n", " Attempting uninstall: joblib\n", " Found existing installation: joblib 1.1.0\n", " Uninstalling joblib-1.1.0:\n", " Successfully uninstalled joblib-1.1.0\n", " Attempting uninstall: flatbuffers\n", " Found existing installation: flatbuffers 2.0\n", " Uninstalling flatbuffers-2.0:\n", " Successfully uninstalled flatbuffers-2.0\n", " Attempting uninstall: requests\n", " Found existing installation: requests 2.23.0\n", " Uninstalling requests-2.23.0:\n", " Successfully uninstalled requests-2.23.0\n", " Attempting uninstall: pymongo\n", " Found existing installation: pymongo 4.1.1\n", " Uninstalling pymongo-4.1.1:\n", " Successfully uninstalled pymongo-4.1.1\n", " Attempting uninstall: protobuf\n", " Found existing installation: protobuf 3.17.3\n", " Uninstalling protobuf-3.17.3:\n", " Successfully uninstalled protobuf-3.17.3\n", " Attempting uninstall: prompt-toolkit\n", " Found existing installation: prompt-toolkit 1.0.18\n", " Uninstalling prompt-toolkit-1.0.18:\n", " Successfully uninstalled prompt-toolkit-1.0.18\n", " Attempting uninstall: packaging\n", " Found existing installation: packaging 21.3\n", " Uninstalling packaging-21.3:\n", " Successfully uninstalled packaging-21.3\n", " Attempting uninstall: numpy\n", " Found existing installation: numpy 1.21.6\n", " Uninstalling numpy-1.21.6:\n", " Successfully uninstalled numpy-1.21.6\n", " Attempting uninstall: 
grpcio\n", " Found existing installation: grpcio 1.46.3\n", " Uninstalling grpcio-1.46.3:\n", " Successfully uninstalled grpcio-1.46.3\n", " Attempting uninstall: gast\n", " Found existing installation: gast 0.5.3\n", " Uninstalling gast-0.5.3:\n", " Successfully uninstalled gast-0.5.3\n", " Attempting uninstall: dill\n", " Found existing installation: dill 0.3.5.1\n", " Uninstalling dill-0.3.5.1:\n", " Successfully uninstalled dill-0.3.5.1\n", " Attempting uninstall: cloudpickle\n", " Found existing installation: cloudpickle 1.3.0\n", " Uninstalling cloudpickle-1.3.0:\n", " Successfully uninstalled cloudpickle-1.3.0\n", " Attempting uninstall: attrs\n", " Found existing installation: attrs 21.4.0\n", " Uninstalling attrs-21.4.0:\n", " Successfully uninstalled attrs-21.4.0\n", " Attempting uninstall: absl-py\n", " Found existing installation: absl-py 1.1.0\n", " Uninstalling absl-py-1.1.0:\n", " Successfully uninstalled absl-py-1.1.0\n", " Attempting uninstall: pyarrow\n", " Found existing installation: pyarrow 6.0.1\n", " Uninstalling pyarrow-6.0.1:\n", " Successfully uninstalled pyarrow-6.0.1\n", " Attempting uninstall: ipython\n", " Found existing installation: ipython 5.5.0\n", " Uninstalling ipython-5.5.0:\n", " Successfully uninstalled ipython-5.5.0\n", " Attempting uninstall: google-resumable-media\n", " Found existing installation: google-resumable-media 0.4.1\n", " Uninstalling google-resumable-media-0.4.1:\n", " Successfully uninstalled google-resumable-media-0.4.1\n", " Attempting uninstall: tensorflow-metadata\n", " Found existing installation: tensorflow-metadata 1.8.0\n", " Uninstalling tensorflow-metadata-1.8.0:\n", " Successfully uninstalled tensorflow-metadata-1.8.0\n", " Attempting uninstall: google-auth-httplib2\n", " Found existing installation: google-auth-httplib2 0.0.4\n", " Uninstalling google-auth-httplib2-0.0.4:\n", " Successfully uninstalled google-auth-httplib2-0.0.4\n", " Attempting uninstall: google-cloud-core\n", " Found existing installation: google-cloud-core 1.0.3\n", " Uninstalling google-cloud-core-1.0.3:\n", " Successfully uninstalled google-cloud-core-1.0.3\n", " Attempting uninstall: tensorflow\n", " Found existing installation: tensorflow 2.8.2+zzzcolab20220527125636\n", " Uninstalling tensorflow-2.8.2+zzzcolab20220527125636:\n", " Successfully uninstalled tensorflow-2.8.2+zzzcolab20220527125636\n", " Attempting uninstall: google-cloud-storage\n", " Found existing installation: google-cloud-storage 1.18.1\n", " Uninstalling google-cloud-storage-1.18.1:\n", " Successfully uninstalled google-cloud-storage-1.18.1\n", " Attempting uninstall: google-cloud-language\n", " Found existing installation: google-cloud-language 1.2.0\n", " Uninstalling google-cloud-language-1.2.0:\n", " Successfully uninstalled google-cloud-language-1.2.0\n", " Attempting uninstall: google-cloud-bigquery-storage\n", " Found existing installation: google-cloud-bigquery-storage 1.1.2\n", " Uninstalling google-cloud-bigquery-storage-1.1.2:\n", " Successfully uninstalled google-cloud-bigquery-storage-1.1.2\n", " Attempting uninstall: google-cloud-bigquery\n", " Found existing installation: google-cloud-bigquery 1.21.0\n", " Uninstalling google-cloud-bigquery-1.21.0:\n", " Successfully uninstalled google-cloud-bigquery-1.21.0\n", "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", "xarray-einstats 0.2.2 requires numpy>=1.21, but you have numpy 1.19.5 which is incompatible.\n", "pandas-gbq 0.13.3 requires google-cloud-bigquery[bqstorage,pandas]<2.0.0dev,>=1.11.1, but you have google-cloud-bigquery 2.34.4 which is incompatible.\n", "multiprocess 0.70.13 requires dill>=0.3.5.1, but you have dill 0.3.1.1 which is incompatible.\n", "jupyter-console 5.2.0 requires prompt-toolkit<2.0.0,>=1.0.0, but you have prompt-toolkit 3.0.30 which is incompatible.\n", "gym 0.17.3 requires cloudpickle<1.7.0,>=1.2.0, but you have cloudpickle 2.1.0 which is incompatible.\n", "google-colab 1.0.0 requires ipython~=5.5.0, but you have ipython 7.34.0 which is incompatible.\n", "google-colab 1.0.0 requires requests~=2.23.0, but you have requests 2.28.1 which is incompatible.\n", "datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\n", "albumentations 0.1.12 requires imgaug<0.2.7,>=0.2.5, but you have imgaug 0.2.9 which is incompatible.\u001b[0m\u001b[31m\n", "\u001b[0mSuccessfully installed absl-py-0.12.0 apache-beam-2.40.0 attrs-20.3.0 clang-5.0 cloudpickle-2.1.0 dill-0.3.1.1 docker-4.4.4 fastavro-1.5.2 fasteners-0.17.3 flatbuffers-1.12 gast-0.4.0 google-apitools-0.5.31 google-auth-httplib2-0.1.0 google-cloud-aiplatform-1.15.0 google-cloud-bigquery-2.34.4 google-cloud-bigquery-storage-2.13.2 google-cloud-bigtable-1.7.2 google-cloud-core-1.7.2 google-cloud-dlp-3.7.1 google-cloud-language-1.3.2 google-cloud-pubsub-2.13.0 google-cloud-pubsublite-1.4.2 google-cloud-recommendations-ai-0.2.0 google-cloud-resource-manager-1.5.1 google-cloud-spanner-1.19.3 google-cloud-storage-2.2.1 google-cloud-videointelligence-1.16.3 google-cloud-vision-1.0.2 google-crc32c-1.3.0 google-resumable-media-2.3.3 grpc-google-iam-v1-0.12.4 grpcio-1.47.0 grpcio-gcp-0.2.2 grpcio-status-1.47.0 hdfs-2.7.0 ipython-7.34.0 joblib-0.14.1 keras-2.7.0 keras-tuner-1.1.2 kt-legacy-1.0.4 kubernetes-12.0.1 ml-metadata-1.3.0 ml-pipelines-sdk-1.3.0 numpy-1.19.5 orjson-3.7.7 overrides-6.1.0 packaging-20.9 prompt-toolkit-3.0.30 proto-plus-1.20.6 protobuf-3.19.4 pyarrow-2.0.0 pymongo-3.12.3 requests-2.28.1 tensorflow-2.6.0+zzzcolab20220506153740 tensorflow-data-validation-1.3.0 tensorflow-estimator-2.7.0 tensorflow-metadata-1.2.0 tensorflow-model-analysis-0.34.1 tensorflow-serving-api-2.6.0 tensorflow-transform-1.3.0 tfx-1.3.0 tfx-bsl-1.3.0 typing-extensions-3.7.4.3 typing-utils-0.1.0 websocket-client-1.3.3 wrapt-1.12.1\n", "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0mLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Collecting tensorflow-estimator==2.6.0\n", " Downloading tensorflow_estimator-2.6.0-py2.py3-none-any.whl (462 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m462.9/462.9 kB\u001b[0m \u001b[31m2.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: tensorflow-estimator\n", " Attempting uninstall: tensorflow-estimator\n", " Found existing installation: tensorflow-estimator 2.7.0\n", " Uninstalling tensorflow-estimator-2.7.0:\n", " Successfully uninstalled tensorflow-estimator-2.7.0\n", "Successfully installed tensorflow-estimator-2.6.0\n", "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0mLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Collecting keras==2.6.0\n", " Downloading keras-2.6.0-py2.py3-none-any.whl (1.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m53.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: keras\n", " Attempting uninstall: keras\n", " Found existing installation: keras 2.7.0\n", " Uninstalling keras-2.7.0:\n", " Successfully uninstalled keras-2.7.0\n", "Successfully installed keras-2.6.0\n", "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0m" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "Yr2ulfeNvvom" }, "source": [ "*Note: In Google Colab, you need to restart the runtime at this point to finalize updating the packages you just installed. You can do so by clicking the `Restart Runtime` button at the end of the output cell above (after installation), or by selecting `Runtime > Restart Runtime` in the menu bar. **Please do not proceed to the next section without restarting.** You can also ignore the errors about version incompatibility of some of the bundled packages because we won't be using those in this notebook.*" ] }, { "cell_type": "markdown", "metadata": { "id": "T_MPhjWTvNSr" }, "source": [ "### Imports\n", "\n", "You will then import the packages you will need for this exercise."
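, "\n", "If you want to double-check that the pinned versions took effect after restarting the runtime, you can optionally run a quick sanity check like the sketch below. It is not part of the original lab flow; it simply prints the versions installed by the cell above.\n", "\n", "```python\n", "import tensorflow as tf\n", "import keras\n", "\n", "# After the restart, these should report the builds pinned above:\n", "# a TensorFlow 2.6.x build and Keras 2.6.0.\n", "print(tf.__version__)\n", "print(keras.__version__)\n", "```"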
] }, { "cell_type": "code", "metadata": { "id": "_leAIdFKAxAD" }, "source": [ "import tensorflow as tf\n", "from tensorflow import keras\n", "import tensorflow_datasets as tfds\n", "\n", "import os\n", "import pprint\n", "\n", "from tfx.components import ImportExampleGen\n", "from tfx.components import ExampleValidator\n", "from tfx.components import SchemaGen\n", "from tfx.components import StatisticsGen\n", "from tfx.components import Transform\n", "from tfx.components import Tuner\n", "from tfx.components import Trainer\n", "\n", "from tfx.proto import example_gen_pb2\n", "from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext" ], "execution_count": 1, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "ReV_UXOgCZvx" }, "source": [ "## Download and prepare the dataset\n", "\n", "As mentioned earlier, you will be using the Fashion MNIST dataset just like in the previous lab. This will allow you to compare the similarities and differences when using Keras Tuner as a standalone library and within an ML pipeline.\n", "\n", "You will first need to setup the directories that you will use to store the dataset, as well as the pipeline artifacts and metadata store." ] }, { "cell_type": "code", "metadata": { "id": "cNQlwf5_t8Fc" }, "source": [ "# Location of the pipeline metadata store\n", "_pipeline_root = './pipeline/'\n", "\n", "# Directory of the raw data files\n", "_data_root = './data/fmnist'\n", "\n", "# Temporary directory\n", "tempdir = './tempdir'" ], "execution_count": 2, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "BqwtVwAsslgN" }, "source": [ "# Create the dataset directory\n", "!mkdir -p {_data_root}\n", "\n", "# Create the TFX pipeline files directory\n", "!mkdir {_pipeline_root}" ], "execution_count": 3, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "JyjfgG0ax9uv" }, "source": [ "You will now download FashionMNIST from [Tensorflow Datasets](https://www.tensorflow.org/datasets). The `with_info` flag will be set to `True` so you can display information about the dataset in the next cell (i.e. using `ds_info`)." 
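] }, { "cell_type": "markdown", "metadata": {}, "source": [
"Once the download below completes, `ds_info` exposes the dataset metadata through the standard `tfds.core.DatasetInfo` attributes. A minimal sketch of the fields you might inspect (run it after the download cell):\n",
"\n",
"```python\n",
"# `ds` and `ds_info` are returned by tfds.load() in the cell below.\n",
"print(ds_info.name)                           # fashion_mnist\n",
"print(ds_info.splits['train'].num_examples)   # 60000\n",
"print(ds_info.features['image'].shape)        # (28, 28, 1)\n",
"print(ds_info.features['label'].num_classes)  # 10\n",
"```"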
] }, { "cell_type": "code", "metadata": { "id": "aUzvq3WFvKyl", "colab": { "base_uri": "https://localhost:8080/", "height": 299, "referenced_widgets": [ "14c48dad3a62457e95d14c5969617a95", "fabc8d79861e415588fa87b61592cc92", "fead14ca47824dc5bad3a89c53a598ff", "4400762ea5bd402ab5063c46732de816", "a8c5f80562474004a8ece3892c9a1660", "0183fa20298845198493992279557500", "cbeaba2795a3416cb03c63eadd5c6784", "5c21be423d2545a496bafbb124c7a54b", "f38a51e9325f43628f0962c056896603", "610f895d406c403cb4fe5552e39860e1", "795e2d771c90482b87dd503e819b15c8", "2a019d148bd042ab93869b5ab4691d98", "34fb080edea84f04a1244c27b8c77476", "62cd37ee65484395a56af6fd09dcffae", "22dd37f87e4f4835b5dd46f5906a9c50", "6c724cb6f42e4ddfbce30bf91394661c", "4977b600d6ba4bbc958fbc50becd078d", "c432812346a64951b6a6f86dc0c57f74", "182781ed74b0428f9aa4eabdc4ec5106", "79f67094663e4b5f90232aa629df848f", "b36f3f17f367414d9574f86b40485c35", "2ae7180e489b415b8f6ede1040b0d81a", "587cb35f14ef4a489ba168f2372489df", "38b163a029e6451b8ca10b7c1e5a5a74", "695d49869df947179a239d44cc304d93", "565f0096a30d4b69af8fa138427af892", "fbb01ef2dbb3442eaf6fe44f598c30f4", "5b26d50f3ae04705a5810826ecbf9ae1", "9c7d271caadc4a6883469034a58b7ba4", "ca7f1f33823b4357b7a9c569eead8a80", "5a9cbc4f46624f1785045779d7488f19", "3c355066e45f4f53ad1514b7381d71ff", "c834f2b9ab444e46a8ea6d8f7fc95147", "23e42f4aa10346f98b852898b0fd63eb", "6e6bccbca81346618968ddeb05b937fe", "f5287636d07e4cd193d085611e5823ab", "c64160ec90d94174b286218662fdcdab", "f01255ad84d541938532dc5edd72590a", "fd9763902ed241dea6f217a735f7adde", "38fa113736f048c48bb4d4250fbc06a8", "0eeaab0ae4d3467593ea36a894e95157", "986dd41579d5466facff4534973fc155", "3d2a0087e78b474ca0ee33ee4d6b1fc6", "377b3e912b124131b7cf151ba7c24e47", "300bf99edd8146b8a9426ee9abc01974", "8f184c41d0d04ce5a002631515bb7a6a", "0dcaccd02b99425294116cb8b7cc803c", "e4f3faec638f47febcfafeb7bd9ff017", "62f1279d8bcb41bcbbf4d47363eb84ff", "ca1046cfa8e84e36a708982aa0a85bef", "189333397a164614b31f1ae701908fca", "cf17a2d991364fb5abf688f879199aff", "c91e30cfa8f742bc9e3c1e6828fc8609", "3be165e1eb044c6db50eee5f3717e1a1", "4c657a6146e4490f97f8ce39aee3beb6", "caae62fb8b074f618fb9b00d602725c0", "c68af1ca52f94778afec1151c7e9f508", "d07656452ce74b019129b5968f3339a0", "9e816381e9c74b9897b8737f824a9a7f", "209a917093a7472e9f93ac222df237d7", "bc23e131dc444d23918b0c9d8499dd04", "4ecc4f8469b849b7a7582c27ec726d21", "2bdce4f6dce34bb3805fbf839286090c", "0f4978b1aeb94df9870a3e70149df983", "0841ef877edd4441831d18008056d08e", "9fdefc8fc2df4c7c8f4011c08deec1ef", "a8ef6e43873f4d8aa9d6767ca36f3f11", "ae10e2d4b104449eb2d6ec971a08537d", "1ecc77266de5499cb00ced59b939b90c", "d670f78db66a4a73b501f7eadc63982c", "462d40c2408a4cb6bbbd4ca4bea5b6e0", "93891a42d1104fb6bd513e34cd48895e", "1dc6f18a88564f3fa709f9334dcd501e", "102e5805439144209cca2d5eac7fdd59", "963dc9f3b27044a5aa77942a616e1197", "625ab8d0ea4744cea1979bf8d3671b0b", "c555b2d46dc44f3a8e9efc49651e2222" ] }, "outputId": "a8283e3c-cdd9-4afa-9289-cb04c7bea2b6" }, "source": [ "# Download the dataset\n", "ds, ds_info = tfds.load('fashion_mnist', data_dir=tempdir, with_info=True)" ], "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mDownloading and preparing dataset fashion_mnist/3.0.1 (download: 29.45 MiB, generated: 36.42 MiB, total: 65.87 MiB) to ./tempdir/fashion_mnist/3.0.1...\u001b[0m\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "Dl Completed...: 0 url [00:00, ? 
url/s]" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "14c48dad3a62457e95d14c5969617a95" } }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "Dl Size...: 0 MiB [00:00, ? MiB/s]" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "2a019d148bd042ab93869b5ab4691d98" } }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "Extraction completed...: 0 file [00:00, ? file/s]" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "587cb35f14ef4a489ba168f2372489df" } }, "metadata": {} }, { "output_type": "stream", "name": "stdout", "text": [ "\n", "\n", "\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "0 examples [00:00, ? examples/s]" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "23e42f4aa10346f98b852898b0fd63eb" } }, "metadata": {} }, { "output_type": "stream", "name": "stdout", "text": [ "Shuffling and writing examples to ./tempdir/fashion_mnist/3.0.1.incomplete573I3J/fashion_mnist-train.tfrecord\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ " 0%| | 0/60000 [00:00\n", ".tfx-object.expanded {\n", " padding: 4px 8px 4px 8px;\n", " background: white;\n", " border: 1px solid #bbbbbb;\n", " box-shadow: 4px 4px 2px rgba(0,0,0,0.05);\n", "}\n", ".tfx-object, .tfx-object * {\n", " font-size: 11pt;\n", "}\n", ".tfx-object > .title {\n", " cursor: pointer;\n", "}\n", ".tfx-object .expansion-marker {\n", " color: #999999;\n", "}\n", ".tfx-object.expanded > .title > .expansion-marker:before {\n", " content: '▼';\n", "}\n", ".tfx-object.collapsed > .title > .expansion-marker:before {\n", " content: '▶';\n", "}\n", ".tfx-object .class-name {\n", " font-weight: bold;\n", "}\n", ".tfx-object .deemphasize {\n", " opacity: 0.5;\n", "}\n", ".tfx-object.collapsed > table.attr-table {\n", " display: none;\n", "}\n", ".tfx-object.expanded > table.attr-table {\n", " display: block;\n", "}\n", ".tfx-object table.attr-table {\n", " border: 2px solid white;\n", " margin-top: 5px;\n", "}\n", ".tfx-object table.attr-table td.attr-name {\n", " vertical-align: top;\n", " font-weight: bold;\n", "}\n", ".tfx-object table.attr-table td.attrvalue {\n", " text-align: left;\n", "}\n", "\n", "\n", "
ExecutionResult (execution_id: 1) for component ImportExampleGen
.outputs['examples']: Channel of type 'Examples' (1 artifact) at uri ./pipeline/ImportExampleGen/examples/1, span 0, split_names ["train", "eval"], version 0
.exec_properties: input_base ./data/fmnist; input_config split 'single_split' (pattern '*'); output split_config: train (hash_buckets 8), eval (hash_buckets 2)
" ] }, "metadata": {}, "execution_count": 9 } ] }, { "cell_type": "code", "metadata": { "id": "dIdWfRWGxvHp", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "63c4f463-8264-4fb1-ca1a-0be7114c6500" }, "source": [ "# Print split names and URI\n", "artifact = example_gen.outputs['examples'].get()[0]\n", "print(artifact.split_names, artifact.uri)" ], "execution_count": 10, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "[\"train\", \"eval\"] ./pipeline/ImportExampleGen/examples/1\n" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "os6NhLaY4oB3" }, "source": [ "### StatisticsGen\n", "\n", "Next, you will compute the statistics of the dataset with the [StatisticsGen](https://www.tensorflow.org/tfx/guide/statsgen) component." ] }, { "cell_type": "code", "metadata": { "id": "pVDS4oEIzZ83", "colab": { "base_uri": "https://localhost:8080/", "height": 694 }, "outputId": "423d979a-ff89-440f-bc3c-3391d5ec99ff" }, "source": [ "# Run StatisticsGen\n", "statistics_gen = StatisticsGen(\n", " examples=example_gen.outputs['examples'])\n", "\n", "context.run(statistics_gen)" ], "execution_count": 13, "outputs": [ { "output_type": "error", "ename": "TypeCheckError", "evalue": "ignored", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mTypeCheckError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m examples=example_gen.outputs['examples'])\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatistics_gen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/experimental/interactive/interactive_context.py\u001b[0m in \u001b[0;36mrun_if_ipython\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# __IPYTHON__ variable is set by IPython, see\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;31m# https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m absl.logging.warning(\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/experimental/interactive/interactive_context.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, component, enable_cache, beam_pipeline_args)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0mtelemetry_utils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLABEL_TFX_RUNNER\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mrunner_label\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m }):\n\u001b[0;32m--> 183\u001b[0;31m \u001b[0mexecution_id\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mlauncher\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlaunch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecution_id\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m return execution_result.ExecutionResult(\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/launcher/base_component_launcher.py\u001b[0m in \u001b[0;36mlaunch\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0mcopy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdeepcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexecution_decision\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minput_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[0mexecution_decision\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 203\u001b[0;31m copy.deepcopy(execution_decision.exec_properties))\n\u001b[0m\u001b[1;32m 204\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m absl.logging.info('Running publisher for %s',\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/launcher/in_process_component_launcher.py\u001b[0m in \u001b[0;36m_run_executor\u001b[0;34m(self, execution_id, input_dict, output_dict, exec_properties)\u001b[0m\n\u001b[1;32m 72\u001b[0m \u001b[0;31m# output_dict can still be changed, specifically properties.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m executor.Do(\n\u001b[0;32m---> 74\u001b[0;31m copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties))\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/components/statistics_gen/executor.py\u001b[0m in \u001b[0;36mDo\u001b[0;34m(self, input_dict, output_dict, exec_properties)\u001b[0m\n\u001b[1;32m 138\u001b[0m \u001b[0mstats_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGenerateStatistics\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstats_options\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0;34m'WriteStatsOutput[%s]'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0msplit\u001b[0m \u001b[0;34m>>\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 140\u001b[0;31m stats_api.WriteStatisticsToBinaryFile(output_path))\n\u001b[0m\u001b[1;32m 141\u001b[0m logging.info('Statistics for split %s written to %s.', split,\n\u001b[1;32m 142\u001b[0m output_uri)\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/apache_beam/pvalue.py\u001b[0m in \u001b[0;36m__or__\u001b[0;34m(self, ptransform)\u001b[0m\n\u001b[1;32m 135\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 136\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__or__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mptransform\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 137\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpipeline\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mptransform\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 138\u001b[0m 
\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/apache_beam/pipeline.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, transform, pvalueish, label)\u001b[0m\n\u001b[1;32m 651\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtransform\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mptransform\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NamedPTransform\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 652\u001b[0m return self.apply(\n\u001b[0;32m--> 653\u001b[0;31m transform.transform, pvalueish, label or transform.label)\n\u001b[0m\u001b[1;32m 654\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 655\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtransform\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mptransform\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mPTransform\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/apache_beam/pipeline.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, transform, pvalueish, label)\u001b[0m\n\u001b[1;32m 661\u001b[0m \u001b[0mold_label\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtransform\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlabel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtransform\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlabel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlabel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 662\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 663\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtransform\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpvalueish\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 664\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 665\u001b[0m \u001b[0mtransform\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlabel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mold_label\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/apache_beam/pipeline.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, transform, pvalueish, label)\u001b[0m\n\u001b[1;32m 710\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 711\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mtype_options\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mtype_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpipeline_type_check\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 712\u001b[0;31m \u001b[0mtransform\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtype_check_outputs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpvalueish_result\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 713\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 714\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtag\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mptransform\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_named_nested_pvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpvalueish_result\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/apache_beam/transforms/ptransform.py\u001b[0m in \u001b[0;36mtype_check_outputs\u001b[0;34m(self, pvalueish)\u001b[0m\n\u001b[1;32m 464\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 465\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtype_check_outputs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpvalueish\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 466\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtype_check_inputs_or_outputs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpvalueish\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 467\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 468\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtype_check_inputs_or_outputs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpvalueish\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_or_output\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/apache_beam/transforms/ptransform.py\u001b[0m in \u001b[0;36mtype_check_inputs_or_outputs\u001b[0;34m(self, pvalueish, input_or_output)\u001b[0m\n\u001b[1;32m 495\u001b[0m \u001b[0mhint\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhint\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 496\u001b[0m \u001b[0mactual_type\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpvalue_\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0melement_type\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 497\u001b[0;31m debug_str=type_hints.debug_str()))\n\u001b[0m\u001b[1;32m 498\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 499\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_infer_output_coder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_type\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_coder\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mTypeCheckError\u001b[0m: Output type hint violation at WriteStatsOutput[train]: expected , got \nFull type hint:\nIOTypeHints[inputs=((,), {}), outputs=((,), {})]\nFile \"\", line 677, in _load_unlocked\nFile \"\", line 728, in exec_module\nFile \"\", line 219, in _call_with_frames_removed\nFile \"/usr/local/lib/python3.7/dist-packages/tensorflow_data_validation/api/stats_api.py\", line 113, in \n class WriteStatisticsToBinaryFile(beam.PTransform):\nFile \"/usr/local/lib/python3.7/dist-packages/apache_beam/typehints/decorators.py\", line 776, in annotate_input_types\n *converted_positional_hints, **converted_keyword_hints)\n\nbased on:\n IOTypeHints[inputs=None, outputs=((,), {})]\n File \"\", line 677, in _load_unlocked\n File \"\", line 728, in exec_module\n File \"\", line 219, in _call_with_frames_removed\n File \"/usr/local/lib/python3.7/dist-packages/tensorflow_data_validation/api/stats_api.py\", 
line 113, in \n class WriteStatisticsToBinaryFile(beam.PTransform):\n File \"/usr/local/lib/python3.7/dist-packages/apache_beam/typehints/decorators.py\", line 863, in annotate_output_types\n f._type_hints = th.with_output_types(return_type_hint) # pylint: disable=protected-access" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "D48bfGK95sES" }, "source": [ "### SchemaGen\n", "\n", "You can then infer the dataset schema with [SchemaGen](https://www.tensorflow.org/tfx/guide/schemagen). This will be used to validate incoming data to ensure that it is formatted correctly." ] }, { "cell_type": "code", "metadata": { "id": "7UhV3Jr7zp7p", "colab": { "base_uri": "https://localhost:8080/", "height": 433 }, "outputId": "b2c5fcb4-6691-40ea-d40b-0163fcb60714" }, "source": [ "# Run SchemaGen\n", "schema_gen = SchemaGen(\n", " statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)\n", "context.run(schema_gen)" ], "execution_count": 14, "outputs": [ { "output_type": "error", "ename": "NotFoundError", "evalue": "ignored", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNotFoundError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/dsl/io/plugins/tensorflow_gfile.py\u001b[0m in \u001b[0;36mlistdir\u001b[0;34m(path)\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 65\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 66\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNotFoundError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/lib/io/file_io.py\u001b[0m in \u001b[0;36mlist_directory_v2\u001b[0;34m(path)\u001b[0m\n\u001b[1;32m 771\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 772\u001b[0;31m message=\"Could not find directory {}\".format(path))\n\u001b[0m\u001b[1;32m 773\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mNotFoundError\u001b[0m: Could not find directory ./pipeline/StatisticsGen/statistics/4/Split-train", "\nThe above exception was the direct cause of the following exception:\n", "\u001b[0;31mNotFoundError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 2\u001b[0m schema_gen = SchemaGen(\n\u001b[1;32m 3\u001b[0m statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mschema_gen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/experimental/interactive/interactive_context.py\u001b[0m in \u001b[0;36mrun_if_ipython\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 
61\u001b[0m \u001b[0;31m# __IPYTHON__ variable is set by IPython, see\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;31m# https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m absl.logging.warning(\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/experimental/interactive/interactive_context.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, component, enable_cache, beam_pipeline_args)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0mtelemetry_utils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLABEL_TFX_RUNNER\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mrunner_label\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m }):\n\u001b[0;32m--> 183\u001b[0;31m \u001b[0mexecution_id\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlauncher\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlaunch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecution_id\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m return execution_result.ExecutionResult(\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/launcher/base_component_launcher.py\u001b[0m in \u001b[0;36mlaunch\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0mcopy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdeepcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexecution_decision\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minput_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[0mexecution_decision\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 203\u001b[0;31m copy.deepcopy(execution_decision.exec_properties))\n\u001b[0m\u001b[1;32m 204\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m absl.logging.info('Running publisher for %s',\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/orchestration/launcher/in_process_component_launcher.py\u001b[0m in \u001b[0;36m_run_executor\u001b[0;34m(self, execution_id, input_dict, output_dict, exec_properties)\u001b[0m\n\u001b[1;32m 72\u001b[0m \u001b[0;31m# output_dict can still be changed, specifically properties.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m executor.Do(\n\u001b[0;32m---> 74\u001b[0;31m copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties))\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/components/schema_gen/executor.py\u001b[0m in \u001b[0;36mDo\u001b[0;34m(self, input_dict, output_dict, exec_properties)\u001b[0m\n\u001b[1;32m 78\u001b[0m 
\u001b[0mlogging\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Processing schema from statistics for split %s.'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msplit\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 79\u001b[0m stats_uri = io_utils.get_only_uri_in_dir(\n\u001b[0;32m---> 80\u001b[0;31m artifact_utils.get_split_uri([stats_artifact], split))\n\u001b[0m\u001b[1;32m 81\u001b[0m if artifact_utils.is_artifact_version_older_than(\n\u001b[1;32m 82\u001b[0m stats_artifact, artifact_utils._ARTIFACT_VERSION_FOR_STATS_UPDATE): # pylint: disable=protected-access\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/utils/io_utils.py\u001b[0m in \u001b[0;36mget_only_uri_in_dir\u001b[0;34m(dir_path)\u001b[0m\n\u001b[1;32m 78\u001b[0m \u001b[0;34m\"\"\"Gets the only uri from given directory.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 79\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 80\u001b[0;31m \u001b[0mfiles\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfileio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdir_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 81\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiles\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 82\u001b[0m raise RuntimeError(\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/dsl/io/fileio.py\u001b[0m in \u001b[0;36mlistdir\u001b[0;34m(path)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mPathType\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mList\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mPathType\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 72\u001b[0m \u001b[0;34m\"\"\"Return the list of files in a directory.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 73\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_get_filesystem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 74\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tfx/dsl/io/plugins/tensorflow_gfile.py\u001b[0m in \u001b[0;36mlistdir\u001b[0;34m(path)\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlistdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNotFoundError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m \u001b[0;32mraise\u001b[0m 
\u001b[0mfilesystem\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNotFoundError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 68\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mstaticmethod\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mNotFoundError\u001b[0m: " ] } ] }, { "cell_type": "code", "metadata": { "id": "EtS2ZEgCzvAf" }, "source": [ "# Visualize the results\n", "context.show(schema_gen.outputs['schema'])" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "2_yXqq1y6LR6" }, "source": [ "### ExampleValidator\n", "\n", "You can assume that the dataset is clean since we downloaded it from TFDS. But just to review, let's run it through [ExampleValidator](https://www.tensorflow.org/tfx/guide/exampleval) to detect if there are anomalies within the dataset." ] }, { "cell_type": "code", "metadata": { "id": "EaTJiYPpzzZM" }, "source": [ "# Run ExampleValidator\n", "example_validator = ExampleValidator(\n", " statistics=statistics_gen.outputs['statistics'],\n", " schema=schema_gen.outputs['schema'])\n", "context.run(example_validator)" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "v6YzedBSz5KE" }, "source": [ "# Visualize the results. There should be no anomalies.\n", "context.show(example_validator.outputs['anomalies'])" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "tpUFIO9M6yMH" }, "source": [ "### Transform\n", "\n", "Let's now use the [Transform](https://www.tensorflow.org/tfx/guide/transform) component to scale the image pixels and convert the data types to float. You will first define the transform module containing these operations before you run the component." 
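] }, { "cell_type": "markdown", "metadata": {}, "source": [
"To build intuition for what the transform module does with each record, here is a standalone sketch of the per-image parsing step (decode, reshape to 28x28x1, cast to float). It mirrors the `_image_parser` helper defined below but runs eagerly on a synthetic PNG, so it is purely illustrative:\n",
"\n",
"```python\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"\n",
"# Encode a random 28x28 grayscale image as PNG bytes (TFDS serves the images as encoded image strings).\n",
"fake_png = tf.io.encode_png(np.random.randint(0, 255, size=(28, 28, 1), dtype=np.uint8))\n",
"\n",
"# Same steps as _image_parser in the module below.\n",
"image = tf.image.decode_image(fake_png, channels=1)\n",
"image = tf.reshape(image, (28, 28, 1))\n",
"image = tf.cast(image, tf.float32)\n",
"print(image.shape, image.dtype)  # (28, 28, 1) float32\n",
"```"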
] }, { "cell_type": "code", "metadata": { "id": "xL4zrcJ7z9K9" }, "source": [ "_transform_module_file = 'fmnist_transform.py'" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "43xmp2UD0Cc5" }, "source": [ "%%writefile {_transform_module_file}\n", "\n", "import tensorflow as tf\n", "import tensorflow_transform as tft\n", "\n", "# Keys\n", "_LABEL_KEY = 'label'\n", "_IMAGE_KEY = 'image'\n", "\n", "\n", "def _transformed_name(key):\n", " return key + '_xf'\n", "\n", "def _image_parser(image_str):\n", " '''converts the images to a float tensor'''\n", " image = tf.image.decode_image(image_str, channels=1)\n", " image = tf.reshape(image, (28, 28, 1))\n", " image = tf.cast(image, tf.float32)\n", " return image\n", "\n", "\n", "def _label_parser(label_id):\n", " '''converts the labels to a float tensor'''\n", " label = tf.cast(label_id, tf.float32)\n", " return label\n", "\n", "\n", "def preprocessing_fn(inputs):\n", " \"\"\"tf.transform's callback function for preprocessing inputs.\n", " Args:\n", " inputs: map from feature keys to raw not-yet-transformed features.\n", " Returns:\n", " Map from string feature key to transformed feature operations.\n", " \"\"\"\n", " \n", " # Convert the raw image and labels to a float array\n", " with tf.device(\"/cpu:0\"):\n", " outputs = {\n", " _transformed_name(_IMAGE_KEY):\n", " tf.map_fn(\n", " _image_parser,\n", " tf.squeeze(inputs[_IMAGE_KEY], axis=1),\n", " dtype=tf.float32),\n", " _transformed_name(_LABEL_KEY):\n", " tf.map_fn(\n", " _label_parser,\n", " inputs[_LABEL_KEY],\n", " dtype=tf.float32)\n", " }\n", " \n", " # scale the pixels from 0 to 1\n", " outputs[_transformed_name(_IMAGE_KEY)] = tft.scale_to_0_1(outputs[_transformed_name(_IMAGE_KEY)])\n", " \n", " return outputs" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "0uNYsebhLC69" }, "source": [ "You will run the component by passing in the examples, schema, and transform module file.\n", "\n", "*Note: You can safely ignore the warnings and `udf_utils` related errors.*" ] }, { "cell_type": "code", "metadata": { "id": "qthHA2hO1JST" }, "source": [ "# Ignore TF warning messages\n", "tf.get_logger().setLevel('ERROR')\n", "\n", "# Setup the Transform component\n", "transform = Transform(\n", " examples=example_gen.outputs['examples'],\n", " schema=schema_gen.outputs['schema'],\n", " module_file=os.path.abspath(_transform_module_file))\n", "\n", "# Run the component\n", "context.run(transform)" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "QZkbL7sO8Y1N" }, "source": [ "### Tuner\n", "\n", "As the name suggests, the [Tuner](https://www.tensorflow.org/tfx/guide/tuner) component tunes the hyperparameters of your model. To use this, you will need to provide a *tuner module file* which contains a `tuner_fn()` function. In this function, you will mostly do the same steps as you did in the previous ungraded lab but with some key differences in handling the dataset. \n", "\n", "The Transform component earlier saved the transformed examples as TFRecords compressed in `.gz` format and you will need to load that into memory. Once loaded, you will need to create batches of features and labels so you can finally use it for hypertuning. This process is modularized in the `_input_fn()` below. 
\n", "\n", "Going back, the `tuner_fn()` function will return a `TunerFnResult` [namedtuple](https://docs.python.org/3/library/collections.html#collections.namedtuple) containing your `tuner` object and a set of arguments to pass to `tuner.search()` method. You will see these in action in the following cells. When reviewing the module file, we recommend viewing the `tuner_fn()` first before looking at the other auxiliary functions." ] }, { "cell_type": "code", "metadata": { "id": "aE1PLAs_6CVt" }, "source": [ "# Declare name of module file\n", "_tuner_module_file = 'tuner.py'" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "G0F-XhqVlUDB" }, "source": [ "%%writefile {_tuner_module_file}\n", "\n", "# Define imports\n", "from kerastuner.engine import base_tuner\n", "import kerastuner as kt\n", "from tensorflow import keras\n", "from typing import NamedTuple, Dict, Text, Any, List\n", "from tfx.components.trainer.fn_args_utils import FnArgs, DataAccessor\n", "import tensorflow as tf\n", "import tensorflow_transform as tft\n", "\n", "# Declare namedtuple field names\n", "TunerFnResult = NamedTuple('TunerFnResult', [('tuner', base_tuner.BaseTuner),\n", " ('fit_kwargs', Dict[Text, Any])])\n", "\n", "# Label key\n", "LABEL_KEY = 'label_xf'\n", "\n", "# Callback for the search strategy\n", "stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)\n", "\n", "\n", "def _gzip_reader_fn(filenames):\n", " '''Load compressed dataset\n", " \n", " Args:\n", " filenames - filenames of TFRecords to load\n", "\n", " Returns:\n", " TFRecordDataset loaded from the filenames\n", " '''\n", "\n", " # Load the dataset. Specify the compression type since it is saved as `.gz`\n", " return tf.data.TFRecordDataset(filenames, compression_type='GZIP')\n", " \n", "\n", "def _input_fn(file_pattern,\n", " tf_transform_output,\n", " num_epochs=None,\n", " batch_size=32) -> tf.data.Dataset:\n", " '''Create batches of features and labels from TF Records\n", "\n", " Args:\n", " file_pattern - List of files or patterns of file paths containing Example records.\n", " tf_transform_output - transform output graph\n", " num_epochs - Integer specifying the number of times to read through the dataset. \n", " If None, cycles through the dataset forever.\n", " batch_size - An int representing the number of records to combine in a single batch.\n", "\n", " Returns:\n", " A dataset of dict elements, (or a tuple of dict elements and label). 
\n", " Each dict maps feature keys to Tensor or SparseTensor objects.\n", " '''\n", "\n", " # Get feature specification based on transform output\n", " transformed_feature_spec = (\n", " tf_transform_output.transformed_feature_spec().copy())\n", " \n", " # Create batches of features and labels\n", " dataset = tf.data.experimental.make_batched_features_dataset(\n", " file_pattern=file_pattern,\n", " batch_size=batch_size,\n", " features=transformed_feature_spec,\n", " reader=_gzip_reader_fn,\n", " num_epochs=num_epochs,\n", " label_key=LABEL_KEY)\n", " \n", " return dataset\n", "\n", "\n", "def model_builder(hp):\n", " '''\n", " Builds the model and sets up the hyperparameters to tune.\n", "\n", " Args:\n", " hp - Keras tuner object\n", "\n", " Returns:\n", " model with hyperparameters to tune\n", " '''\n", "\n", " # Initialize the Sequential API and start stacking the layers\n", " model = keras.Sequential()\n", " model.add(keras.layers.Flatten(input_shape=(28, 28, 1)))\n", "\n", " # Tune the number of units in the first Dense layer\n", " # Choose an optimal value between 32-512\n", " hp_units = hp.Int('units', min_value=32, max_value=512, step=32)\n", " model.add(keras.layers.Dense(units=hp_units, activation='relu', name='dense_1'))\n", "\n", " # Add next layers\n", " model.add(keras.layers.Dropout(0.2))\n", " model.add(keras.layers.Dense(10, activation='softmax'))\n", "\n", " # Tune the learning rate for the optimizer\n", " # Choose an optimal value from 0.01, 0.001, or 0.0001\n", " hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])\n", "\n", " model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),\n", " loss=keras.losses.SparseCategoricalCrossentropy(),\n", " metrics=['accuracy'])\n", "\n", " return model\n", "\n", "def tuner_fn(fn_args: FnArgs) -> TunerFnResult:\n", " \"\"\"Build the tuner using the KerasTuner API.\n", " Args:\n", " fn_args: Holds args as name/value pairs.\n", "\n", " - working_dir: working dir for tuning.\n", " - train_files: List of file paths containing training tf.Example data.\n", " - eval_files: List of file paths containing eval tf.Example data.\n", " - train_steps: number of train steps.\n", " - eval_steps: number of eval steps.\n", " - schema_path: optional schema of the input data.\n", " - transform_graph_path: optional transform graph produced by TFT.\n", " \n", " Returns:\n", " A namedtuple contains the following:\n", " - tuner: A BaseTuner that will be used for tuning.\n", " - fit_kwargs: Args to pass to tuner's run_trial function for fitting the\n", " model , e.g., the training and validation dataset. 
Required\n", " args depend on the above tuner's implementation.\n", " \"\"\"\n", "\n", " # Define tuner search strategy\n", " tuner = kt.Hyperband(model_builder,\n", " objective='val_accuracy',\n", " max_epochs=10,\n", " factor=3,\n", " directory=fn_args.working_dir,\n", " project_name='kt_hyperband')\n", "\n", " # Load transform output\n", " tf_transform_output = tft.TFTransformOutput(fn_args.transform_graph_path)\n", "\n", " # Use _input_fn() to extract input features and labels from the train and val set\n", " train_set = _input_fn(fn_args.train_files[0], tf_transform_output)\n", " val_set = _input_fn(fn_args.eval_files[0], tf_transform_output)\n", "\n", "\n", " return TunerFnResult(\n", " tuner=tuner,\n", " fit_kwargs={ \n", " \"callbacks\":[stop_early],\n", " 'x': train_set,\n", " 'validation_data': val_set,\n", " 'steps_per_epoch': fn_args.train_steps,\n", " 'validation_steps': fn_args.eval_steps\n", " }\n", " )" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "lzJbeuXNtI-7" }, "source": [ "With the module defined, you can now setup the Tuner component. You can see the description of each argument [here](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/Tuner). \n", "\n", "Notice that we passed a `num_steps` argument to the train and eval args and this was used in the `steps_per_epoch` and `validation_steps` arguments in the tuner module above. This can be useful if you don't want to go through the entire dataset when tuning. For example, if you have 10GB of training data, it would be incredibly time consuming if you will iterate through it entirely just for one epoch and one set of hyperparameters. You can set the number of steps so your program will only go through a fraction of the dataset. \n", "\n", "You can compute for the total number of steps in one epoch by: `number of examples / batch size`. For this particular example, we have `48000 examples / 32 (default size)` which equals `1500` steps per epoch for the train set (compute val steps from 12000 examples). Since you passed `500` in the `num_steps` of the train args, this means that some examples will be skipped. This will likely result in lower accuracy readings but will save time in doing the hypertuning. Try modifying this value later and see if you arrive at the same set of hyperparameters." ] }, { "cell_type": "code", "metadata": { "id": "VqVSc6sS5A1m" }, "source": [ "from tfx.proto import trainer_pb2\n", "\n", "# Setup the Tuner component\n", "tuner = Tuner(\n", " module_file=_tuner_module_file,\n", " examples=transform.outputs['transformed_examples'],\n", " transform_graph=transform.outputs['transform_graph'],\n", " schema=schema_gen.outputs['schema'],\n", " train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=500),\n", " eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=100)\n", " )" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "HdycQnAR7AvG" }, "source": [ "# Run the component. This will take around 10 minutes to run.\n", "# When done, it will summarize the results and show the 10 best trials.\n", "context.run(tuner, enable_cache=False)" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "uW50JS0d9Hd4" }, "source": [ "### Trainer\n", "\n", "Like the Tuner component, the [Trainer](https://www.tensorflow.org/tfx/guide/trainer) component also requires a module file to setup the training process. 
It will look for a `run_fn()` function that defines and trains the model. The steps will look similar to the tuner module file:\n", "\n", "* Define the model - You can get the results of the Tuner component through the `fn_args.hyperparameters` argument. You will see it passed into the `model_builder()` function below. If you didn't run `Tuner`, then you can just explicitly define the number of hidden units and learning rate.\n", "\n", "* Load the train and validation sets - You have done this in the Tuner component. For this module, you will pass in a `num_epochs` value (10) to indicate how many batches will be prepared. You can opt not to do this and pass a `num_steps` value as before.\n", "\n", "* Setup and train the model - This will look very familiar if you're already used to the [Keras Models Training API](https://keras.io/api/models/model_training_apis/). You can pass in callbacks like the [TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) so you can visualize the results later.\n", "\n", "* Save the model - This is needed so you can analyze and serve your model. You will get to do this in later parts of the course and specialization." ] }, { "cell_type": "code", "metadata": { "id": "abSJjDM2ipKS" }, "source": [ "# Declare trainer module file\n", "_trainer_module_file = 'trainer.py'" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "QdgbwOFFihSg" }, "source": [ "%%writefile {_trainer_module_file}\n", "\n", "from tensorflow import keras\n", "from typing import NamedTuple, Dict, Text, Any, List\n", "from tfx.components.trainer.fn_args_utils import FnArgs, DataAccessor\n", "import tensorflow as tf\n", "import tensorflow_transform as tft\n", "\n", "# Define the label key\n", "LABEL_KEY = 'label_xf'\n", "\n", "def _gzip_reader_fn(filenames):\n", " '''Load compressed dataset\n", " \n", " Args:\n", " filenames - filenames of TFRecords to load\n", "\n", " Returns:\n", " TFRecordDataset loaded from the filenames\n", " '''\n", "\n", " # Load the dataset. Specify the compression type since it is saved as `.gz`\n", " return tf.data.TFRecordDataset(filenames, compression_type='GZIP')\n", " \n", "\n", "def _input_fn(file_pattern,\n", " tf_transform_output,\n", " num_epochs=None,\n", " batch_size=32) -> tf.data.Dataset:\n", " '''Create batches of features and labels from TF Records\n", "\n", " Args:\n", " file_pattern - List of files or patterns of file paths containing Example records.\n", " tf_transform_output - transform output graph\n", " num_epochs - Integer specifying the number of times to read through the dataset. \n", " If None, cycles through the dataset forever.\n", " batch_size - An int representing the number of records to combine in a single batch.\n", "\n", " Returns:\n", " A dataset of dict elements, (or a tuple of dict elements and label). 
\n", " Each dict maps feature keys to Tensor or SparseTensor objects.\n", " '''\n", " transformed_feature_spec = (\n", " tf_transform_output.transformed_feature_spec().copy())\n", " \n", " dataset = tf.data.experimental.make_batched_features_dataset(\n", " file_pattern=file_pattern,\n", " batch_size=batch_size,\n", " features=transformed_feature_spec,\n", " reader=_gzip_reader_fn,\n", " num_epochs=num_epochs,\n", " label_key=LABEL_KEY)\n", " \n", " return dataset\n", "\n", "\n", "def model_builder(hp):\n", " '''\n", " Builds the model and sets up the hyperparameters to tune.\n", "\n", " Args:\n", " hp - Keras tuner object\n", "\n", " Returns:\n", " model with hyperparameters to tune\n", " '''\n", "\n", " # Initialize the Sequential API and start stacking the layers\n", " model = keras.Sequential()\n", " model.add(keras.layers.Flatten(input_shape=(28, 28, 1)))\n", "\n", " # Get the number of units from the Tuner results\n", " hp_units = hp.get('units')\n", " model.add(keras.layers.Dense(units=hp_units, activation='relu'))\n", "\n", " # Add next layers\n", " model.add(keras.layers.Dropout(0.2))\n", " model.add(keras.layers.Dense(10, activation='softmax'))\n", "\n", " # Get the learning rate from the Tuner results\n", " hp_learning_rate = hp.get('learning_rate')\n", "\n", " # Setup model for training\n", " model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),\n", " loss=keras.losses.SparseCategoricalCrossentropy(),\n", " metrics=['accuracy'])\n", "\n", " # Print the model summary\n", " model.summary()\n", " \n", " return model\n", "\n", "\n", "def run_fn(fn_args: FnArgs) -> None:\n", " \"\"\"Defines and trains the model.\n", " Args:\n", " fn_args: Holds args as name/value pairs. Refer here for the complete attributes: \n", " https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs#attributes\n", " \"\"\"\n", "\n", " # Callback for TensorBoard\n", " tensorboard_callback = tf.keras.callbacks.TensorBoard(\n", " log_dir=fn_args.model_run_dir, update_freq='batch')\n", " \n", " # Load transform output\n", " tf_transform_output = tft.TFTransformOutput(fn_args.transform_graph_path)\n", " \n", " # Create batches of data good for 10 epochs\n", " train_set = _input_fn(fn_args.train_files[0], tf_transform_output, 10)\n", " val_set = _input_fn(fn_args.eval_files[0], tf_transform_output, 10)\n", "\n", " # Load best hyperparameters\n", " hp = fn_args.hyperparameters.get('values')\n", "\n", " # Build the model\n", " model = model_builder(hp)\n", "\n", " # Train the model\n", " model.fit(\n", " x=train_set,\n", " validation_data=val_set,\n", " callbacks=[tensorboard_callback]\n", " )\n", " \n", " # Save the model\n", " model.save(fn_args.serving_model_dir, save_format='tf')" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "Lu3fQwFX6E8Q" }, "source": [ "You can pass the output of the `Tuner` component to the `Trainer` by filling the `hyperparameters` argument with the `Tuner` output. This is indicated by the `tuner.outputs['best_hyperparameters']` below. You can see the definition of the other arguments [here](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/Trainer)." 
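] }, { "cell_type": "markdown", "metadata": {}, "source": [
"If you want to see exactly what gets handed over, you can peek inside the `best_hyperparameters` artifact after the Tuner has run. This optional sketch assumes the default file name (`best_hyperparameters.txt`) written by the TFX Tuner executor, which serializes `kerastuner.HyperParameters.get_config()` as JSON:\n",
"\n",
"```python\n",
"import json\n",
"import os\n",
"\n",
"# URI of the artifact produced by the Tuner component above.\n",
"hparams_uri = tuner.outputs['best_hyperparameters'].get()[0].uri\n",
"\n",
"with open(os.path.join(hparams_uri, 'best_hyperparameters.txt')) as f:\n",
"    best_hparams = json.load(f)\n",
"\n",
"# The 'values' entry holds the winning settings, e.g. units and learning_rate.\n",
"print(best_hparams['values'])\n",
"```"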
] }, { "cell_type": "code", "metadata": { "id": "u0JOuqSKGsoQ" }, "source": [ "# Setup the Trainer component\n", "trainer = Trainer(\n", " module_file=_trainer_module_file,\n", " examples=transform.outputs['transformed_examples'],\n", " hyperparameters=tuner.outputs['best_hyperparameters'],\n", " transform_graph=transform.outputs['transform_graph'],\n", " schema=schema_gen.outputs['schema'],\n", " train_args=trainer_pb2.TrainArgs(splits=['train']),\n", " eval_args=trainer_pb2.EvalArgs(splits=['eval']))" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "lQfTLKGf7BFk" }, "source": [ "Take note that when re-training your model, you don't always have to retune your hyperparameters. Once you have a set that you think performs well, you can just import it with the ImporterNode as shown in the [official docs](https://www.tensorflow.org/tfx/guide/tuner):\n", "\n", "```\n", "hparams_importer = ImporterNode(\n", " instance_name='import_hparams',\n", " # This can be Tuner's output file or manually edited file. The file contains\n", " # text format of hyperparameters (kerastuner.HyperParameters.get_config())\n", " source_uri='path/to/best_hyperparameters.txt',\n", " artifact_type=HyperParameters)\n", "\n", "trainer = Trainer(\n", " ...\n", " # An alternative is directly use the tuned hyperparameters in Trainer's user\n", " # module code and set hyperparameters to None here.\n", " hyperparameters = hparams_importer.outputs['result'])\n", "```" ] }, { "cell_type": "code", "metadata": { "id": "IwM2743um1w3" }, "source": [ "# Run the component\n", "context.run(trainer, enable_cache=False)" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "PiuE7i0A8qEb" }, "source": [ "Your model should now be saved in your pipeline directory and you can navigate through it as shown below. The file is saved as `saved_model.pb`." ] }, { "cell_type": "code", "metadata": { "id": "mQPZBkw_yl2i" }, "source": [ "# Get artifact uri of trainer model output\n", "model_artifact_dir = trainer.outputs['model'].get()[0].uri\n", "\n", "# List subdirectories artifact uri\n", "print(f'contents of model artifact directory:{os.listdir(model_artifact_dir)}')\n", "\n", "# Define the model directory\n", "model_dir = os.path.join(model_artifact_dir, 'Format-Serving')\n", "\n", "# List contents of model directory\n", "print(f'contents of model directory: {os.listdir(model_dir)}')" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "bu5Bsn0J9ol3" }, "source": [ "You can also visualize the training results by loading the logs saved by the Tensorboard callback." ] }, { "cell_type": "code", "metadata": { "id": "GPqoMMXv5NoY" }, "source": [ "model_run_artifact_dir = trainer.outputs['model_run'].get()[0].uri\n", "\n", "%load_ext tensorboard\n", "%tensorboard --logdir {model_run_artifact_dir}" ], "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "Q6H6eCKC9xLp" }, "source": [ "***Congratulations! You have now created an ML pipeline that includes hyperparameter tuning and model training. You will know more about the next components in future lessons but in the next section, you will first learn about a framework for automatically building ML pipelines: AutoML. Enjoy the rest of the course!***" ] } ] }